diff --git a/.circleci/config.yml b/.circleci/config.yml index dddd8828b2..1d0e41f62c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -736,7 +736,7 @@ jobs: - setup_remote_docker: # CircleCI Image Policy # https://circleci.com/docs/remote-docker-images-support-policy/ - version: 24.0.9 + version: docker23 docker_layer_caching: true - run: name: Docker build @@ -1102,10 +1102,6 @@ workflows: tags: only: /v.*/ - test_updated: - requires: - - lint - - check_helm - - check_compliance matrix: parameters: platform: @@ -1116,10 +1112,6 @@ workflows: tags: only: /v.*/ - test: - requires: - - lint - - check_helm - - check_compliance matrix: parameters: platform: @@ -1130,10 +1122,6 @@ workflows: tags: only: /v.*/ - build_release: - requires: - - pre_verify_release - - test - - test_updated matrix: parameters: platform: @@ -1144,7 +1132,14 @@ workflows: tags: only: /v.*/ - publish_github_release: - requires: [ build_release ] + requires: + - build_release + - lint + - check_helm + - check_compliance + - pre_verify_release + - test + - test_updated filters: branches: ignore: /.*/ diff --git a/.config/nextest.toml b/.config/nextest.toml index 9deb55fd49..f2c4ef3618 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -24,14 +24,27 @@ or ( binary_id(=apollo-router) & test(=axum_factory::tests::response_with_custom or ( binary_id(=apollo-router) & test(=axum_factory::tests::response_with_custom_endpoint_wildcard) ) or ( binary_id(=apollo-router) & test(=axum_factory::tests::response_with_custom_prefix_endpoint) ) or ( binary_id(=apollo-router) & test(=axum_factory::tests::response_with_root_wildcard) ) +or ( binary_id(=apollo-router) & test(=layers::map_first_graphql_response::tests::test_map_first_graphql_response) ) or ( binary_id(=apollo-router) & test(=notification::tests::it_test_ttl) ) or ( binary_id(=apollo-router) & test(=plugins::authentication::subgraph::test::test_credentials_provider_refresh_on_stale) ) +or ( binary_id(=apollo-router) & test(=plugins::expose_query_plan::tests::it_expose_query_plan) ) +or ( binary_id(=apollo-router) & test(=plugins::include_subgraph_errors::test::it_does_not_redact_all_explicit_allow_account_explict_redact_for_product_query) ) +or ( binary_id(=apollo-router) & test(=plugins::include_subgraph_errors::test::it_does_not_redact_all_explicit_allow_review_explict_redact_for_product_query) ) +or ( binary_id(=apollo-router) & test(=plugins::include_subgraph_errors::test::it_does_not_redact_all_implicit_redact_product_explict_allow_for_product_query) ) +or ( binary_id(=apollo-router) & test(=plugins::include_subgraph_errors::test::it_does_redact_all_explicit_allow_account_explict_redact_for_account_query) ) +or ( binary_id(=apollo-router) & test(=plugins::include_subgraph_errors::test::it_does_redact_all_explicit_allow_product_explict_redact_for_product_query) ) +or ( binary_id(=apollo-router) & test(=plugins::include_subgraph_errors::test::it_redacts_all_subgraphs_implicit_redact) ) +or ( binary_id(=apollo-router) & test(=plugins::include_subgraph_errors::test::it_returns_valid_response) ) or ( binary_id(=apollo-router) & test(=plugins::telemetry::config_new::instruments::tests::test_instruments) ) or ( binary_id(=apollo-router) & test(=plugins::telemetry::metrics::apollo::test::apollo_metrics_enabled) ) or ( binary_id(=apollo-router) & test(=plugins::telemetry::tests::it_test_prometheus_metrics) ) or ( binary_id(=apollo-router) & test(=router::tests::basic_event_stream_test) ) or ( binary_id(=apollo-router) & 
test(=router::tests::schema_update_test) ) or ( binary_id(=apollo-router) & test(=services::subgraph_service::tests::test_subgraph_service_websocket_with_error) ) +or ( binary_id(=apollo-router) & test(=services::supergraph::tests::aliased_subgraph_data_rewrites_on_non_root_fetch) ) +or ( binary_id(=apollo-router) & test(=services::supergraph::tests::interface_object_typename_rewrites) ) +or ( binary_id(=apollo-router) & test(=services::supergraph::tests::only_query_interface_object_subgraph) ) +or ( binary_id(=apollo-router) & test(=uplink::license_stream::test::license_expander_claim_no_claim) ) or ( binary_id(=apollo-router) & test(=uplink::license_stream::test::license_expander_claim_pause_claim) ) or ( binary_id(=apollo-router) & test(=uplink::persisted_queries_manifest_stream::test::integration_test) ) or ( binary_id(=apollo-router) & test(=uplink::schema_stream::test::integration_test) ) @@ -96,21 +109,33 @@ or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_reload_config_valid) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_reload_config_with_broken_plugin) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_reload_config_with_broken_plugin_recovery) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_shutdown_with_idle_connection) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::query_planner::progressive_override_with_legacy_qp_reload_to_both_best_effort_keep_previous_config) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::apq) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::connection_failure_blocks_startup) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::entity_cache) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::entity_cache_authorization) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::query_planner_redis_update_defer) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::query_planner_redis_update_introspection) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::query_planner_redis_update_query_fragments) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::query_planner_redis_update_reuse_query_fragments) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::query_planner_redis_update_type_conditional_fetching) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::test::connection_failure_blocks_startup) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::subgraph_response::test_invalid_error_locations_contains_negative_one_location) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::datadog::test_basic) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::datadog::test_resource_mapping_default) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::datadog::test_resource_mapping_override) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::datadog::test_span_metrics) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::datadog::test_with_parent_span) 
) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::jaeger::test_decimal_trace_id) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::jaeger::test_default_operation) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::jaeger::test_local_root) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::jaeger::test_remote_root) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::jaeger::test_selected_operation) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_json) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_json_promote_span_attributes) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_json_sampler_off) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_json_uuid_format) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_text) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_text_sampler_off) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::metrics::test_bad_queries) ) @@ -118,6 +143,10 @@ or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::metrics::test_metrics_bad_query) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::metrics::test_metrics_reloading) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::metrics::test_subgraph_auth_metrics) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::propagation::test_trace_id_via_header) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::traffic_shaping::test_router_rate_limit) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::traffic_shaping::test_subgraph_rate_limit) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::traffic_shaping::test_subgraph_timeout) ) or ( binary_id(=apollo-router::integration_tests) & test(=normal_query_with_defer_accept_header) ) or ( binary_id(=apollo-router::integration_tests) & test(=persisted_queries) ) or ( binary_id(=apollo-router::integration_tests) & test(=queries_should_work_over_get) ) @@ -129,6 +158,7 @@ or ( binary_id(=apollo-router::samples) & test(=/basic/query1) ) or ( binary_id(=apollo-router::samples) & test(=/basic/query2) ) or ( binary_id(=apollo-router::samples) & test(=/enterprise/entity-cache/invalidation) ) or ( binary_id(=apollo-router::samples) & test(=/enterprise/entity-cache/invalidation-subgraph) ) +or ( binary_id(=apollo-router::samples) & test(=/enterprise/entity-cache/invalidation-subgraph-name) ) or ( binary_id(=apollo-router::samples) & test(=/enterprise/entity-cache/invalidation-subgraph-type) ) or ( binary_id(=apollo-router::samples) & test(=/enterprise/query-planning-redis) ) or ( binary_id(=apollo-router::set_context) & test(=test_set_context) ) diff --git a/CHANGELOG.md b/CHANGELOG.md index 12541ffff4..98e93b3042 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,106 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). 
+# [1.54.0] - 2024-09-10
+
+## 🚀 Features
+
+### Add configurability of span attributes in logs ([Issue #5540](https://github.com/apollographql/router/issues/5540))
+
+The router supports a new `telemetry.exporters.logging.stdout.format.json.span_attributes` option that enables you to choose a subset of all span attributes to display in your logs.
+
+When `span_attributes` is specified, the router searches for the first attribute in its input list of span attributes from the root span to the current span and attaches it to the outermost JSON object for the log event. If you set the same attribute name for different spans at different levels, the router chooses the attributes of child spans before the attributes of parent spans.
+
+For example, if you have spans that contain a `span_attr_1` attribute and you want to display only that attribute:
+
+```yaml title="router.yaml"
+telemetry:
+  exporters:
+    logging:
+      stdout:
+        enabled: true
+        format:
+          json:
+            display_span_list: false
+            span_attributes:
+              - span_attr_1
+```
+
+Example output with a list of spans:
+
+```json
+{
+  "timestamp": "2023-10-30T14:09:34.771388Z",
+  "level": "INFO",
+  "fields": {
+    "event_attr_1": "event_attr_1",
+    "event_attr_2": "event_attr_2"
+  },
+  "target": "event_target",
+  "span_attr_1": "span_attr_1"
+}
+```
+
+To learn more, see the [`span_attributes`](https://www.apollographql.com/docs/router/configuration/telemetry/exporters/logging/stdout#span_attributes) docs.
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5867
+
+### Add a histogram metric tracking evaluated query plans ([PR #5875](https://github.com/apollographql/router/pull/5875))
+
+The router supports the new `apollo.router.query_planning.plan.evaluated_plans` histogram metric to track the number of evaluated query plans.
+
+You can use it to help set an optimal `supergraph.query_planning.experimental_plans_limit` option that limits the number of query plans evaluated for a query and reduces the time spent planning.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5875
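For illustration (a sketch, not part of this changelog): a `router.yaml` fragment that sets the plans limit the new histogram is meant to help tune. The value `10000` is an arbitrary placeholder, not a recommended setting.

```yaml
supergraph:
  query_planning:
    # Caps the number of query plans evaluated per query; tune this using the
    # apollo.router.query_planning.plan.evaluated_plans histogram.
    experimental_plans_limit: 10000
```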
+
+## 🐛 Fixes
+
+### Fix Datadog sampling ([PR #5788](https://github.com/apollographql/router/pull/5788))
+
+The router's Datadog exporter has been fixed so that traces are sampled as intended.
+
+Previously, the Datadog exporter's context may not have been set correctly, causing traces to be undersampled.
+
+By [@BrynCooke](https://github.com/BrynCooke) & [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5788
+
+## 📃 Configuration
+
+### General availability of Apollo usage report generation ([#5807](https://github.com/apollographql/router/pull/5807))
+
+The router's Apollo usage report generation feature that was previously [experimental](https://www.apollographql.com/docs/resources/product-launch-stages/#experimental-features) is now [generally available](https://www.apollographql.com/docs/resources/product-launch-stages/#general-availability).
+
+If you used its experimental configuration, you should migrate to the new configuration options:
+
+* `telemetry.apollo.experimental_apollo_metrics_reference_mode` is now `telemetry.apollo.metrics_reference_mode`
+* `telemetry.apollo.experimental_apollo_signature_normalization_algorithm` is now `telemetry.apollo.signature_normalization_algorithm`
+* `experimental_apollo_metrics_generation_mode` has been removed because the Rust implementation (the default since router v1.49.0) generates reports identical to those of the previous router-bridge implementation
+
+The experimental configuration options are now deprecated. They remain functional but will log warnings.
+
+By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/5807
+
+### Helm: Enable easier Kubernetes debugging with heaptrack ([Issue #5789](https://github.com/apollographql/router/issues/5789))
+
+The router's Helm chart has been updated to make debugging with heaptrack easier.
+
+Previously, when debugging multiple Pods with heaptrack, all Pods wrote to the same file, so they'd overwrite each other's results. This issue has been fixed by adding a `hostname` to each output data file from heaptrack.
+
+Also, the Helm chart now supports a `restartPolicy` that enables you to configure a Pod's [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-restarts). The default value of `restartPolicy` is `Always` (the same as the Kubernetes default).
+
+By [@cyberhck](https://github.com/cyberhck) in https://github.com/apollographql/router/pull/5850
+
+## 📚 Documentation
+
+### Document OpenTelemetry information for operation limits ([PR #5884](https://github.com/apollographql/router/pull/5884))
+
+The router's docs for operation limits now describe [using telemetry to set operation limits](https://www.apollographql.com/docs/router/configuration/operation-limits#using-telemetry-to-set-operation-limits) and [logging values](https://www.apollographql.com/docs/router/configuration/operation-limits#logging-values).
+ +By [@andrewmcgivery](https://github.com/andrewmcgivery) in https://github.com/apollographql/router/pull/5884 + + + # [1.53.0] - 2024-08-28 > [!IMPORTANT] diff --git a/Cargo.lock b/Cargo.lock index 8a99dd3deb..bcff62dff1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -57,7 +57,7 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "const-random", "getrandom 0.2.15", "once_cell", @@ -159,9 +159,9 @@ checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "apollo-compiler" -version = "1.0.0-beta.20" +version = "1.0.0-beta.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07961541ebb5c85cc02ea0f08357e31b30537674bbca818884f1fc658fa99116" +checksum = "9496debc2b28a7da94aa6329b653fae37e0f0ec44da93d82a8d5d2a6a82abe0e" dependencies = [ "ahash", "apollo-parser", @@ -178,7 +178,7 @@ dependencies = [ [[package]] name = "apollo-federation" -version = "1.53.0" +version = "1.54.0" dependencies = [ "apollo-compiler", "derive_more", @@ -229,7 +229,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.53.0" +version = "1.54.0" dependencies = [ "access-json", "ahash", @@ -243,6 +243,9 @@ dependencies = [ "async-trait", "aws-config", "aws-credential-types", + "aws-sdk-sso", + "aws-sdk-ssooidc", + "aws-sdk-sts", "aws-sigv4", "aws-smithy-runtime-api", "aws-types", @@ -398,7 +401,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.53.0" +version = "1.54.0" dependencies = [ "apollo-parser", "apollo-router", @@ -414,7 +417,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.53.0" +version = "1.54.0" dependencies = [ "anyhow", "cargo-scaffold", @@ -446,9 +449,9 @@ dependencies = [ [[package]] name = "apollo-smith" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ef0a8fba05f32a14d03eb3ff74f556cecca820012d5846770b839c75332b38" +checksum = "de03c56d7feec663e7f9e981cf4570db68a0901de8c4667f5b5d20321b88af6e" dependencies = [ "apollo-compiler", "apollo-parser", @@ -731,7 +734,7 @@ checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock 2.8.0", "autocfg", - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", "futures-lite 1.13.0", "log", @@ -750,7 +753,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" dependencies = [ "async-lock 3.4.0", - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", "futures-io", "futures-lite 2.3.0", @@ -792,7 +795,7 @@ dependencies = [ "async-lock 2.8.0", "async-signal", "blocking", - "cfg-if 1.0.0", + "cfg-if", "event-listener 3.1.0", "futures-lite 1.13.0", "rustix 0.38.34", @@ -808,7 +811,7 @@ dependencies = [ "async-io 2.3.3", "async-lock 3.4.0", "atomic-waker", - "cfg-if 1.0.0", + "cfg-if", "futures-core", "futures-io", "rustix 0.38.34", @@ -828,7 +831,7 @@ dependencies = [ "async-io 1.13.0", "async-lock 2.8.0", "async-process", - "crossbeam-utils 0.8.20", + "crossbeam-utils", "futures-channel", "futures-core", "futures-io", @@ -951,9 +954,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c5f920ffd1e0526ec9e70e50bf444db50b204395a0fa7016bbf9e31ea1698f" +checksum = 
"f42c2d4218de4dcd890a109461e2f799a1a2ba3bcd2cde9af88360f5df9266c6" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -966,6 +969,7 @@ dependencies = [ "fastrand 2.1.0", "http 0.2.12", "http-body 0.4.6", + "once_cell", "percent-encoding", "pin-project-lite", "tracing", @@ -974,9 +978,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.35.0" +version = "1.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc3ef4ee9cdd19ec6e8b10d963b79637844bbf41c31177b77a188eaa941e69f7" +checksum = "11822090cf501c316c6f75711d77b96fba30658e3867a7762e5e2f5d32d31e81" dependencies = [ "aws-credential-types", "aws-runtime", @@ -996,9 +1000,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.36.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527f3da450ea1f09f95155dba6153bd0d83fe0923344a12e1944dfa5d0b32064" +checksum = "78a2a06ff89176123945d1bbe865603c4d7101bea216a550bb4d2e4e9ba74d74" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1018,9 +1022,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.35.0" +version = "1.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94316606a4aa2cb7a302388411b8776b3fbd254e8506e2dc43918286d8212e9b" +checksum = "a20a91795850826a6f456f4a48eff1dfa59a0e69bdbf5b8c50518fd372106574" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1114,9 +1118,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.6.2" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce87155eba55e11768b8c1afa607f3e864ae82f03caf63258b37455b0ad02537" +checksum = "0abbf454960d0db2ad12684a1640120e7557294b0ff8e2f11236290a1b293225" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -1141,9 +1145,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30819352ed0a04ecf6a2f3477e344d2d1ba33d43e0f09ad9047c12e0d923616f" +checksum = "e086682a53d3aa241192aa110fa8dfce98f2f5ac2ead0de84d41582c7e8fdb96" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1158,13 +1162,14 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.2.0" +version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe321a6b21f5d8eabd0ade9c55d3d0335f3c3157fc2b3e87f05f34b539e4df5" +checksum = "273dcdfd762fae3e1650b8024624e7cd50e484e37abdab73a7a706188ad34543" dependencies = [ "base64-simd", "bytes", "bytes-utils", + "futures-core", "http 0.2.12", "http 1.1.0", "http-body 0.4.6", @@ -1177,6 +1182,8 @@ dependencies = [ "ryu", "serde", "time", + "tokio", + "tokio-util", ] [[package]] @@ -1263,7 +1270,7 @@ checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", @@ -1513,24 +1520,6 @@ dependencies = [ "tower", ] -[[package]] -name = "camino" -version = "1.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo-platform" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" -dependencies = [ - "serde", -] - [[package]] name = 
"cargo-scaffold" version = "0.14.0" @@ -1553,19 +1542,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "cargo_metadata" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" -dependencies = [ - "camino", - "cargo-platform", - "semver 1.0.23", - "serde", - "serde_json", -] - [[package]] name = "cast" version = "0.3.0" @@ -1582,12 +1558,6 @@ dependencies = [ "libc", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -1726,7 +1696,7 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ - "crossbeam-utils 0.8.20", + "crossbeam-utils", ] [[package]] @@ -1763,7 +1733,7 @@ checksum = "7481d4c57092cd1c19dd541b92bdce883de840df30aa5d03fd48a3935c01842e" dependencies = [ "console-api", "crossbeam-channel", - "crossbeam-utils 0.8.20", + "crossbeam-utils", "futures-task", "hdrhistogram", "humantime", @@ -1906,7 +1876,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1953,7 +1923,7 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "crossbeam-utils 0.8.20", + "crossbeam-utils", ] [[package]] @@ -1962,23 +1932,8 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "crossbeam-epoch 0.9.18", - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] @@ -1987,18 +1942,7 @@ version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", + "crossbeam-utils", ] [[package]] @@ -2096,7 +2040,7 @@ version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -2508,7 +2452,7 @@ version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -2571,15 +2515,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] 
-name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "version_check", -] - [[package]] name = "escape8259" version = "0.5.3" @@ -2635,7 +2570,6 @@ dependencies = [ "futures", "lazy_static", "log", - "moka", "rand 0.8.5", "serde_json", "tokio", @@ -2711,7 +2645,7 @@ version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall 0.4.1", "windows-sys 0.52.0", @@ -3034,7 +2968,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -3045,7 +2979,7 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -3209,7 +3143,7 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crunchy", ] @@ -3679,7 +3613,7 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -4115,15 +4049,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "mach" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" -dependencies = [ - "libc", -] - [[package]] name = "maplit" version = "1.0.2" @@ -4151,12 +4076,6 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "md5" version = "0.7.0" @@ -4175,15 +4094,6 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] - [[package]] name = "memoffset" version = "0.9.1" @@ -4258,7 +4168,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "downcast", "fragile", "lazy_static", @@ -4273,37 +4183,12 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "proc-macro2", "quote", "syn 1.0.109", ] -[[package]] -name = "moka" -version = "0.8.6" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "975fa04238144061e7f8df9746b2e9cd93ef85881da5548d842a7c6a4b614415" -dependencies = [ - "async-io 1.13.0", - "async-lock 2.8.0", - "crossbeam-channel", - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.20", - "futures-util", - "num_cpus", - "once_cell", - "parking_lot", - "quanta", - "scheduled-thread-pool", - "skeptic", - "smallvec", - "tagptr", - "thiserror", - "triomphe", - "uuid", -] - [[package]] name = "multer" version = "2.1.0" @@ -4899,7 +4784,7 @@ version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall 0.5.3", "smallvec", @@ -5100,7 +4985,7 @@ checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg", "bitflags 1.3.2", - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", "libc", "log", @@ -5114,7 +4999,7 @@ version = "3.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3ed00ed3fbf728b5816498ecd316d1716eecaced9c0c8d2c5a6740ca214985b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", @@ -5215,7 +5100,7 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fnv", "lazy_static", "memchr", @@ -5345,33 +5230,6 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" -[[package]] -name = "pulldown-cmark" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" -dependencies = [ - "bitflags 2.6.0", - "memchr", - "unicase", -] - -[[package]] -name = "quanta" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" -dependencies = [ - "crossbeam-utils 0.8.20", - "libc", - "mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", - "web-sys", - "winapi", -] - [[package]] name = "quick-error" version = "1.2.3" @@ -5458,15 +5316,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "raw-cpuid" -version = "10.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "rayon" version = "1.10.0" @@ -5484,7 +5333,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", - "crossbeam-utils 0.8.20", + "crossbeam-utils", ] [[package]] @@ -5748,7 +5597,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", - "cfg-if 1.0.0", + "cfg-if", "getrandom 0.2.15", "libc", "spin", @@ -5781,9 +5630,9 @@ dependencies = [ [[package]] name = "router-bridge" -version = "0.6.0+v2.9.0" +version = "0.6.1+v2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"96ef4910ade6753863c8437a76e88e236ab91688dcfe739d73417ae7848f3b92" +checksum = "b3be2d3ebbcbceb19dc813f5cee507295c673bb4e3f7a7cd532c8c27c95f92fc" dependencies = [ "anyhow", "async-channel 1.9.0", @@ -5835,7 +5684,7 @@ checksum = "32a58fa8a7ccff2aec4f39cc45bf5f985cec7125ab271cf681c279fd00192b49" dependencies = [ "countme", "hashbrown 0.14.5", - "memoffset 0.9.1", + "memoffset", "rustc-hash", "text-size", ] @@ -5846,7 +5695,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7df9d3ebd4f17b52e6134efe2fa20021c80688cbe823d481a729a993b730493" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "dw", "lazy_static", "libc", @@ -6027,15 +5876,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "scheduled-thread-pool" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" -dependencies = [ - "parking_lot", -] - [[package]] name = "schemars" version = "0.8.21" @@ -6134,9 +5974,6 @@ name = "semver" version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" -dependencies = [ - "serde", -] [[package]] name = "semver-parser" @@ -6324,7 +6161,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest", ] @@ -6335,7 +6172,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest", ] @@ -6407,21 +6244,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" -[[package]] -name = "skeptic" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" -dependencies = [ - "bytecount", - "cargo_metadata", - "error-chain", - "glob", - "pulldown-cmark", - "tempfile", - "walkdir", -] - [[package]] name = "slab" version = "0.4.9" @@ -6650,19 +6472,13 @@ dependencies = [ "libc", ] -[[package]] -name = "tagptr" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" - [[package]] name = "tempfile" version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand 2.1.0", "rustix 0.38.34", "windows-sys 0.52.0", @@ -6793,7 +6609,7 @@ version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -7385,7 +7201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374" dependencies = [ "async-trait", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", @@ -7409,7 +7225,7 @@ version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", "lru-cache", @@ -7763,12 +7579,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -7781,7 +7591,7 @@ version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] @@ -7806,7 +7616,7 @@ version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -8148,7 +7958,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] diff --git a/Cargo.toml b/Cargo.toml index 2d5abeb92d..b332785452 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,9 +49,9 @@ debug = 1 # Dependencies used in more than one place are specified here in order to keep versions in sync: # https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table [workspace.dependencies] -apollo-compiler = "=1.0.0-beta.20" +apollo-compiler = "=1.0.0-beta.21" apollo-parser = "0.8.0" -apollo-smith = "0.10.0" +apollo-smith = "0.11.0" async-trait = "0.1.77" hex = { version = "0.4.3", features = ["serde"] } http = "0.2.11" @@ -75,4 +75,4 @@ serde_json_bytes = { version = "0.2.4", features = ["preserve_order"] } sha1 = "0.10.6" tempfile = "3.10.1" tokio = { version = "1.36.0", features = ["full"] } -tower = { version = "0.4.13", features = ["full"] } \ No newline at end of file +tower = { version = "0.4.13", features = ["full"] } diff --git a/apollo-federation/Cargo.toml b/apollo-federation/Cargo.toml index 220c25b371..c29f03a54a 100644 --- a/apollo-federation/Cargo.toml +++ b/apollo-federation/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-federation" -version = "1.53.0" +version = "1.54.0" authors = ["The Apollo GraphQL Contributors"] edition = "2021" description = "Apollo Federation" diff --git a/apollo-federation/src/api_schema.rs b/apollo-federation/src/api_schema.rs index 158453c64e..bf1b1316d7 100644 --- a/apollo-federation/src/api_schema.rs +++ b/apollo-federation/src/api_schema.rs @@ -39,48 +39,36 @@ fn remove_core_feature_elements(schema: &mut FederationSchema) -> Result<(), Fed for position in &types_for_removal { match position { position::TypeDefinitionPosition::Object(position) => { - let object = position.get(schema.schema())?; - let remove_children = object + let object = position.get(schema.schema())?.clone(); + object .fields .keys() .map(|field_name| position.field(field_name.clone())) - .collect::>(); - for child in remove_children { - child.remove(schema)?; - } + .try_for_each(|child| child.remove(schema))?; } position::TypeDefinitionPosition::Interface(position) => { - let interface = position.get(schema.schema())?; - 
let remove_children = interface + let interface = position.get(schema.schema())?.clone(); + interface .fields .keys() .map(|field_name| position.field(field_name.clone())) - .collect::<Vec<_>>(); - for child in remove_children { - child.remove(schema)?; - } + .try_for_each(|child| child.remove(schema))?; } position::TypeDefinitionPosition::InputObject(position) => { - let input_object = position.get(schema.schema())?; - let remove_children = input_object + let input_object = position.get(schema.schema())?.clone(); + input_object .fields .keys() .map(|field_name| position.field(field_name.clone())) - .collect::<Vec<_>>(); - for child in remove_children { - child.remove(schema)?; - } + .try_for_each(|child| child.remove(schema))?; } position::TypeDefinitionPosition::Enum(position) => { - let enum_ = position.get(schema.schema())?; - let remove_children = enum_ + let enum_ = position.get(schema.schema())?.clone(); + enum_ .values .keys() .map(|field_name| position.value(field_name.clone())) - .collect::<Vec<_>>(); - for child in remove_children { - child.remove(schema)?; - } + .try_for_each(|child| child.remove(schema))?; } _ => {} } diff --git a/apollo-federation/src/error/mod.rs b/apollo-federation/src/error/mod.rs index 555d1a4339..9e4487c30f 100644 --- a/apollo-federation/src/error/mod.rs +++ b/apollo-federation/src/error/mod.rs @@ -4,9 +4,11 @@ use std::fmt::Display; use std::fmt::Formatter; use std::fmt::Write; +use apollo_compiler::executable::GetOperationError; use apollo_compiler::validation::DiagnosticList; use apollo_compiler::validation::WithErrors; use apollo_compiler::InvalidNameError; +use apollo_compiler::Name; use lazy_static::lazy_static; use crate::subgraph::spec::FederationSpecError; @@ -56,6 +58,10 @@ pub enum SingleFederationError { InvalidGraphQLName(#[from] InvalidNameError), #[error("Subgraph invalid: {message}")] InvalidSubgraph { message: String }, + #[error("Operation name not found")] + UnknownOperation, + #[error("Unsupported custom directive @{name} on fragment spread. Due to query transformations during planning, the router requires directives on fragment spreads to support both the FRAGMENT_SPREAD and INLINE_FRAGMENT locations.")] + UnsupportedSpreadDirective { name: Name }, #[error("{message}")] DirectiveDefinitionInvalid { message: String }, #[error("{message}")] @@ -225,6 +231,12 @@ impl SingleFederationError { SingleFederationError::InvalidGraphQL { .. } | SingleFederationError::InvalidGraphQLName(_) => ErrorCode::InvalidGraphQL, SingleFederationError::InvalidSubgraph { .. } => ErrorCode::InvalidGraphQL, + // TODO(@goto-bus-stop): this should have a different error code: it's not the graphql + // that's invalid, but the operation name + SingleFederationError::UnknownOperation => ErrorCode::InvalidGraphQL, + // TODO(@goto-bus-stop): this should have a different error code: it's not invalid, + // just unsupported due to internal limitations. + SingleFederationError::UnsupportedSpreadDirective { .. } => ErrorCode::InvalidGraphQL, SingleFederationError::DirectiveDefinitionInvalid { ..
} => { ErrorCode::DirectiveDefinitionInvalid } @@ -406,6 +418,12 @@ impl From for FederationError { } } +impl From for FederationError { + fn from(_: GetOperationError) -> Self { + SingleFederationError::UnknownOperation.into() + } +} + impl From for FederationError { fn from(err: FederationSpecError) -> Self { // TODO: When we get around to finishing the composition port, we should really switch it to diff --git a/apollo-federation/src/operation/merging.rs b/apollo-federation/src/operation/merging.rs index 4c2b31cbd3..c938f2e564 100644 --- a/apollo-federation/src/operation/merging.rs +++ b/apollo-federation/src/operation/merging.rs @@ -1,14 +1,13 @@ //! Provides methods for recursively merging selections and selection sets. use std::sync::Arc; -use apollo_compiler::collections::IndexMap; +use selection_map::SelectionMap; use super::selection_map; use super::FieldSelection; use super::FieldSelectionValue; use super::FragmentSpreadSelection; use super::FragmentSpreadSelectionValue; -use super::HasSelectionKey as _; use super::InlineFragmentSelection; use super::InlineFragmentSelectionValue; use super::NamedFragments; @@ -16,6 +15,8 @@ use super::Selection; use super::SelectionSet; use super::SelectionValue; use crate::error::FederationError; +use crate::operation::HasSelectionKey; +use crate::schema::position::CompositeTypeDefinitionPosition; impl<'a> FieldSelectionValue<'a> { /// Merges the given field selections into this one. @@ -28,43 +29,38 @@ impl<'a> FieldSelectionValue<'a> { /// Returns an error if: /// - The parent type or schema of any selection does not match `self`'s. /// - Any selection does not select the same field position as `self`. - fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { + fn merge_into(&mut self, other: &FieldSelection) -> Result<(), FederationError> { let self_field = &self.get().field; - let mut selection_sets = vec![]; - for other in others { - let other_field = &other.field; - if other_field.schema != self_field.schema { - return Err(FederationError::internal( - "Cannot merge field selections from different schemas", - )); - } - if other_field.field_position != self_field.field_position { - return Err(FederationError::internal(format!( + let mut selection_set = None; + let other_field = &other.field; + if other_field.schema != self_field.schema { + return Err(FederationError::internal( + "Cannot merge field selections from different schemas", + )); + } + if other_field.field_position != self_field.field_position { + return Err(FederationError::internal(format!( "Cannot merge field selection for field \"{}\" into a field selection for field \"{}\"", other_field.field_position, self_field.field_position, ))); - } - if self.get().selection_set.is_some() { - let Some(other_selection_set) = &other.selection_set else { - return Err(FederationError::internal(format!( - "Field \"{}\" has composite type but not a selection set", - other_field.field_position, - ))); - }; - selection_sets.push(other_selection_set); - } else if other.selection_set.is_some() { + } + if self.get().selection_set.is_some() { + let Some(other_selection_set) = &other.selection_set else { return Err(FederationError::internal(format!( - "Field \"{}\" has non-composite type but also has a selection set", + "Field \"{}\" has composite type but not a selection set", other_field.field_position, ))); - } + }; + selection_set = Some(other_selection_set); + } else if other.selection_set.is_some() { + return Err(FederationError::internal(format!( + "Field 
\"{}\" has non-composite type but also has a selection set", + other_field.field_position, + ))); } if let Some(self_selection_set) = self.get_selection_set_mut() { - self_selection_set.merge_into(selection_sets.into_iter())?; + self_selection_set.merge_into(selection_set.into_iter())?; } Ok(()) } @@ -79,35 +75,26 @@ impl<'a> InlineFragmentSelectionValue<'a> { /// /// # Errors /// Returns an error if the parent type or schema of any selection does not match `self`'s. - fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { + fn merge_into(&mut self, other: &InlineFragmentSelection) -> Result<(), FederationError> { let self_inline_fragment = &self.get().inline_fragment; - let mut selection_sets = vec![]; - for other in others { - let other_inline_fragment = &other.inline_fragment; - if other_inline_fragment.schema != self_inline_fragment.schema { - return Err(FederationError::internal( - "Cannot merge inline fragment from different schemas", - )); - } - if other_inline_fragment.parent_type_position - != self_inline_fragment.parent_type_position - { - return Err(FederationError::internal( - format!( - "Cannot merge inline fragment of parent type \"{}\" into an inline fragment of parent type \"{}\"", - other_inline_fragment.parent_type_position, - self_inline_fragment.parent_type_position, - ), - )); - } - selection_sets.push(&other.selection_set); + let other_inline_fragment = &other.inline_fragment; + if other_inline_fragment.schema != self_inline_fragment.schema { + return Err(FederationError::internal( + "Cannot merge inline fragment from different schemas", + )); + } + if other_inline_fragment.parent_type_position != self_inline_fragment.parent_type_position { + return Err(FederationError::internal( + format!( + "Cannot merge inline fragment of parent type \"{}\" into an inline fragment of parent type \"{}\"", + other_inline_fragment.parent_type_position, + self_inline_fragment.parent_type_position, + ), + )); } + self.get_selection_set_mut() - .merge_into(selection_sets.into_iter())?; - Ok(()) + .merge_into(std::iter::once(&other.selection_set)) } } @@ -120,24 +107,19 @@ impl<'a> FragmentSpreadSelectionValue<'a> { /// /// # Errors /// Returns an error if the parent type or schema of any selection does not match `self`'s. - fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { + fn merge_into(&mut self, other: &FragmentSpreadSelection) -> Result<(), FederationError> { let self_fragment_spread = &self.get().spread; - for other in others { - let other_fragment_spread = &other.spread; - if other_fragment_spread.schema != self_fragment_spread.schema { - return Err(FederationError::internal( - "Cannot merge fragment spread from different schemas", - )); - } - // Nothing to do since the fragment spread is already part of the selection set. - // Fragment spreads are uniquely identified by fragment name and applied directives. - // Since there is already an entry for the same fragment spread, there is no point - // in attempting to merge its sub-selections, as the underlying entry should be - // exactly the same as the currently processed one. + let other_fragment_spread = &other.spread; + if other_fragment_spread.schema != self_fragment_spread.schema { + return Err(FederationError::internal( + "Cannot merge fragment spread from different schemas", + )); } + // Nothing to do since the fragment spread is already part of the selection set. 
+ // Fragment spreads are uniquely identified by fragment name and applied directives. + // Since there is already an entry for the same fragment spread, there is no point + // in attempting to merge its sub-selections, as the underlying entry should be + // exactly the same as the currently processed one. Ok(()) } } @@ -173,68 +155,65 @@ impl SelectionSet { } selections_to_merge.extend(other.selections.values()); } - self.merge_selections_into(selections_to_merge.into_iter()) + self.merge_selections_into(selections_to_merge.into_iter(), false) } /// NOTE: This is a private API and should be used with care, use `add_selection` instead. /// /// A helper function for merging the given selections into this one. /// + /// The `do_fragment_inlining` flag enables a check to see if any inline fragments yielded from + /// `others` can be recursively merged into the selection set instead of just merging in the + /// fragment. This requires that the fragment has no directives and either has no type + /// condition or the type condition matches this selection set's type position. + /// /// # Errors /// Returns an error if the parent type or schema of any selection does not match `self`'s. /// /// Returns an error if any selection contains invalid GraphQL that prevents the merge. + #[allow(unreachable_code)] pub(super) fn merge_selections_into<'op>( &mut self, - others: impl Iterator, + mut others: impl Iterator, + do_fragment_inlining: bool, ) -> Result<(), FederationError> { - let mut fields = IndexMap::default(); - let mut fragment_spreads = IndexMap::default(); - let mut inline_fragments = IndexMap::default(); - let target = Arc::make_mut(&mut self.selections); - for other_selection in others { - let other_key = other_selection.key(); - match target.entry(other_key.clone()) { - selection_map::Entry::Occupied(existing) => match existing.get() { - Selection::Field(self_field_selection) => { - let Selection::Field(other_field_selection) = other_selection else { - return Err(FederationError::internal( - format!( - "Field selection key for field \"{}\" references non-field selection", - self_field_selection.field.field_position, - ), - )); + fn insert_selection( + target: &mut SelectionMap, + selection: &Selection, + ) -> Result<(), FederationError> { + match target.entry(selection.key()) { + selection_map::Entry::Vacant(vacant) => { + vacant.insert(selection.clone())?; + Ok(()) + } + selection_map::Entry::Occupied(mut entry) => match entry.get_mut() { + SelectionValue::Field(mut field) => { + let Selection::Field(other_field) = selection else { + return Err(FederationError::internal(format!( + "Field selection key for field \"{}\" references non-field selection", + field.get().field.field_position, + ))); }; - fields - .entry(other_key) - .or_insert_with(Vec::new) - .push(other_field_selection); + field.merge_into(other_field) } - Selection::FragmentSpread(self_fragment_spread_selection) => { - let Selection::FragmentSpread(other_fragment_spread_selection) = - other_selection - else { + SelectionValue::FragmentSpread(mut spread) => { + let Selection::FragmentSpread(other_spread) = selection else { return Err(FederationError::internal( format!( "Fragment spread selection key for fragment \"{}\" references non-field selection", - self_fragment_spread_selection.spread.fragment_name, + spread.get().spread.fragment_name, ), )); }; - fragment_spreads - .entry(other_key) - .or_insert_with(Vec::new) - .push(other_fragment_spread_selection); + spread.merge_into(other_spread) } - 
Selection::InlineFragment(self_inline_fragment_selection) => { - let Selection::InlineFragment(other_inline_fragment_selection) = - other_selection - else { + SelectionValue::InlineFragment(mut inline) => { + let Selection::InlineFragment(other_inline) = selection else { return Err(FederationError::internal( format!( "Inline fragment selection key under parent type \"{}\" {}references non-field selection", - self_inline_fragment_selection.inline_fragment.parent_type_position, - self_inline_fragment_selection.inline_fragment.type_condition_position.clone() + inline.get().inline_fragment.parent_type_position, + inline.get().inline_fragment.type_condition_position.clone() .map_or_else( String::new, |cond| format!("(type condition: {}) ", cond), @@ -242,53 +221,36 @@ impl SelectionSet { ), )); }; - inline_fragments - .entry(other_key) - .or_insert_with(Vec::new) - .push(other_inline_fragment_selection); + inline.merge_into(other_inline) } }, - selection_map::Entry::Vacant(vacant) => { - vacant.insert(other_selection.clone())?; - } } } - for (key, self_selection) in target.iter_mut() { - match self_selection { - SelectionValue::Field(mut self_field_selection) => { - if let Some(other_field_selections) = fields.shift_remove(key) { - self_field_selection.merge_into( - other_field_selections.iter().map(|selection| &***selection), - )?; - } - } - SelectionValue::FragmentSpread(mut self_fragment_spread_selection) => { - if let Some(other_fragment_spread_selections) = - fragment_spreads.shift_remove(key) - { - self_fragment_spread_selection.merge_into( - other_fragment_spread_selections - .iter() - .map(|selection| &***selection), - )?; - } - } - SelectionValue::InlineFragment(mut self_inline_fragment_selection) => { - if let Some(other_inline_fragment_selections) = - inline_fragments.shift_remove(key) - { - self_inline_fragment_selection.merge_into( - other_inline_fragment_selections - .iter() - .map(|selection| &***selection), - )?; + let target = Arc::make_mut(&mut self.selections); + + if do_fragment_inlining { + fn recurse_on_inline_fragment<'a>( + target: &mut SelectionMap, + type_pos: &CompositeTypeDefinitionPosition, + mut others: impl Iterator, + ) -> Result<(), FederationError> { + others.try_for_each(|selection| match selection { + Selection::InlineFragment(inline) if inline.is_unnecessary(type_pos) => { + recurse_on_inline_fragment( + target, + type_pos, + inline.selection_set.selections.values(), + ) } - } + selection => insert_selection(target, selection), + }) } - } - Ok(()) + recurse_on_inline_fragment(target, &self.type_position, others) + } else { + others.try_for_each(|selection| insert_selection(target, selection)) + } } /// Inserts a `Selection` into the inner map. Should a selection with the same key already @@ -305,13 +267,14 @@ impl SelectionSet { pub(crate) fn add_local_selection( &mut self, selection: &Selection, + do_fragment_inlining: bool, ) -> Result<(), FederationError> { debug_assert_eq!( &self.schema, selection.schema(), "In order to add selection it needs to point to the same schema" ); - self.merge_selections_into(std::iter::once(selection)) + self.merge_selections_into(std::iter::once(selection), do_fragment_inlining) } /// Inserts a `SelectionSet` into the inner map. 
Should any sub selection with the same key already diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index 71b4aece8b..4c17083c3b 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -20,6 +20,7 @@ use std::ops::Deref; use std::sync::atomic; use std::sync::Arc; +use apollo_compiler::collections::HashSet; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; use apollo_compiler::executable; @@ -27,6 +28,7 @@ use apollo_compiler::name; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_compiler::Node; +use itertools::Itertools; use serde::Serialize; use crate::compat::coerce_executable_values; @@ -45,6 +47,7 @@ use crate::schema::position::FieldDefinitionPosition; use crate::schema::position::InterfaceTypeDefinitionPosition; use crate::schema::position::SchemaRootDefinitionKind; use crate::schema::ValidFederationSchema; +use crate::utils::FallibleIterator; mod contains; mod directive_list; @@ -670,8 +673,8 @@ mod selection_map { if *self.key() != value.key() { return Err(Internal { message: format!( - "Key mismatch when inserting selection {} into vacant entry ", - value + "Key mismatch when inserting selection `{value}` into vacant entry. Expected {:?}, found {:?}", + self.key(), value.key() ), } .into()); @@ -786,6 +789,7 @@ impl Selection { pub(crate) fn from_element( element: OpPathElement, sub_selections: Option<SelectionSet>, + unnecessary_directives: Option<&HashSet<Node<executable::Directive>>>, ) -> Result<Selection, FederationError> { // PORT_NOTE: This is TODO item is copied from the JS `selectionOfElement` function. // TODO: validate that the subSelection is ok for the element @@ -797,7 +801,21 @@ "unexpected inline fragment without sub-selections", )); }; - Ok(InlineFragmentSelection::new(inline_fragment, sub_selections).into()) + if let Some(unnecessary_directives) = unnecessary_directives { + let directives = inline_fragment + .directives + .iter() + .filter(|dir| !unnecessary_directives.contains(dir.as_ref())) + .cloned() + .collect::<DirectiveList>(); + Ok(InlineFragmentSelection::new( + inline_fragment.with_updated_directives(directives), + sub_selections, + ) + .into()) + } else { + Ok(InlineFragmentSelection::new(inline_fragment, sub_selections).into()) + } } } } @@ -894,10 +912,6 @@ impl Selection { } } - fn sub_selection_type_position(&self) -> Option<CompositeTypeDefinitionPosition> { - Some(self.selection_set()?.type_position.clone()) - } - pub(crate) fn conditions(&self) -> Result<Conditions, FederationError> { let self_conditions = Conditions::from_directives(self.directives())?; if let Conditions::Boolean(false) = self_conditions { @@ -1672,6 +1686,16 @@ mod inline_fragment_selection { selection_set: self.selection_set.clone(), } } + pub(crate) fn with_updated_directives_and_selection_set( + &self, + directives: impl Into<DirectiveList>, + selection_set: SelectionSet, + ) -> Self { + Self { + inline_fragment: self.inline_fragment.with_updated_directives(directives), + selection_set, + } + } } impl HasSelectionKey for InlineFragmentSelection { @@ -1907,7 +1931,7 @@ impl SelectionSet { .flat_map(SelectionSet::split_top_level_fields) .filter_map(move |set| { let parent_type = ele.parent_type_position(); - Selection::from_element(ele.clone(), Some(set)) + Selection::from_element(ele.clone(), Some(set), None) .ok() .map(|sel| SelectionSet::from_selection(parent_type, sel)) }), @@ -2027,7 +2051,7 @@ impl SelectionSet { type_position, selections: Arc::new(SelectionMap::new()), }; - merged.merge_selections_into(normalized_selections.iter())?; +
merged.merge_selections_into(normalized_selections.iter(), false)?; Ok(merged) } @@ -2124,7 +2148,7 @@ impl SelectionSet { type_position: self.type_position.clone(), selections: Arc::new(SelectionMap::new()), }; - expanded.merge_selections_into(expanded_selections.iter())?; + expanded.merge_selections_into(expanded_selections.iter(), false)?; Ok(expanded) } @@ -2405,16 +2429,14 @@ impl SelectionSet { selection_key_groups: impl Iterator<Item = impl Iterator<Item = &'a Selection>>, named_fragments: &NamedFragments, ) -> Result<Self, FederationError> { - let mut result = SelectionMap::new(); - for group in selection_key_groups { - let selection = Self::make_selection(schema, parent_type, group, named_fragments)?; - result.insert(selection); - } - Ok(SelectionSet { - schema: schema.clone(), - type_position: parent_type.clone(), - selections: Arc::new(result), - }) + selection_key_groups + .map(|group| Self::make_selection(schema, parent_type, group, named_fragments)) + .try_collect() + .map(|result| SelectionSet { + schema: schema.clone(), + type_position: parent_type.clone(), + selections: Arc::new(result), + }) } // PORT_NOTE: Some features of the TypeScript `lazyMap` were not ported: @@ -2471,6 +2493,7 @@ impl SelectionSet { // Now update the rest of the selections using the `mapper` function. update_new_selection(first_changed); + for selection in iter { update_new_selection(mapper(selection)?) } @@ -2499,19 +2522,27 @@ impl SelectionSet { sibling_typename.alias().cloned(), ); let typename_selection = - Selection::from_element(field_element.into(), /*subselection*/ None)?; + Selection::from_element(field_element.into(), /*subselection*/ None, None)?; Ok([typename_selection, updated].into_iter().collect()) }) } + /// Adds a __typename field for selection sets on abstract types. + /// + /// __typename is added to the sub selection set of a given selection in the following conditions: + /// * if a given selection is a field, we add a __typename sub selection if its selection set type + /// position is an abstract type + /// * if a given selection is a fragment, we only add a __typename sub selection if the fragment + /// specifies a type condition and that type condition is an abstract type. pub(crate) fn add_typename_field_for_abstract_types( &self, parent_type_if_abstract: Option<AbstractTypeDefinitionPosition>, ) -> Result<SelectionSet, FederationError> { let mut selection_map = SelectionMap::new(); if let Some(parent) = parent_type_if_abstract { - // XXX(@goto-bus-stop): if the selection set has an *alias* named __typename for some - // other field, this doesn't work right. is that allowed? + // We don't handle aliased __typename fields. This means we may end up with an additional + // __typename sub selection. This should be fine though, as aliased __typenames should + // be a rare occurrence.
if !self.has_top_level_typename_field() { let typename_selection = Selection::from_field( Field::new_introspection_typename(&self.schema, &parent.into(), None), @@ -2522,11 +2553,24 @@ } for selection in self.selections.values() { selection_map.insert(if let Some(selection_set) = selection.selection_set() { - let type_if_abstract = selection - .sub_selection_type_position() - .and_then(|ty| ty.try_into().ok()); + let abstract_type = match selection { + Selection::Field(field_selection) => field_selection + .selection_set + .as_ref() + .map(|s| s.type_position.clone()), + Selection::FragmentSpread(fragment_selection) => { + Some(fragment_selection.spread.type_condition_position.clone()) + } + Selection::InlineFragment(inline_fragment_selection) => { + inline_fragment_selection + .inline_fragment + .type_condition_position + .clone() + } + } + .and_then(|ty| ty.try_into().ok()); let updated_selection_set = - selection_set.add_typename_field_for_abstract_types(type_if_abstract)?; + selection_set.add_typename_field_for_abstract_types(abstract_type)?; if updated_selection_set == *selection_set { selection.clone() @@ -2582,6 +2626,16 @@ impl SelectionSet { &mut self, path: &[Arc<OpPathElement>], selection_set: Option<&Arc<SelectionSet>>, + ) -> Result<(), FederationError> { + let mut unnecessary_directives = HashSet::default(); + self.add_at_path_inner(path, selection_set, &mut unnecessary_directives) + } + + fn add_at_path_inner( + &mut self, + path: &[Arc<OpPathElement>], + selection_set: Option<&Arc<SelectionSet>>, + unnecessary_directives: &mut HashSet<Node<executable::Directive>>, ) -> Result<(), FederationError> { // PORT_NOTE: This method was ported from the JS class `SelectionSetUpdates`. Unlike the // JS code, this mutates the selection set map in-place. @@ -2592,28 +2646,39 @@ let Some(sub_selection_type) = element.sub_selection_type_position()? else { return Err(FederationError::internal("unexpected error: add_at_path encountered a field that is not of a composite type".to_string())); }; - let mut selection = Arc::make_mut(&mut self.selections) - .entry(ele.key()) - .or_insert(|| { - Selection::from_element( + let target = Arc::make_mut(&mut self.selections); + let mut selection = match target.get_mut(&ele.key()) { + Some(selection) => selection, + None => { + let selection = Selection::from_element( element, // We immediately add a selection afterward to make this selection set // valid. Some(SelectionSet::empty(self.schema.clone(), sub_selection_type)), - ) - })?; + Some(&*unnecessary_directives), + )?; + target.entry(selection.key()).or_insert(|| Ok(selection))?
+ } + }; + unnecessary_directives.extend( + selection + .get_directives_mut() + .iter() + .filter(|d| d.name == "include" || d.name == "skip") + .cloned(), + ); match &mut selection { SelectionValue::Field(field) => match field.get_selection_set_mut() { - Some(sub_selection) => sub_selection.add_at_path(path, selection_set)?, + Some(sub_selection) => sub_selection.add_at_path_inner(path, selection_set, unnecessary_directives), None => return Err(FederationError::internal("add_at_path encountered a field without a subselection which should never happen".to_string())), }, SelectionValue::InlineFragment(fragment) => fragment .get_selection_set_mut() - .add_at_path(path, selection_set)?, + .add_at_path_inner(path, selection_set, unnecessary_directives), SelectionValue::FragmentSpread(_fragment) => { - return Err(FederationError::internal("add_at_path encountered a named fragment spread which should never happen".to_string())); + return Err(FederationError::internal("add_at_path encountered a named fragment spread which should never happen".to_string())) } - }; + }?; } // If we have no sub-path, we can add the selection. Some((ele, &[])) => { @@ -2632,8 +2697,9 @@ impl SelectionSet { return Ok(()); } else { // add leaf - let selection = Selection::from_element(element, None)?; - self.add_local_selection(&selection)? + let selection = + Selection::from_element(element, None, Some(&*unnecessary_directives))?; + self.add_local_selection(&selection, true)? } } else { let selection_set = selection_set @@ -2648,8 +2714,12 @@ impl SelectionSet { }) .transpose()? .map(|selection_set| selection_set.without_unnecessary_fragments()); - let selection = Selection::from_element(element, selection_set)?; - self.add_local_selection(&selection)? + let selection = Selection::from_element( + element, + selection_set, + Some(&*unnecessary_directives), + )?; + self.add_local_selection(&selection, true)?; } } // If we don't have any path, we rebase and merge in the given sub selections at the root. @@ -2854,13 +2924,10 @@ impl SelectionSet { if self.selections.is_empty() { Err(FederationError::internal("Invalid empty selection set")) } else { - for selection in self.selections.values() { - if let Some(s) = selection.selection_set() { - s.validate(_variable_definitions)?; - } - } - - Ok(()) + self.selections + .values() + .filter_map(|selection| selection.selection_set()) + .try_for_each(|s| s.validate(_variable_definitions)) } } @@ -2912,12 +2979,9 @@ impl SelectionSet { &self, predicate: &mut impl FnMut(OpPathElement) -> Result, ) -> Result { - for selection in self.selections.values() { - if selection.any_element(self.type_position.clone(), predicate)? 
{ - return Ok(true); - } - } - Ok(false) + self.selections + .values() + .fallible_any(|selection| selection.any_element(self.type_position.clone(), predicate)) } } @@ -3123,13 +3187,12 @@ fn compute_aliases_for_non_merging_fields( } } - for selections in seen_response_names.into_values() { - if let Some(selections) = selections.selections { - compute_aliases_for_non_merging_fields(selections, alias_collector, schema)?; - } - } - - Ok(()) + seen_response_names + .into_values() + .filter_map(|selections| selections.selections) + .try_for_each(|selections| { + compute_aliases_for_non_merging_fields(selections, alias_collector, schema) + }) } fn gen_alias_name(base_name: &Name, unavailable_names: &IndexMap) -> Name { @@ -3322,6 +3385,25 @@ impl InlineFragmentSelection { parent_type_position: CompositeTypeDefinitionPosition, fragment_spread_selection: &Arc, ) -> Result { + let schema = fragment_spread_selection.spread.schema.schema(); + for directive in fragment_spread_selection.spread.directives.iter() { + let Some(definition) = schema.directive_definitions.get(&directive.name) else { + return Err(FederationError::internal(format!( + "Undefined directive {}", + directive.name + ))); + }; + if !definition + .locations + .contains(&apollo_compiler::schema::DirectiveLocation::InlineFragment) + { + return Err(SingleFederationError::UnsupportedSpreadDirective { + name: directive.name.clone(), + } + .into()); + } + } + // Note: We assume that fragment_spread_selection.spread.type_condition_position is the same as // fragment_spread_selection.selection_set.type_position. Ok(InlineFragmentSelection::new( diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index 2bd4262e88..ce2802bf4e 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -1007,7 +1007,7 @@ impl SelectionSet { &fragment, /*directives*/ &Default::default(), ); - optimized.add_local_selection(&fragment_selection.into())?; + optimized.add_local_selection(&fragment_selection.into(), false)?; } optimized.add_local_selection_set(¬_covered_so_far)?; @@ -1697,17 +1697,21 @@ impl FragmentGenerator { self.visit_selection_set(selection_set)?; } new_selection_set - .add_local_selection(&Selection::Field(Arc::clone(field.get())))?; + .add_local_selection(&Selection::Field(Arc::clone(field.get())), false)?; } SelectionValue::FragmentSpread(frag) => { - new_selection_set - .add_local_selection(&Selection::FragmentSpread(Arc::clone(frag.get())))?; + new_selection_set.add_local_selection( + &Selection::FragmentSpread(Arc::clone(frag.get())), + false, + )?; } SelectionValue::InlineFragment(frag) if !Self::is_worth_using(&frag.get().selection_set) => { - new_selection_set - .add_local_selection(&Selection::InlineFragment(Arc::clone(frag.get())))?; + new_selection_set.add_local_selection( + &Selection::InlineFragment(Arc::clone(frag.get())), + false, + )?; } SelectionValue::InlineFragment(mut candidate) => { self.visit_selection_set(candidate.get_selection_set_mut())?; @@ -1725,9 +1729,10 @@ impl FragmentGenerator { // we can't just transfer them to the generated fragment spread, // so we have to keep this inline fragment. 
let Ok(skip_include) = skip_include else { - new_selection_set.add_local_selection(&Selection::InlineFragment( - Arc::clone(candidate.get()), - ))?; + new_selection_set.add_local_selection( + &Selection::InlineFragment(Arc::clone(candidate.get())), + false, + )?; continue; }; @@ -1736,9 +1741,10 @@ impl FragmentGenerator { // there's any directives on it. This code duplicates the body from the // previous condition so it's very easy to remove when we're ready :) if !skip_include.is_empty() { - new_selection_set.add_local_selection(&Selection::InlineFragment( - Arc::clone(candidate.get()), - ))?; + new_selection_set.add_local_selection( + &Selection::InlineFragment(Arc::clone(candidate.get())), + false, + )?; continue; } @@ -1763,8 +1769,8 @@ impl FragmentGenerator { }); self.fragments.get(&name).unwrap() }; - new_selection_set.add_local_selection(&Selection::from( - FragmentSpreadSelection { + new_selection_set.add_local_selection( + &Selection::from(FragmentSpreadSelection { spread: FragmentSpread::new(FragmentSpreadData { schema: selection_set.schema.clone(), fragment_name: existing.name.clone(), @@ -1774,8 +1780,9 @@ impl FragmentGenerator { selection_id: crate::operation::SelectionId::new(), }), selection_set: existing.selection_set.clone(), - }, - ))?; + }), + false, + )?; } } } @@ -2281,12 +2288,12 @@ mod tests { type Query { t: T } - + type T { a: A b: Int } - + type A { x: String y: String @@ -2312,14 +2319,14 @@ mod tests { x y } - + fragment FT on T { a { __typename ...FA } } - + query { t { ...FT @@ -2346,19 +2353,19 @@ mod tests { type Query { t: T } - + type T { a: String b: B c: Int d: D } - + type B { x: String y: String } - + type D { m: String n: String @@ -2376,7 +2383,7 @@ mod tests { m } } - + { t { ...FragT @@ -2415,23 +2422,23 @@ mod tests { type Query { i: I } - + interface I { a: String } - + type T implements I { a: String b: B c: Int d: D } - + type B { x: String y: String } - + type D { m: String n: String @@ -2449,7 +2456,7 @@ mod tests { m } } - + { i { ... 
on T { @@ -2489,19 +2496,19 @@ mod tests { type Query { t: T } - + type T { a: String b: B c: Int d: D } - + type B { x: String y: String } - + type D { m: String n: String @@ -2527,7 +2534,7 @@ mod tests { m } } - + fragment Frag2 on T { a b { @@ -2539,7 +2546,7 @@ mod tests { n } } - + { t { ...Frag1 @@ -2573,11 +2580,11 @@ mod tests { type Query { t: T } - + interface I { x: String } - + type T implements I { x: String a: String @@ -2591,7 +2598,7 @@ mod tests { a } } - + { t { ...FragI @@ -2615,12 +2622,12 @@ mod tests { type Query { t: T } - + type T { a: String u: U } - + type U { x: String y: String @@ -2631,7 +2638,7 @@ mod tests { fragment Frag1 on T { a } - + fragment Frag2 on T { u { x @@ -2639,13 +2646,13 @@ mod tests { } ...Frag1 } - + fragment Frag3 on Query { t { ...Frag2 } } - + { ...Frag3 } @@ -2670,16 +2677,16 @@ mod tests { type Query { t1: T1 } - + interface I { x: Int } - + type T1 implements I { x: Int y: Int } - + type T2 implements I { x: Int z: Int @@ -2695,7 +2702,7 @@ mod tests { z } } - + { t1 { ...FragOnI @@ -2718,24 +2725,24 @@ mod tests { type Query { i2: I2 } - + interface I1 { x: Int } - + interface I2 { y: Int } - + interface I3 { z: Int } - + type T1 implements I1 & I2 { x: Int y: Int } - + type T2 implements I1 & I3 { x: Int z: Int @@ -2751,7 +2758,7 @@ mod tests { z } } - + { i2 { ...FragOnI1 @@ -2781,13 +2788,13 @@ mod tests { type Query { t1: T1 } - + union U = T1 | T2 - + type T1 { x: Int } - + type T2 { y: Int } @@ -2802,7 +2809,7 @@ mod tests { y } } - + { t1 { ...OnU @@ -2912,18 +2919,18 @@ mod tests { type Query { t1: T1 } - + union U1 = T1 | T2 | T3 union U2 = T2 | T3 - + type T1 { x: Int } - + type T2 { y: Int } - + type T3 { z: Int } @@ -2935,7 +2942,7 @@ mod tests { ...Outer } } - + fragment Outer on U1 { ... on T1 { x @@ -2947,7 +2954,7 @@ mod tests { ... Inner } } - + fragment Inner on U2 { ... on T2 { y @@ -3006,23 +3013,23 @@ mod tests { type Query { t1: T1 } - + union U1 = T1 | T2 | T3 union U2 = T2 | T3 - + type T1 { x: Int } - + type T2 { y1: Y y2: Y } - + type T3 { z: Int } - + type Y { v: Int } @@ -3034,7 +3041,7 @@ mod tests { ...Outer } } - + fragment Outer on U1 { ... on T1 { x @@ -3046,7 +3053,7 @@ mod tests { ... Inner } } - + fragment Inner on U2 { ... on T2 { y1 { @@ -3057,7 +3064,7 @@ mod tests { } } } - + fragment WillBeUnused on Y { v } @@ -3092,14 +3099,14 @@ mod tests { t1: T t2: T } - + type T { a1: Int a2: Int b1: B b2: B } - + type B { x: Int y: Int @@ -3115,7 +3122,7 @@ mod tests { ...TFields } } - + fragment TFields on T { ...DirectFieldsOfT b1 { @@ -3125,12 +3132,12 @@ mod tests { ...BFields } } - + fragment DirectFieldsOfT on T { a1 a2 } - + fragment BFields on B { x y @@ -3205,66 +3212,6 @@ mod tests { /// applied directives /// - #[test] - #[should_panic(expected = "directive is not supported for FRAGMENT_DEFINITION")] - // XXX(@goto-bus-stop): this test does not make sense, we should remove this feature - fn reuse_fragments_with_same_directive_on_the_fragment() { - let schema_doc = r#" - type Query { - t1: T - t2: T - t3: T - } - - type T { - a: Int - b: Int - c: Int - d: Int - } - "#; - - let query = r#" - fragment DirectiveOnDef on T @include(if: $cond1) { - a - } - - query myQuery($cond1: Boolean!, $cond2: Boolean!) { - t1 { - ...DirectiveOnDef - } - t2 { - ... on T @include(if: $cond2) { - a - } - } - t3 { - ...DirectiveOnDef @include(if: $cond2) - } - } - "#; - - test_fragments_roundtrip!(schema_doc, query, @r###" - query myQuery($cond1: Boolean!, $cond2: Boolean!) { - t1 { - ... 
on T @include(if: $cond1) { - a - } - } - t2 { - ... on T @include(if: $cond2) { - a - } - } - t3 { - ... on T @include(if: $cond1) @include(if: $cond2) { - a - } - } - } - "###); - } - #[test] fn reuse_fragments_with_same_directive_in_the_fragment_selection() { let schema_doc = r#" @@ -3273,7 +3220,7 @@ mod tests { t2: T t3: T } - + type T { a: Int b: Int @@ -3286,7 +3233,7 @@ mod tests { fragment DirectiveInDef on T { a @include(if: $cond1) } - + query myQuery($cond1: Boolean!, $cond2: Boolean!) { t1 { a @@ -3323,7 +3270,7 @@ mod tests { t2: T t3: T } - + type T { a: Int b: Int @@ -3336,7 +3283,7 @@ mod tests { fragment NoDirectiveDef on T { a } - + query myQuery($cond1: Boolean!) { t1 { ...NoDirectiveDef diff --git a/apollo-federation/src/operation/rebase.rs b/apollo-federation/src/operation/rebase.rs index 99c770aa74..9ba4d08e58 100644 --- a/apollo-federation/src/operation/rebase.rs +++ b/apollo-federation/src/operation/rebase.rs @@ -26,6 +26,7 @@ use crate::error::FederationError; use crate::schema::position::CompositeTypeDefinitionPosition; use crate::schema::position::OutputTypeDefinitionPosition; use crate::schema::ValidFederationSchema; +use crate::utils::FallibleIterator; fn print_possible_runtimes( composite_type: &CompositeTypeDefinitionPosition, @@ -784,12 +785,9 @@ impl SelectionSet { parent_type: &CompositeTypeDefinitionPosition, schema: &ValidFederationSchema, ) -> Result { - for selection in self.selections.values() { - if !selection.can_add_to(parent_type, schema)? { - return Ok(false); - } - } - Ok(true) + self.selections + .values() + .fallible_all(|selection| selection.can_add_to(parent_type, schema)) } } diff --git a/apollo-federation/src/operation/simplify.rs b/apollo-federation/src/operation/simplify.rs index 8555b241a2..802bbfcab1 100644 --- a/apollo-federation/src/operation/simplify.rs +++ b/apollo-federation/src/operation/simplify.rs @@ -460,7 +460,7 @@ impl SelectionSet { { match selection_or_set { SelectionOrSet::Selection(normalized_selection) => { - normalized_selections.add_local_selection(&normalized_selection)?; + normalized_selections.add_local_selection(&normalized_selection, false)?; } SelectionOrSet::SelectionSet(normalized_set) => { // Since the `selection` has been expanded/lifted, we use diff --git a/apollo-federation/src/operation/tests/mod.rs b/apollo-federation/src/operation/tests/mod.rs index ed69e54d71..276b0a579f 100644 --- a/apollo-federation/src/operation/tests/mod.rs +++ b/apollo-federation/src/operation/tests/mod.rs @@ -10,6 +10,7 @@ use super::Operation; use super::Selection; use super::SelectionKey; use super::SelectionSet; +use crate::error::FederationError; use crate::query_graph::graph_path::OpPathElement; use crate::schema::position::InterfaceTypeDefinitionPosition; use crate::schema::position::ObjectTypeDefinitionPosition; @@ -41,6 +42,26 @@ pub(super) fn parse_operation(schema: &ValidFederationSchema, query: &str) -> Op Operation::parse(schema.clone(), query, "query.graphql", None).unwrap() } +pub(super) fn parse_and_expand( + schema: &ValidFederationSchema, + query: &str, +) -> Result { + let doc = apollo_compiler::ExecutableDocument::parse_and_validate( + schema.schema(), + query, + "query.graphql", + )?; + + let operation = doc + .operations + .anonymous + .as_ref() + .expect("must have anonymous operation"); + let fragments = NamedFragments::new(&doc.fragments, schema); + + normalize_operation(operation, fragments, schema, &Default::default()) +} + /// Parse and validate the query similarly to `parse_operation`, but does not 
construct the /// `Operation` struct. pub(super) fn validate_operation(schema: &ValidFederationSchema, query: &str) { @@ -1183,7 +1204,7 @@ mod make_selection_tests { base_selection_set.type_position.clone(), selection.clone(), ); - Selection::from_element(base.element().unwrap(), Some(subselections)).unwrap() + Selection::from_element(base.element().unwrap(), Some(subselections), None).unwrap() }; let foo_with_a = clone_selection_at_path(foo, &[name!("a")]); @@ -1310,7 +1331,7 @@ mod lazy_map_tests { let field_element = Field::new_introspection_typename(s.schema(), &parent_type_pos, None); let typename_selection = - Selection::from_element(field_element.into(), /*subselection*/ None)?; + Selection::from_element(field_element.into(), /*subselection*/ None, None)?; // return `updated` and `typename_selection` Ok([updated, typename_selection].into_iter().collect()) }) @@ -1615,3 +1636,76 @@ fn used_variables() { variables.sort(); assert_eq!(variables, ["c", "d"], "works for a subset of the query"); } + +#[test] +fn directive_propagation() { + let schema_doc = r#" + type Query { + t1: T + t2: T + t3: T + } + + type T { + a: Int + b: Int + c: Int + d: Int + } + + directive @fragDefOnly on FRAGMENT_DEFINITION + directive @fragSpreadOnly on FRAGMENT_SPREAD + directive @fragInlineOnly on INLINE_FRAGMENT + directive @fragAll on FRAGMENT_DEFINITION | FRAGMENT_SPREAD | INLINE_FRAGMENT + "#; + + let schema = parse_schema(schema_doc); + + let query = parse_and_expand( + &schema, + r#" + fragment DirectiveOnDef on T @fragDefOnly @fragAll { a } + query { + t2 { + ... on T @fragInlineOnly @fragAll { a } + } + t3 { + ...DirectiveOnDef @fragAll + } + } + "#, + ) + .expect("directive applications to be valid"); + insta::assert_snapshot!(query, @r###" + fragment DirectiveOnDef on T @fragDefOnly @fragAll { + a + } + + { + t2 { + ... on T @fragInlineOnly @fragAll { + a + } + } + t3 { + ... on T @fragAll { + a + } + } + } + "###); + + let err = parse_and_expand( + &schema, + r#" + fragment DirectiveOnDef on T @fragDefOnly @fragAll { a } + query { + t1 { + ...DirectiveOnDef @fragSpreadOnly @fragAll + } + } + "#, + ) + .expect_err("directive @fragSpreadOnly to be rejected"); + insta::assert_snapshot!(err, @"Unsupported custom directive @fragSpreadOnly on fragment spread. 
Due to query transformations during planning, the router requires directives on fragment spreads to support both the FRAGMENT_SPREAD and INLINE_FRAGMENT locations."); +} diff --git a/apollo-federation/src/query_graph/mod.rs b/apollo-federation/src/query_graph/mod.rs index 15e83f49f9..f95588446c 100644 --- a/apollo-federation/src/query_graph/mod.rs +++ b/apollo-federation/src/query_graph/mod.rs @@ -27,6 +27,7 @@ use crate::schema::position::ObjectTypeDefinitionPosition; use crate::schema::position::OutputTypeDefinitionPosition; use crate::schema::position::SchemaRootDefinitionKind; use crate::schema::ValidFederationSchema; +use crate::utils::FallibleIterator; pub mod build_query_graph; pub(crate) mod condition_resolver; @@ -620,20 +621,22 @@ impl QueryGraph { let composite_type_position: CompositeTypeDefinitionPosition = type_position.clone().try_into()?; let type_ = composite_type_position.get(subgraph_schema.schema())?; - for key in type_.directives().get_all(&key_directive_definition.name) { - let key_value = metadata - .federation_spec_definition() - .key_directive_arguments(key)?; - let selection = parse_field_set( - subgraph_schema, - composite_type_position.type_name().clone(), - key_value.fields, - )?; - if !external_metadata.selects_any_external_field(&selection) { - return Ok(Some(selection)); - } - } - Ok(None) + type_ + .directives() + .get_all(&key_directive_definition.name) + .map(|key| { + metadata + .federation_spec_definition() + .key_directive_arguments(key) + }) + .and_then(|key_value| { + parse_field_set( + subgraph_schema, + composite_type_position.type_name().clone(), + key_value.fields, + ) + }) + .find_ok(|selection| !external_metadata.selects_any_external_field(selection)) } pub(crate) fn edge_for_field(&self, node: NodeIndex, field: &Field) -> Option { @@ -849,20 +852,15 @@ impl QueryGraph { let ty = type_name.get(schema.schema())?; - for key in ty.directives().get_all(&key_directive_definition.name) { - let Some(value) = key.argument_by_name("fields").and_then(|arg| arg.as_str()) else { - continue; - }; - let selection = parse_field_set(schema, ty.name().clone(), value)?; - let has_external = metadata - .external_metadata() - .selects_any_external_field(&selection); - if !has_external { - return Ok(Some(selection)); - } - } - - Ok(None) + ty.directives() + .get_all(&key_directive_definition.name) + .filter_map(|key| key.argument_by_name("fields").and_then(|arg| arg.as_str())) + .map(|value| parse_field_set(schema, ty.name().clone(), value)) + .find_ok(|selection| { + !metadata + .external_metadata() + .selects_any_external_field(selection) + }) } pub(crate) fn is_cross_subgraph_edge(&self, edge: EdgeIndex) -> Result { @@ -901,17 +899,14 @@ impl QueryGraph { .federation_spec_definition() .provides_directive_definition(schema)?; - for object_type_definition_position in - schema.possible_runtime_types(interface_field_definition_position.parent().into())? - { - let field_pos = object_type_definition_position - .field(interface_field_definition_position.field_name.clone()); - let field = field_pos.get(schema.schema())?; - if field.directives.has(&provides_directive_definition.name) { - return Ok(true); - } - } - - Ok(false) + Ok(schema + .possible_runtime_types(interface_field_definition_position.parent().into())? 
+ .into_iter() + .map(|object_type_definition_position| { + let field_pos = object_type_definition_position + .field(interface_field_definition_position.field_name.clone()); + field_pos.get(schema.schema()) + }) + .ok_and_any(|field| field.directives.has(&provides_directive_definition.name))?) } } diff --git a/apollo-federation/src/query_graph/path_tree.rs b/apollo-federation/src/query_graph/path_tree.rs index 02812159a3..8711376a45 100644 --- a/apollo-federation/src/query_graph/path_tree.rs +++ b/apollo-federation/src/query_graph/path_tree.rs @@ -16,6 +16,7 @@ use crate::query_graph::graph_path::OpGraphPath; use crate::query_graph::graph_path::OpGraphPathTrigger; use crate::query_graph::QueryGraph; use crate::query_graph::QueryGraphNode; +use crate::utils::FallibleIterator; /// A "merged" tree representation for a vector of `GraphPath`s that start at a common query graph /// node, in which each node of the tree corresponds to a node in the query graph, and a tree's node @@ -153,12 +154,9 @@ impl OpPathTree { if node_weight.source != *target { return Ok(false); } - for child in &self.childs { - if !child.tree.is_all_in_same_subgraph_internal(target)? { - return Ok(false); - } - } - Ok(true) + self.childs + .iter() + .fallible_all(|child| child.tree.is_all_in_same_subgraph_internal(target)) } fn fmt_internal( diff --git a/apollo-federation/src/query_plan/conditions.rs b/apollo-federation/src/query_plan/conditions.rs index d4a84b9f49..a91681de68 100644 --- a/apollo-federation/src/query_plan/conditions.rs +++ b/apollo-federation/src/query_plan/conditions.rs @@ -223,12 +223,12 @@ pub(crate) fn remove_conditions_from_selection_set( selection.with_updated_selection_set(Some(updated_selection_set))? } } else { - Selection::from_element(updated_element, Some(updated_selection_set))? + Selection::from_element(updated_element, Some(updated_selection_set), None)? } } else if updated_element == element { selection.clone() } else { - Selection::from_element(updated_element, None)? + Selection::from_element(updated_element, None, None)? }; selection_map.insert(new_selection); } @@ -262,14 +262,12 @@ pub(crate) fn remove_unneeded_top_level_fragment_directives( // if there is no type condition we should preserve the directive info selection_map.insert(selection.clone()); } else { - let mut needed_directives: Vec<Node<Directive>> = Vec::new(); - if fragment.directives.len() > 0 { - for directive in fragment.directives.iter() { - if !unneded_directives.contains(directive) { - needed_directives.push(directive.clone()); - } - } - } + let needed_directives: Vec<Node<Directive>> = fragment + .directives + .iter() + .filter(|directive| !unneded_directives.contains(directive)) + .cloned() + .collect(); // We recurse, knowing that we'll stop as soon as we hit field selections, so this only cover the fragments // at the "top-level" of the set. @@ -282,12 +280,15 @@ pub(crate) fn remove_unneeded_top_level_fragment_directives( let final_selection = inline_fragment.with_updated_selection_set(updated_selections); selection_map.insert(Selection::InlineFragment(Arc::new(final_selection))); + } else { + // We can skip some of the fragment directives. + let final_selection = inline_fragment + .with_updated_directives_and_selection_set( + DirectiveList::from_iter(needed_directives), + updated_selections, + ); + selection_map.insert(Selection::InlineFragment(Arc::new(final_selection))); + } - } - - // We can skip some of the fragment directives directive.
- let final_selection = inline_fragment - .with_updated_directives(DirectiveList::from_iter(needed_directives)); - selection_map.insert(Selection::InlineFragment(Arc::new(final_selection))); } } _ => { diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index dfc862e23b..de9a3a87c3 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -1080,10 +1080,9 @@ impl FetchDependencyGraph { /// - Calls `on_modification` if necessary. fn remove_useless_nodes(&mut self) -> Result<(), FederationError> { let root_nodes: Vec<_> = self.root_node_by_subgraph_iter().map(|(_, i)| *i).collect(); - for node_index in root_nodes { - self.remove_useless_nodes_bottom_up(node_index)?; - } - Ok(()) + root_nodes + .into_iter() + .try_for_each(|node_index| self.remove_useless_nodes_bottom_up(node_index)) } /// Recursively collect removable useless nodes from the bottom up. @@ -1362,10 +1361,9 @@ impl FetchDependencyGraph { /// - Calls `on_modification` if necessary. fn merge_child_fetches_for_same_subgraph_and_path(&mut self) -> Result<(), FederationError> { let root_nodes: Vec<_> = self.root_node_by_subgraph_iter().map(|(_, i)| *i).collect(); - for node_index in root_nodes { - self.recursive_merge_child_fetches_for_same_subgraph_and_path(node_index)?; - } - Ok(()) // done + root_nodes.into_iter().try_for_each(|node_index| { + self.recursive_merge_child_fetches_for_same_subgraph_and_path(node_index) + }) } /// Recursively merge child fetches top-down @@ -1430,11 +1428,9 @@ impl FetchDependencyGraph { // Note: `children_nodes` above may contain invalid nodes at this point. // So, we need to re-collect the children nodes after the merge. let children_nodes_after_merge: Vec<_> = self.children_of(node_index).collect(); - for c in children_nodes_after_merge { - self.recursive_merge_child_fetches_for_same_subgraph_and_path(c)?; - } - - Ok(()) + children_nodes_after_merge + .into_iter() + .try_for_each(|c| self.recursive_merge_child_fetches_for_same_subgraph_and_path(c)) } fn merge_fetches_to_same_subgraph_and_same_inputs(&mut self) -> Result<(), FederationError> { @@ -2051,7 +2047,7 @@ impl FetchDependencyGraph { if path.is_empty() { mutable_node - .selection_set + .selection_set_mut() .add_selections(&merged.selection_set.selection_set)?; } else { // The merged nodes might have some @include/@skip at top-level that are already part of the path. 
If so, @@ -2061,7 +2057,7 @@ impl FetchDependencyGraph { &path.conditional_directives(), )?; mutable_node - .selection_set + .selection_set_mut() .add_at_path(path, Some(&Arc::new(merged_selection_set)))?; } @@ -2361,9 +2357,9 @@ impl FetchDependencyGraphNode { } fn remove_inputs_from_selection(&mut self) -> Result<(), FederationError> { - let fetch_selection_set = &mut self.selection_set; if let Some(inputs) = &mut self.inputs { self.cached_cost = None; + let fetch_selection_set = &mut self.selection_set; for (_, selection) in &inputs.selection_sets_per_parent_type { fetch_selection_set.selection_set = Arc::new(fetch_selection_set.selection_set.minus(selection)?); @@ -2472,7 +2468,15 @@ impl FetchDependencyGraphNode { }; let operation = operation_compression.compress(&self.subgraph_name, subgraph_schema, operation)?; - let operation_document = operation.try_into()?; + let operation_document = operation.try_into().map_err(|err| match err { + FederationError::SingleFederationError { + inner: SingleFederationError::InvalidGraphQL { diagnostics }, + .. + } => FederationError::internal(format!( + "Query planning produced an invalid subgraph operation.\n{diagnostics}" + )), + _ => err, + })?; let node = super::PlanNode::Fetch(Box::new(super::FetchNode { subgraph_name: self.subgraph_name.clone(), @@ -2755,6 +2759,7 @@ fn operation_for_entities_fetch( sibling_typename: None, })), Some(selection_set), + None, )?; let type_position: CompositeTypeDefinitionPosition = subgraph_schema @@ -2866,7 +2871,8 @@ impl FetchSelectionSet { path_in_node: &OpPath, selection_set: Option<&Arc>, ) -> Result<(), FederationError> { - Arc::make_mut(&mut self.selection_set).add_at_path(path_in_node, selection_set)?; + let target = Arc::make_mut(&mut self.selection_set); + target.add_at_path(path_in_node, selection_set)?; // TODO: when calling this multiple times, maybe only re-compute conditions at the end? // Or make it lazily-initialized and computed on demand? self.conditions = self.selection_set.conditions()?; @@ -2906,10 +2912,10 @@ impl FetchInputs { } fn add_all(&mut self, other: &Self) -> Result<(), FederationError> { - for selections in other.selection_sets_per_parent_type.values() { - self.add(selections)?; - } - Ok(()) + other + .selection_sets_per_parent_type + .values() + .try_for_each(|selections| self.add(selections)) } fn contains(&self, other: &Self) -> bool { diff --git a/apollo-federation/src/query_plan/query_planner.rs b/apollo-federation/src/query_plan/query_planner.rs index 5670c685d9..ec338ef59d 100644 --- a/apollo-federation/src/query_plan/query_planner.rs +++ b/apollo-federation/src/query_plan/query_planner.rs @@ -332,13 +332,11 @@ impl QueryPlanner { ) -> Result { let operation = document .operations - .get(operation_name.as_ref().map(|name| name.as_str())) - // TODO(@goto-bus-stop) this is not an internal error, but a user error - .map_err(|_| FederationError::internal("requested operation does not exist"))?; + .get(operation_name.as_ref().map(|name| name.as_str()))?; - if operation.selection_set.selections.is_empty() { + if operation.selection_set.is_empty() { // This should never happen because `operation` comes from a known-valid document. - // TODO(@goto-bus-stop) it's probably fair to panic here :) + // We could panic here but we are returning a `Result` already anyways, so shrug! 
return Err(FederationError::internal( "Invalid operation: empty selection set", )); } @@ -413,7 +411,7 @@ impl QueryPlanner { }; */ - if normalized_operation.selection_set.selections.is_empty() { + if normalized_operation.selection_set.is_empty() { return Ok(QueryPlan::default()); } diff --git a/apollo-federation/src/supergraph/mod.rs b/apollo-federation/src/supergraph/mod.rs index 66d6078871..01210155d9 100644 --- a/apollo-federation/src/supergraph/mod.rs +++ b/apollo-federation/src/supergraph/mod.rs @@ -3,6 +3,7 @@ mod subgraph; use std::fmt::Write; use std::ops::Deref; +use std::ops::Not; use std::sync::Arc; use apollo_compiler::ast::Argument; @@ -80,6 +81,7 @@ use crate::schema::type_and_directive_specification::ScalarTypeSpecification; use crate::schema::type_and_directive_specification::TypeAndDirectiveSpecification; use crate::schema::type_and_directive_specification::UnionTypeSpecification; use crate::schema::FederationSchema; +use crate::utils::FallibleIterator; /// Assumes the given schema has been validated. /// @@ -96,16 +98,19 @@ pub(crate) fn extract_subgraphs_from_supergraph( let (mut subgraphs, federation_spec_definitions, graph_enum_value_name_to_subgraph_name) = collect_empty_subgraphs(supergraph_schema, join_spec_definition)?; - let mut filtered_types = Vec::new(); - for type_definition_position in supergraph_schema.get_types() { - if !join_spec_definition - .is_spec_type_name(supergraph_schema, type_definition_position.type_name())? - && !link_spec_definition - .is_spec_type_name(supergraph_schema, type_definition_position.type_name())? - { - filtered_types.push(type_definition_position); - } - } + let filtered_types: Vec<_> = supergraph_schema + .get_types() + .fallible_filter(|type_definition_position| { + join_spec_definition + .is_spec_type_name(supergraph_schema, type_definition_position.type_name()) + .map(Not::not) + }) + .and_then_filter(|type_definition_position| { + link_spec_definition + .is_spec_type_name(supergraph_schema, type_definition_position.type_name()) + .map(Not::not) + }) + .try_collect()?; if is_fed_1 { let unsupported = SingleFederationError::UnsupportedFederationVersion { @@ -403,11 +408,11 @@ fn add_all_empty_subgraph_types( for type_definition_position in filtered_types { let type_ = type_definition_position.get(supergraph_schema.schema())?; - let mut type_directive_applications = Vec::new(); - for directive in type_.directives().get_all(&type_directive_definition.name) { - type_directive_applications - .push(join_spec_definition.type_directive_arguments(directive)?); - } + let type_directive_applications: Vec<_> = type_ + .directives() + .get_all(&type_directive_definition.name) + .map(|directive| join_spec_definition.type_directive_arguments(directive)) + .try_collect()?; let types_mut = match &type_definition_position { TypeDefinitionPosition::Scalar(pos) => { // Scalar are a bit special in that they don't have any sub-component, so we don't diff --git a/apollo-federation/src/utils/fallible_iterator.rs b/apollo-federation/src/utils/fallible_iterator.rs new file mode 100644 index 0000000000..83564c74f6 --- /dev/null +++ b/apollo-federation/src/utils/fallible_iterator.rs @@ -0,0 +1,529 @@ +#![deny(rustdoc::broken_intra_doc_links)] + +use itertools::Itertools; + +/// A common use for an iterator is to collect into a container and grow that container. This trait +/// extends the standard library's `Extend` trait to work for containers that can be extended with +/// `T`s to also be extendable with `Result<T, E>`.
If an `Err` is encountered, that `Err` is +/// returned. Notably, this means the container will contain all prior `Ok` values. +pub(crate) trait FallibleExtend<A>: Extend<A> { + fn fallible_extend<I, E>(&mut self, iter: I) -> Result<(), E> + where + I: IntoIterator<Item = Result<A, E>>, + { + iter.into_iter() + .process_results(|results| self.extend(results)) + } + + // NOTE: The standard extend trait provides `extend_one` and `extend_reserve` methods. These + // have not been added and can be if a use arises. +} + +impl<A, T> FallibleExtend<A> for T where T: Extend<A> {} + +/// An extension trait for `Iterator`, similar to `Itertools`, that seeks to improve the ergonomics +/// around fallible operations. +/// +/// "Fallible" here has a few different meanings. The operation that you are performing (such as a +/// `filter`) might yield a `Result` containing the value you actually want/need, but fallible can +/// also refer to the stream of items that you're iterating over (or both!). As much as possible, I +/// will use the following naming scheme in order to keep these ideas consistent: +/// - If the iterator yields an arbitrary `T` and the operation that you wish to apply is of the +/// form `T -> Result<U, E>`, then it will be named `fallible_*`. +/// - If the iterator yields `Result<T, E>` and the operation is of the form `T -> U` (for an arbitrary +/// `U`), then it will be named `*_ok`. +/// - If both iterator and operation yield `Result`, then it will be named `and_then_*` (more on that +/// further down). +/// +/// The first category mostly describes combinators that take closures that need specific types, +/// such as `filter` and things in the `any`/`all`/`find`/`fold` family. There are several +/// experimental features in `std` that offer similar functionalities. +/// +/// The second category is mostly taken care of by `Itertools`. While they are not currently +/// implemented here (or in `Itertools`), this category would also contain methods like `*_err` +/// in addition to the "ok" methods. +/// +/// The third category is the hardest to pin down. There are a ton of ways that you can combine two +/// results (just look at the docs page for `Result`), but, in general, the most common use case +/// that needs to be captured is the use of the `?` operator. For example, if you have a check that +/// is fallible, you will likely write that code like so: +/// ```no_test +/// for val in things { +/// let val = fallible_transformation(val)?; +/// if fallible_check(&val)? { continue } +/// process_value(val); +/// } +/// ``` +/// In such a case, `process_value` is called if and only if both the transformation and check +/// return `Ok`. This is why methods in this category are named `and_then_*`. +/// +/// There are, of course, methods that fall outside of this taxonomy, but this covers the broad +/// strokes. +/// +/// In all cases, errors are passed along until they are processed in some way. This includes +/// `Result::collect`, but also includes things like `Itertools::process_results` and things like +/// the `find` family. +/// +/// Lastly, if you come across something that fits what this trait is trying to do and you have a +/// use case for it that is not served already, feel free to expand the functionalities! +// TODO: In std, methods like `all` and `any` are actually just specializations of `try_fold` using +// bools and `ControlFlow`. When initially writing this, I, @TylerBloom, didn't take the time to +// write equivalent folding methods. Should they be implemented in the future, we should rework +// existing methods to use them.
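Editor's note: unlike the combinators further down, `FallibleExtend` ships without a doc-test. Here is a minimal, self-contained sketch of the documented behavior, using a stand-in copy of the trait as reconstructed above (the real one lives in `apollo_federation::utils`) and assuming only the `itertools` dependency the file already imports:

```rust
use itertools::Itertools;

// Stand-in copy of the trait defined above, for a runnable demonstration.
trait FallibleExtend<A>: Extend<A> {
    fn fallible_extend<I, E>(&mut self, iter: I) -> Result<(), E>
    where
        I: IntoIterator<Item = Result<A, E>>,
    {
        iter.into_iter()
            .process_results(|results| self.extend(results))
    }
}
impl<A, T> FallibleExtend<A> for T where T: Extend<A> {}

fn main() {
    let mut sink: Vec<u32> = vec![1];
    let result = sink.fallible_extend(vec![Ok(2), Err("boom"), Ok(3)]);
    // The first `Err` stops extension and is returned...
    assert_eq!(result, Err("boom"));
    // ...but, as the doc comment says, all prior `Ok` values were kept.
    assert_eq!(sink, vec![1, 2]);
}
```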
+pub(crate) trait FallibleIterator: Sized + Itertools { + /// The method transforms the existing iterator, which yields `T`s, into an iterator that + /// yields `Result<T, E>`. The predicate that is provided is fallible. If the predicate yields + /// `Ok(false)`, the item is skipped. If the predicate yields `Err`, that `T` is discarded and + /// the iterator will yield the `Err` in its place. Lastly, if the predicate yields `Ok(true)`, + /// the iterator will yield `Ok(val)`. + /// + /// ```rust + /// use apollo_federation::utils::FallibleIterator; + /// + /// // A totally accurate prime checker + /// fn is_prime(i: usize) -> Result<bool, ()> { + /// match i { + /// 0 | 1 => Err(()), // 0 and 1 are neither prime nor composite + /// 2 | 3 => Ok(true), + /// _ => Ok(false), // Every other number is composite, I guess + /// } + /// } + /// + /// let vals = (1..6).fallible_filter(|i| is_prime(*i)); + /// itertools::assert_equal(vals, vec![Err(()), Ok(2), Ok(3)]); + /// ``` + fn fallible_filter<F, E>(self, predicate: F) -> FallibleFilter<Self, F> + where + F: FnMut(&Self::Item) -> Result<bool, E>, + { + FallibleFilter { + iter: self, + predicate, + } + } + + // NOTE: There is a `filter_ok` method in `Itertools`, but there is not a `filter_err`. That + // might be useful at some point. + + /// This method functions similarly to `Iterator::filter` but where the existing iterator + /// yields `Result`s and the given predicate also returns `Result`s. + /// + /// The predicate is only called if the existing iterator yields `Ok`. `Err`s are ignored. + /// Should the predicate return an `Err`, the `Ok` value is replaced with the `Err`. This + /// method is very similar to `Itertools::filter_ok` except the predicate for this method is + /// fallible. + /// + /// ```rust + /// use apollo_federation::utils::FallibleIterator; + /// + /// // A totally accurate prime checker + /// fn is_prime(i: usize) -> Result<bool, ()> { + /// match i { + /// 0 | 1 => Err(()), // 0 and 1 are neither prime nor composite + /// 2 | 3 => Ok(true), + /// _ => Ok(false), // Every other number is composite, I guess + /// } + /// } + /// + /// let vals = vec![Ok(0), Err(()), Err(()), Ok(3), Ok(4)].into_iter().and_then_filter(|i| is_prime(*i)); + /// itertools::assert_equal(vals, vec![Err(()), Err(()), Err(()), Ok(3)]); + /// ``` + fn and_then_filter<F, T, E>(self, predicate: F) -> AndThenFilter<Self, F> + where + Self: Iterator<Item = Result<T, E>>, + F: FnMut(&T) -> Result<bool, E>, + { + AndThenFilter { + iter: self, + predicate, + } + } + + /// This method functions similarly to `Iterator::all` but where the given predicate returns + /// `Result`s. + /// + /// Like `Iterator::all`, this function short-circuits, but it will short-circuit if the predicate + /// returns anything other than `Ok(true)`. If the first item that is not `Ok(true)` is + /// `Ok(false)`, the returned value will be `Ok(false)`. If that item is `Err`, then that `Err` + /// is returned.
+ /// + /// ```rust + /// use apollo_federation::utils::FallibleIterator; + /// + /// // A totally accurate prime checker + /// fn is_prime(i: usize) -> Result<bool, ()> { + /// match i { + /// 0 | 1 => Err(()), // 0 and 1 are neither prime nor composite + /// 2 | 3 => Ok(true), + /// _ => Ok(false), // Every other number is composite, I guess + /// } + /// } + /// + /// assert_eq!(Ok(true), [].into_iter().fallible_all(is_prime)); + /// assert_eq!(Ok(true), (2..4).fallible_all(is_prime)); + /// assert_eq!(Err(()), (1..4).fallible_all(is_prime)); + /// assert_eq!(Ok(false), (2..5).fallible_all(is_prime)); + /// assert_eq!(Err(()), (1..5).fallible_all(is_prime)); + /// ``` + fn fallible_all<F, E>(&mut self, mut predicate: F) -> Result<bool, E> + where + F: FnMut(Self::Item) -> Result<bool, E>, + { + let mut digest = true; + for val in self.by_ref() { + digest &= predicate(val)?; + if !digest { + break; + } + } + Ok(digest) + } + + /// This method functions similarly to `FallibleIterator::fallible_all` but inverted. The + /// existing iterator yields `Result`s but the predicate is not fallible. + /// + /// Like `FallibleIterator::fallible_all`, this function short-circuits, but it will short-circuit + /// if it encounters an `Err` or `false`. If the existing iterator yields an `Err`, this + /// function short-circuits, does not call the predicate, and returns that `Err`. If the value + /// is `Ok`, it is given to the predicate. If the predicate returns `false`, this method + /// returns `Ok(false)`. + /// + /// ```rust + /// use apollo_federation::utils::FallibleIterator; + /// + /// type Item = Result<usize, ()>; + /// + /// fn is_even(i: usize) -> bool { + /// i % 2 == 0 + /// } + /// + /// let first_values: Vec<Item> = vec![]; + /// let second_values: Vec<Item> = vec![Ok(1), Err(())]; + /// let third_values: Vec<Item> = vec![Ok(0), Ok(1), Ok(2)]; + /// let fourth_values: Vec<Item> = vec![Err(()), Ok(0)]; + /// + /// assert_eq!(Ok(true), first_values.into_iter().ok_and_all(is_even)); + /// assert_eq!(Ok(false), second_values.into_iter().ok_and_all(is_even)); + /// assert_eq!(Ok(false), third_values.into_iter().ok_and_all(is_even)); + /// assert_eq!(Err(()), fourth_values.into_iter().ok_and_all(is_even)); + /// ``` + fn ok_and_all<F, T, E>(&mut self, predicate: F) -> Result<bool, E> + where + Self: Iterator<Item = Result<T, E>>, + F: FnMut(T) -> bool, + { + self.process_results(|mut results| results.all(predicate)) + } + + /// This method functions similarly to `FallibleIterator::fallible_all` but both the + /// existing iterator and predicate yield `Result`s. + /// + /// Like `FallibleIterator::fallible_all`, this function short-circuits, but it will short-circuit + /// if it encounters an `Err` or `Ok(false)`. If the existing iterator yields an `Err`, this + /// function returns that `Err`. If the value is `Ok`, it is given to the predicate. If the + /// predicate returns `Err`, that `Err` is returned. If the predicate returns `Ok(false)`, + /// `Ok(false)` is returned. By default, this function returns `Ok(true)`.
+ /// + /// ```rust + /// use apollo_federation::utils::FallibleIterator; + /// + /// type Item = Result<usize, ()>; + /// + /// // A totally accurate prime checker + /// fn is_prime(i: usize) -> Result<bool, ()> { + /// match i { + /// 0 | 1 => Err(()), // 0 and 1 are neither prime nor composite + /// 2 | 3 => Ok(true), + /// _ => Ok(false), // Every other number is composite, I guess + /// } + /// } + /// + /// let first_values: Vec<Item> = vec![]; + /// let second_values: Vec<Item> = vec![Ok(0), Err(())]; + /// let third_values: Vec<Item> = vec![Ok(2), Ok(3)]; + /// let fourth_values: Vec<Item> = vec![Err(()), Ok(2)]; + /// let fifth_values: Vec<Item> = vec![Ok(2), Err(())]; + /// let sixth_values: Vec<Item> = vec![Ok(4), Ok(3)]; + /// + /// assert_eq!(Ok(true), first_values.into_iter().and_then_all(is_prime)); + /// assert_eq!(Err(()), second_values.into_iter().and_then_all(is_prime)); + /// assert_eq!(Ok(true), third_values.into_iter().and_then_all(is_prime)); + /// assert_eq!(Err(()), fourth_values.into_iter().and_then_all(is_prime)); + /// assert_eq!(Err(()), fifth_values.into_iter().and_then_all(is_prime)); + /// assert_eq!(Ok(false), sixth_values.into_iter().and_then_all(is_prime)); + /// ``` + fn and_then_all<F, T, E>(&mut self, mut predicate: F) -> Result<bool, E> + where + Self: Iterator<Item = Result<T, E>>, + F: FnMut(T) -> Result<bool, E>, + { + let mut digest = true; + for val in self.by_ref() { + digest &= val.and_then(&mut predicate)?; + if !digest { + break; + } + } + Ok(digest) + } + + /// This method functions similarly to `Iterator::any` but where the given predicate returns + /// `Result`s. + /// + /// Like `Iterator::any`, this function short-circuits, but it will short-circuit if the predicate + /// returns anything other than `Ok(false)`. If the first item that is not `Ok(false)` is + /// `Ok(true)`, the returned value will be `Ok(true)`. If that item is `Err`, then that `Err` + /// is returned. + /// + /// ```rust + /// use apollo_federation::utils::FallibleIterator; + /// + /// // A totally accurate prime checker + /// fn is_prime(i: usize) -> Result<bool, ()> { + /// match i { + /// 0 | 1 => Err(()), // 0 and 1 are neither prime nor composite + /// 2 | 3 => Ok(true), + /// _ => Ok(false), // Every other number is composite, I guess + /// } + /// } + /// + /// assert_eq!(Ok(false), [].into_iter().fallible_any(is_prime)); + /// assert_eq!(Ok(true), (2..5).fallible_any(is_prime)); + /// assert_eq!(Ok(false), (4..5).fallible_any(is_prime)); + /// assert_eq!(Err(()), (1..4).fallible_any(is_prime)); + /// assert_eq!(Err(()), (1..5).fallible_any(is_prime)); + /// ``` + fn fallible_any<F, E>(&mut self, mut predicate: F) -> Result<bool, E> + where + F: FnMut(Self::Item) -> Result<bool, E>, + { + let mut digest = false; + for val in self.by_ref() { + digest |= predicate(val)?; + if digest { + break; + } + } + Ok(digest) + } + + /// This method functions similarly to `FallibleIterator::fallible_any` but inverted. The + /// existing iterator yields `Result`s but the predicate is not fallible. + /// + /// Like `FallibleIterator::fallible_any`, this function short-circuits, but it will short-circuit + /// if it encounters an `Err` or `true`. If the existing iterator yields an `Err`, this + /// function short-circuits, does not call the predicate, and returns that `Err`. If the value + /// is `Ok`, it is given to the predicate. If the predicate returns `true`, this method returns + /// `Ok(true)`.
+ /// + /// ```rust + /// use apollo_federation::utils::FallibleIterator; + /// + /// type Item = Result<usize, ()>; + /// + /// fn is_even(i: usize) -> bool { + /// i % 2 == 0 + /// } + /// + /// let first_values: Vec<Item> = vec![]; + /// let second_values: Vec<Item> = vec![Ok(0), Err(())]; + /// let third_values: Vec<Item> = vec![Ok(1), Ok(3)]; + /// let fourth_values: Vec<Item> = vec![Err(()), Ok(0)]; + /// + /// assert_eq!(Ok(false), first_values.into_iter().ok_and_any(is_even)); + /// assert_eq!(Ok(true), second_values.into_iter().ok_and_any(is_even)); + /// assert_eq!(Ok(false), third_values.into_iter().ok_and_any(is_even)); + /// assert_eq!(Err(()), fourth_values.into_iter().ok_and_any(is_even)); + /// ``` + fn ok_and_any<F, T, E>(&mut self, predicate: F) -> Result<bool, E> + where + Self: Iterator<Item = Result<T, E>>, + F: FnMut(T) -> bool, + { + self.process_results(|mut results| results.any(predicate)) + } + + /// This method functions similarly to `FallibleIterator::fallible_any` but both the + /// existing iterator and predicate yield `Result`s. + /// + /// Like `FallibleIterator::fallible_any`, this function short-circuits, but it will short-circuit + /// if it encounters an `Err` or `Ok(true)`. If the existing iterator yields an `Err`, this + /// function returns that `Err`. If the value is `Ok`, it is given to the predicate. If the + /// predicate returns `Err`, that `Err` is returned. If the predicate returns `Ok(true)`, + /// `Ok(true)` is returned. By default, this function returns `Ok(false)`. + /// + /// ```rust + /// use apollo_federation::utils::FallibleIterator; + /// + /// type Item = Result<usize, ()>; + /// + /// // A totally accurate prime checker + /// fn is_prime(i: usize) -> Result<bool, ()> { + /// match i { + /// 0 | 1 => Err(()), // 0 and 1 are neither prime nor composite + /// 2 | 3 => Ok(true), + /// _ => Ok(false), // Every other number is composite, I guess + /// } + /// } + /// + /// let first_values: Vec<Item> = vec![]; + /// let second_values: Vec<Item> = vec![Ok(0), Err(())]; + /// let third_values: Vec<Item> = vec![Ok(3), Ok(4)]; + /// let fourth_values: Vec<Item> = vec![Err(()), Ok(2)]; + /// let fifth_values: Vec<Item> = vec![Ok(2), Err(())]; + /// let sixth_values: Vec<Item> = vec![Ok(4), Ok(5)]; + /// + /// assert_eq!(Ok(false), first_values.into_iter().and_then_any(is_prime)); + /// assert_eq!(Err(()), second_values.into_iter().and_then_any(is_prime)); + /// assert_eq!(Ok(true), third_values.into_iter().and_then_any(is_prime)); + /// assert_eq!(Err(()), fourth_values.into_iter().and_then_any(is_prime)); + /// assert_eq!(Ok(true), fifth_values.into_iter().and_then_any(is_prime)); + /// assert_eq!(Ok(false), sixth_values.into_iter().and_then_any(is_prime)); + /// ``` + fn and_then_any<F, T, E>(&mut self, mut predicate: F) -> Result<bool, E> + where + Self: Iterator<Item = Result<T, E>>, + F: FnMut(T) -> Result<bool, E>, + { + let mut digest = false; + for val in self { + digest |= val.and_then(&mut predicate)?; + if digest { + break; + } + } + Ok(digest) + } + + /// A convenience method that is equivalent to calling `.map(|result| + /// result.and_then(fallible_fn))`. + fn and_then<F, T, U, E>(self, map: F) -> AndThen<Self, F> + where + Self: Iterator<Item = Result<T, E>>, + F: FnMut(T) -> Result<U, E>, + { + AndThen { iter: self, map } + } + + /// A convenience method that is equivalent to calling `.map(|result| + /// result.or_else(fallible_fn))`. + fn or_else<F, T, E, E2>(self, map: F) -> OrElse<Self, F> + where + Self: Iterator<Item = Result<T, E>>, + F: FnMut(E) -> Result<T, E2>, + { + OrElse { iter: self, map } + } + + /// A convenience method for applying a fallible operation to an iterator of `Result`s and + /// returning the first `Err` if one occurs.
diff --git a/apollo-federation/src/utils/mod.rs b/apollo-federation/src/utils/mod.rs index 31348d2fda..ec910a87f8 100644 --- a/apollo-federation/src/utils/mod.rs +++ b/apollo-federation/src/utils/mod.rs @@ -1 +1,5 @@ +//! This module contains various tools that help the ergonomics of this crate. + +mod fallible_iterator; pub mod logging; +pub(crate) use fallible_iterator::*; diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests.rs b/apollo-federation/tests/query_plan/build_query_plan_tests.rs index a5a8769c2a..8f256843e2 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests.rs @@ -914,3 +914,168 @@ fn test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph() { "### ); } + +#[test] +fn redundant_typename_for_inline_fragments_without_type_condition() { + let planner = planner!( + Subgraph1: r#" + type Query { + products: [Product] + } + interface Product { + name: String + } + "#, + ); + assert_plan!( + &planner, + r#" + { + products { + ... @skip(if: false) { + name + } + } + } + "#, + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + products { + __typename + ... @skip(if: false) { + name + } + } + } + }, + } + "### + ); +} + +#[test] +fn test_merging_fetches_reset_cached_costs() { + // This is a test for ROUTER-553.
+ let planner = planner!( + A: r#" + type Query { + start: S @shareable + } + + type S @key(fields: "id") { + id: ID! + u: U @shareable + } + + type U @key(fields: "id") { + id: ID! + } + "#, + B: r#" + type Query { + start: S @shareable + } + + type S @key(fields: "id") { + id: ID! + } + "#, + C: r#" + type S @key(fields: "id") { + id: ID! + x: X + a: String! + } + + type X { + t: T + } + + type T { + u: U @shareable + } + + type U @key(fields: "id") { + id: ID! + b: String + } + "#, + ); + assert_plan!( + &planner, + r#"{ + start { + u { + b + } + a + x { + t { + u { + id + } + } + } + } + }"#, + @r###" + QueryPlan { + Sequence { + Fetch(service: "A") { + { + start { + __typename + u { + __typename + id + } + id + } + } + }, + Parallel { + Flatten(path: "start") { + Fetch(service: "C") { + { + ... on S { + __typename + id + } + } => + { + ... on S { + a + x { + t { + u { + id + } + } + } + } + } + }, + }, + Flatten(path: "start.u") { + Fetch(service: "C") { + { + ... on U { + __typename + id + } + } => + { + ... on U { + b + } + } + }, + }, + }, + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/requires/include_skip.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/requires/include_skip.rs index b981592f68..ebb98c6653 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/requires/include_skip.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/requires/include_skip.rs @@ -1,13 +1,11 @@ #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure (redundant inline spread) fn it_handles_a_simple_at_requires_triggered_within_a_conditional() { let planner = planner!( Subgraph1: r#" type Query { t: T } - + type T @key(fields: "id") { id: ID! a: Int @@ -73,7 +71,7 @@ fn it_handles_an_at_requires_triggered_conditionally() { type Query { t: T } - + type T @key(fields: "id") { id: ID! a: Int @@ -143,7 +141,7 @@ fn it_handles_an_at_requires_where_multiple_conditional_are_involved() { type Query { a: A } - + type A @key(fields: "idA") { idA: ID! } @@ -153,7 +151,7 @@ fn it_handles_an_at_requires_where_multiple_conditional_are_involved() { idA: ID! b: [B] } - + type B @key(fields: "idB") { idB: ID! required: Int @@ -240,3 +238,159 @@ fn it_handles_an_at_requires_where_multiple_conditional_are_involved() { "### ); } + +#[test] +fn unnecessary_include_is_stripped_from_fragments() { + let planner = planner!( + Subgraph1: r#" + type Query { + foo: Foo, + } + + type Foo @key(fields: "id") { + id: ID, + bar: Bar, + } + + type Bar @key(fields: "id") { + id: ID, + } + "#, + Subgraph2: r#" + type Bar @key(fields: "id") { + id: ID, + a: Int, + } + "#, + ); + assert_plan!( + &planner, + r#" + query foo($test: Boolean!) { + foo @include(if: $test) { + ... on Foo @include(if: $test) { + id + } + } + } + "#, + @r###" + QueryPlan { + Include(if: $test) { + Fetch(service: "Subgraph1") { + { + foo { + ... on Foo { + id + } + } + } + }, + }, + } + "### + ); + assert_plan!( + &planner, + r#" + query foo($test: Boolean!) { + foo @include(if: $test) { + ... on Foo @include(if: $test) { + id + bar { + ... on Bar @include(if: $test) { + id + } + } + } + } + } + "#, + @r###" + QueryPlan { + Include(if: $test) { + Fetch(service: "Subgraph1") { + { + foo { + ... on Foo { + id + bar { + ... 
on Bar { + id + } + } + } + } + } + }, + }, + } + "### + ); +} + +#[test] +fn selections_are_not_overwritten_after_removing_directives() { + let planner = planner!( + Subgraph1: r#" + type Query { + foo: Foo, + } + + type Foo @key(fields: "id") { + id: ID, + foo: Foo, + bar: Bar, + } + + type Bar @key(fields: "id") { + id: ID, + } + "#, + Subgraph2: r#" + type Bar @key(fields: "id") { + id: ID, + a: Int, + } + "#, + ); + assert_plan!( + &planner, + r#" + query foo($test: Boolean!) { + foo @include(if: $test) { + ... on Foo { + id + foo { + ... on Foo @include(if: $test) { + bar { + id + } + } + } + } + } + } + "#, + @r###" + QueryPlan { + Include(if: $test) { + Fetch(service: "Subgraph1") { + { + foo { + id + foo { + ... on Foo { + bar { + id + } + } + } + } + } + }, + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_a_simple_at_requires_triggered_within_a_conditional.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_a_simple_at_requires_triggered_within_a_conditional.graphql index 60ae71d098..49e3180b5b 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_handles_a_simple_at_requires_triggered_within_a_conditional.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_a_simple_at_requires_triggered_within_a_conditional.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: d6db359dab5222fd4d4da957d4ece13e5dee392f +# Composed from subgraphs with hash: 0e4eac66a889724333f86a54a0647cc4f694f511 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_an_at_requires_triggered_conditionally.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_an_at_requires_triggered_conditionally.graphql index 60ae71d098..49e3180b5b 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_handles_an_at_requires_triggered_conditionally.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_an_at_requires_triggered_conditionally.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: d6db359dab5222fd4d4da957d4ece13e5dee392f +# Composed from subgraphs with hash: 0e4eac66a889724333f86a54a0647cc4f694f511 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_an_at_requires_where_multiple_conditional_are_involved.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_an_at_requires_where_multiple_conditional_are_involved.graphql index 0760e3ad79..020d3b8da0 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_handles_an_at_requires_where_multiple_conditional_are_involved.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_an_at_requires_where_multiple_conditional_are_involved.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: cc5702c4822eb337c8ae95605bcb51812750cff8 +# Composed from subgraphs with hash: 2ea9dba0e4a9f205821228485cdd112ca0b0c17a schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/redundant_typename_for_inline_fragments_without_type_condition.graphql b/apollo-federation/tests/query_plan/supergraphs/redundant_typename_for_inline_fragments_without_type_condition.graphql new file mode 100644 index 0000000000..0b9050d640 
--- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/redundant_typename_for_inline_fragments_without_type_condition.graphql @@ -0,0 +1,53 @@ +# Composed from subgraphs with hash: d267b3f18f7736e1a3e39a951696c318d1f46a34 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +interface Product + @join__type(graph: SUBGRAPH1) +{ + name: String +} + +type Query + @join__type(graph: SUBGRAPH1) +{ + products: [Product] +} diff --git a/apollo-federation/tests/query_plan/supergraphs/selections_are_not_overwritten_after_removing_directives.graphql b/apollo-federation/tests/query_plan/supergraphs/selections_are_not_overwritten_after_removing_directives.graphql new file mode 100644 index 0000000000..a6d3125d50 --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/selections_are_not_overwritten_after_removing_directives.graphql @@ -0,0 +1,65 @@ +# Composed from subgraphs with hash: d5699e8f9c56867b4a27e8b7b2e942a65af9c97b +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) 
repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +type Bar + @join__type(graph: SUBGRAPH1, key: "id") + @join__type(graph: SUBGRAPH2, key: "id") +{ + id: ID + a: Int @join__field(graph: SUBGRAPH2) +} + +type Foo + @join__type(graph: SUBGRAPH1, key: "id") +{ + id: ID + foo: Foo + bar: Bar +} + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") + SUBGRAPH2 @join__graph(name: "Subgraph2", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + foo: Foo @join__field(graph: SUBGRAPH1) +} diff --git a/apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_reset_cached_costs.graphql b/apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_reset_cached_costs.graphql new file mode 100644 index 0000000000..c513bf6ad9 --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_reset_cached_costs.graphql @@ -0,0 +1,82 @@ +# Composed from subgraphs with hash: 30529f3ff76bc917d12a637ede6e78ce39e7bca8 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + A @join__graph(name: "A", url: "none") + B @join__graph(name: "B", url: "none") + C @join__graph(name: "C", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: A) + @join__type(graph: B) + @join__type(graph: C) +{ + start: S @join__field(graph: A) @join__field(graph: B) +} + +type S + @join__type(graph: A, key: "id") + @join__type(graph: B, key: "id") + @join__type(graph: C, key: "id") +{ + id: ID! + u: U @join__field(graph: A) + x: X @join__field(graph: C) + a: String! @join__field(graph: C) +} + +type T + @join__type(graph: C) +{ + u: U +} + +type U + @join__type(graph: A, key: "id") + @join__type(graph: C, key: "id") +{ + id: ID! 
+ b: String @join__field(graph: C) +} + +type X + @join__type(graph: C) +{ + t: T +} diff --git a/apollo-federation/tests/query_plan/supergraphs/unnecessary_include_is_stripped_from_fragments.graphql b/apollo-federation/tests/query_plan/supergraphs/unnecessary_include_is_stripped_from_fragments.graphql new file mode 100644 index 0000000000..bed3adbc67 --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/unnecessary_include_is_stripped_from_fragments.graphql @@ -0,0 +1,64 @@ +# Composed from subgraphs with hash: 07faf0f54a80eb7ef3e78f4fbe6382e135f6aac6 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +type Bar + @join__type(graph: SUBGRAPH1, key: "id") + @join__type(graph: SUBGRAPH2, key: "id") +{ + id: ID + a: Int @join__field(graph: SUBGRAPH2) +} + +type Foo + @join__type(graph: SUBGRAPH1, key: "id") +{ + id: ID + bar: Bar +} + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") + SUBGRAPH2 @join__graph(name: "Subgraph2", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + foo: Foo @join__field(graph: SUBGRAPH1) +} diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index c051b44df7..6b8d060d88 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.53.0" +version = "1.54.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 5cf55bfeda..4c47e46b94 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.53.0" +version = "1.54.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.template.toml b/apollo-router-scaffold/templates/base/Cargo.template.toml index 9953fb981c..290952c5a2 100644 --- a/apollo-router-scaffold/templates/base/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/Cargo.template.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.53.0" +apollo-router = "1.54.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml index 482be08889..9c9a956892 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.53.0" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.54.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 19950eb520..d34d25e39f 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.53.0" +version = "1.54.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" @@ -68,7 +68,7 @@ askama = "0.12.1" access-json = "0.1.0" anyhow = "1.0.86" apollo-compiler.workspace = true -apollo-federation = { path = "../apollo-federation", version = "=1.53.0" } +apollo-federation = { path = "../apollo-federation", version = "=1.54.0" } arc-swap = "1.6.0" async-channel = "1.9.0" async-compression = { version = "0.4.6", features = [ @@ -197,7 +197,7 @@ regex = "1.10.5" reqwest.workspace = true # note: this dependency should _always_ be pinned, prefix the version with an `=` -router-bridge = "=0.6.0+v2.9.0" +router-bridge = "=0.6.1+v2.9.0" rust-embed = { version = "8.4.0", features = ["include-exclude"] } rustls = "0.21.12" @@ -268,6 +268,9 @@ aws-credential-types = "1.1.6" aws-config = "1.1.6" aws-types = "1.1.6" aws-smithy-runtime-api = { version = "1.1.6", features = ["client"] } +aws-sdk-sso = "=1.39.0" # TODO: unpin when on Rust 1.78+ +aws-sdk-ssooidc = "=1.40.0" # TODO: unpin when on Rust 1.78+ +aws-sdk-sts = "=1.39.0" # TODO: unpin when on Rust 1.78+ sha1.workspace = true tracing-serde = "0.1.3" time = { version = "0.3.36", features = ["serde"] } diff --git a/apollo-router/README.md b/apollo-router/README.md index e591f2b386..96a32e23f1 100644 --- a/apollo-router/README.md +++ b/apollo-router/README.md @@ -27,4 +27,4 @@ Most Apollo Router Core features can be defined using our [YAML configuration](h If you prefer to write customizations in Rust or need more advanced customizations, see our section on [native customizations](https://www.apollographql.com/docs/router/customizations/native) for information on how to use `apollo-router` as a Rust library. 
We also publish Rust-specific documentation on our [`apollo-router` crate docs](https://docs.rs/crate/apollo-router). -The minimum supported Rust version (MSRV) for this version of `apollo-router` is **1.72.0**. +The minimum supported Rust version (MSRV) for this version of `apollo-router` is **1.76.0**. diff --git a/apollo-router/src/apollo_studio_interop/mod.rs b/apollo-router/src/apollo_studio_interop/mod.rs index 16b1a2f9ee..9aeaee61f6 100644 --- a/apollo-router/src/apollo_studio_interop/mod.rs +++ b/apollo-router/src/apollo_studio_interop/mod.rs @@ -169,65 +169,6 @@ pub(crate) struct ComparableUsageReporting { pub(crate) result: UsageReporting, } -/// Enum specifying the result of a comparison. -#[derive(Debug)] -pub(crate) enum UsageReportingComparisonResult { - /// The UsageReporting instances are the same - Equal, - /// The stats_report_key in the UsageReporting instances are different - StatsReportKeyNotEqual, - /// The referenced_fields in the UsageReporting instances are different. When comparing referenced - /// fields, we ignore the ordering of field names. - ReferencedFieldsNotEqual, - /// Both the stats_report_key and referenced_fields in the UsageReporting instances are different. - BothNotEqual, -} - -impl ComparableUsageReporting { - /// Compare this to another UsageReporting. - pub(crate) fn compare(&self, other: &UsageReporting) -> UsageReportingComparisonResult { - let sig_equal = self.result.stats_report_key == other.stats_report_key; - let refs_equal = self.compare_referenced_fields(&other.referenced_fields_by_type); - match (sig_equal, refs_equal) { - (true, true) => UsageReportingComparisonResult::Equal, - (false, true) => UsageReportingComparisonResult::StatsReportKeyNotEqual, - (true, false) => UsageReportingComparisonResult::ReferencedFieldsNotEqual, - (false, false) => UsageReportingComparisonResult::BothNotEqual, - } - } - - fn compare_referenced_fields( - &self, - other_ref_fields: &HashMap<String, ReferencedFieldsForType>, - ) -> bool { - let self_ref_fields = &self.result.referenced_fields_by_type; - if self_ref_fields.len() != other_ref_fields.len() { - return false; - } - - for (name, self_refs) in self_ref_fields.iter() { - let maybe_other_refs = other_ref_fields.get(name); - if let Some(other_refs) = maybe_other_refs { - if self_refs.is_interface != other_refs.is_interface { - return false; - } - - let self_field_names_set: HashSet<_> = - self_refs.field_names.clone().into_iter().collect(); - let other_field_names_set: HashSet<_> = - other_refs.field_names.clone().into_iter().collect(); - if self_field_names_set != other_field_names_set { - return false; - } - } else { - return false; - } - } - - true - } -} - /// Generate a ComparableUsageReporting containing the stats_report_key (a normalized version of the operation signature) /// and referenced fields of an operation. The document used to generate the signature and for the references can be /// different to handle cases where the operation has been filtered, but we want to keep the same signature.
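For reference, the removed comparison treated each type's `field_names` as an unordered set. An illustrative sketch of that order-insensitive check (with simplified stand-in types, not the router's actual definitions):

```rust
use std::collections::{HashMap, HashSet};

// Simplified stand-in for the real `ReferencedFieldsForType`.
struct ReferencedFieldsForType {
    field_names: Vec<String>,
    is_interface: bool,
}

// Order-insensitive equality: both maps must contain the same type names, and
// for each type the `is_interface` flag must match and the field names must be
// equal as sets (ordering is ignored, as the removed code's docs state).
fn referenced_fields_equal(
    a: &HashMap<String, ReferencedFieldsForType>,
    b: &HashMap<String, ReferencedFieldsForType>,
) -> bool {
    a.len() == b.len()
        && a.iter().all(|(name, fa)| {
            b.get(name).is_some_and(|fb| {
                fa.is_interface == fb.is_interface
                    && fa.field_names.iter().collect::<HashSet<_>>()
                        == fb.field_names.iter().collect::<HashSet<_>>()
            })
        })
}
```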
diff --git a/apollo-router/src/apollo_studio_interop/tests.rs b/apollo-router/src/apollo_studio_interop/tests.rs index 34f457a196..466ca301b0 100644 --- a/apollo-router/src/apollo_studio_interop/tests.rs +++ b/apollo-router/src/apollo_studio_interop/tests.rs @@ -765,213 +765,3 @@ async fn test_enums_from_response_fragments() { let generated = enums_from_response(query_str, op_name, schema_str, response_str); assert_enums_from_response!(&generated); } - -#[test(tokio::test)] -async fn test_compare() { - let source = ComparableUsageReporting { - result: UsageReporting { - stats_report_key: "# -\n{basicResponseQuery{field1 field2}}".into(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field1".into(), "field2".into()], - is_interface: false, - }, - ), - ]), - }, - }; - - // Same signature and ref fields should match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), - }), - UsageReportingComparisonResult::Equal - )); - - // Reordered signature should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(), - referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), - }), - UsageReportingComparisonResult::StatsReportKeyNotEqual - )); - - // Different signature should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: "# NamedQuery\nquery NamedQuery {basicResponseQuery{field1 field2}}" - .into(), - referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), - }), - UsageReportingComparisonResult::StatsReportKeyNotEqual - )); - - // Reordered parent type should match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field1".into(), "field2".into()], - is_interface: false, - }, - ), - ]) - }), - UsageReportingComparisonResult::Equal - )); - - // Reordered fields should match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field2".into(), "field1".into()], - is_interface: false, - }, - ), - ]) - }), - UsageReportingComparisonResult::Equal - )); - - // Added parent type should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field1".into(), "field2".into()], - is_interface: false, - }, - ), - ( - "OtherType".into(), - ReferencedFieldsForType { - 
field_names: vec!["otherField".into()], - is_interface: false, - }, - ), - ]) - }), - UsageReportingComparisonResult::ReferencedFieldsNotEqual - )); - - // Added field should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field1".into(), "field2".into(), "field3".into()], - is_interface: false, - }, - ), - ]) - }), - UsageReportingComparisonResult::ReferencedFieldsNotEqual - )); - - // Missing parent type should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ),]) - }), - UsageReportingComparisonResult::ReferencedFieldsNotEqual - )); - - // Missing field should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field1".into()], - is_interface: false, - }, - ), - ]) - }), - UsageReportingComparisonResult::ReferencedFieldsNotEqual - )); - - // Both different should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(), - referenced_fields_by_type: HashMap::from([( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ),]) - }), - UsageReportingComparisonResult::BothNotEqual - )); -} diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 7e18720703..8e87fa74d0 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -387,9 +387,9 @@ impl InstrumentData { apollo.router.config.apollo_telemetry_options, "$.telemetry.apollo", opt.signature_normalization_algorithm, - "$.experimental_apollo_signature_normalization_algorithm", + "$.signature_normalization_algorithm", opt.metrics_reference_mode, - "$.experimental_apollo_metrics_reference_mode" + "$.metrics_reference_mode" ); // We need to update the entry we just made because the selected strategy is a named object in the config. 
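For illustration, the instrument population above keys gauges off JSON paths into the router configuration (e.g. `$.signature_normalization_algorithm` relative to `$.telemetry.apollo`). A toy model of such a path lookup, assuming `serde_json` (this is not the router's actual implementation):

```rust
use serde_json::Value;

// Walk a `$.a.b.c`-style path into a JSON document, returning the value if present.
fn lookup<'a>(config: &'a Value, path: &str) -> Option<&'a Value> {
    path.trim_start_matches("$.")
        .split('.')
        .try_fold(config, |node, key| node.get(key))
}

fn main() {
    let config: Value = serde_json::json!({
        "telemetry": { "apollo": { "metrics_reference_mode": "extended" } }
    });
    // The renamed (no longer experimental) option is present...
    assert!(lookup(&config, "$.telemetry.apollo.metrics_reference_mode").is_some());
    // ...while the old experimental key is not.
    assert!(lookup(&config, "$.telemetry.apollo.experimental_apollo_metrics_reference_mode").is_none());
}
```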
diff --git a/apollo-router/src/configuration/migrations/0027-apollo_telemetry_experimental.yaml b/apollo-router/src/configuration/migrations/0027-apollo_telemetry_experimental.yaml new file mode 100644 index 0000000000..02ee640c4a --- /dev/null +++ b/apollo-router/src/configuration/migrations/0027-apollo_telemetry_experimental.yaml @@ -0,0 +1,10 @@ +description: Some Apollo telemetry options are no longer experimental +actions: + - type: delete + path: experimental_apollo_metrics_generation_mode + - type: move + from: telemetry.apollo.experimental_apollo_signature_normalization_algorithm + to: telemetry.apollo.signature_normalization_algorithm + - type: move + from: telemetry.apollo.experimental_apollo_metrics_reference_mode + to: telemetry.apollo.metrics_reference_mode diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index b839eb4f90..8f467b9be5 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -51,8 +51,6 @@ use crate::plugins::limits; use crate::plugins::subscription::SubscriptionConfig; use crate::plugins::subscription::APOLLO_SUBSCRIPTION_PLUGIN; use crate::plugins::subscription::APOLLO_SUBSCRIPTION_PLUGIN_NAME; -use crate::plugins::telemetry::config::ApolloMetricsReferenceMode; -use crate::plugins::telemetry::config::ApolloSignatureNormalizationAlgorithm; use crate::uplink::UplinkConfig; use crate::ApolloRouterError; @@ -162,14 +160,14 @@ pub struct Configuration { #[serde(default)] pub(crate) experimental_chaos: Chaos, - /// Set the Apollo usage report signature and referenced field generation implementation to use. - #[serde(default)] - pub(crate) experimental_apollo_metrics_generation_mode: ApolloMetricsGenerationMode, - /// Set the query planner implementation to use. #[serde(default)] pub(crate) experimental_query_planner_mode: QueryPlannerMode, + /// Set the GraphQL schema introspection implementation to use. + #[serde(default)] + pub(crate) experimental_introspection_mode: IntrospectionMode, + /// Plugin configuration #[serde(default)] pub(crate) plugins: UserPlugins, @@ -201,21 +199,6 @@ impl PartialEq for Configuration { } } -/// Apollo usage report signature and referenced field generation modes. -#[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] -#[derivative(Debug)] -#[serde(rename_all = "lowercase")] -pub(crate) enum ApolloMetricsGenerationMode { - /// Use the new Rust-based implementation. - #[default] - New, - /// Use the old JavaScript-based implementation. - Legacy, - /// Use Rust-based and Javascript-based implementations side by side, logging warnings if the - /// implementations disagree. - Both, -} - /// Query planner modes. #[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] #[derivative(Debug)] @@ -247,6 +230,21 @@ pub(crate) enum QueryPlannerMode { BothBestEffort, } +/// Which implementation of GraphQL schema introspection to use, if enabled +#[derive(Copy, Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] +#[derivative(Debug)] +#[serde(rename_all = "lowercase")] +pub(crate) enum IntrospectionMode { + /// Use the new Rust-based implementation. + New, + /// Use the old JavaScript-based implementation. + #[default] + Legacy, + /// Use Rust-based and Javascript-based implementations side by side, + /// logging warnings if the implementations disagree. 
+ Both, +} + impl<'de> serde::Deserialize<'de> for Configuration { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where @@ -272,8 +270,8 @@ impl<'de> serde::Deserialize<'de> for Configuration { experimental_chaos: Chaos, batching: Batching, experimental_type_conditioned_fetching: bool, - experimental_apollo_metrics_generation_mode: ApolloMetricsGenerationMode, experimental_query_planner_mode: QueryPlannerMode, + experimental_introspection_mode: IntrospectionMode, } let mut ad_hoc: AdHocConfiguration = serde::Deserialize::deserialize(deserializer)?; @@ -299,10 +297,9 @@ impl<'de> serde::Deserialize<'de> for Configuration { persisted_queries: ad_hoc.persisted_queries, limits: ad_hoc.limits, experimental_chaos: ad_hoc.experimental_chaos, - experimental_apollo_metrics_generation_mode: ad_hoc - .experimental_apollo_metrics_generation_mode, experimental_type_conditioned_fetching: ad_hoc.experimental_type_conditioned_fetching, experimental_query_planner_mode: ad_hoc.experimental_query_planner_mode, + experimental_introspection_mode: ad_hoc.experimental_introspection_mode, plugins: ad_hoc.plugins, apollo_plugins: ad_hoc.apollo_plugins, batching: ad_hoc.batching, @@ -348,8 +345,8 @@ impl Configuration { uplink: Option<UplinkConfig>, experimental_type_conditioned_fetching: Option<bool>, batching: Option<Batching>, - experimental_apollo_metrics_generation_mode: Option<ApolloMetricsGenerationMode>, experimental_query_planner_mode: Option<QueryPlannerMode>, + experimental_introspection_mode: Option<IntrospectionMode>, ) -> Result<Self, ConfigurationError> { let notify = Self::notify(&apollo_plugins)?; @@ -364,9 +361,8 @@ impl Configuration { persisted_queries: persisted_query.unwrap_or_default(), limits: operation_limits.unwrap_or_default(), experimental_chaos: chaos.unwrap_or_default(), - experimental_apollo_metrics_generation_mode: - experimental_apollo_metrics_generation_mode.unwrap_or_default(), experimental_query_planner_mode: experimental_query_planner_mode.unwrap_or_default(), + experimental_introspection_mode: experimental_introspection_mode.unwrap_or_default(), plugins: UserPlugins { plugins: Some(plugins), }, @@ -468,8 +464,8 @@ impl Configuration { uplink: Option<UplinkConfig>, batching: Option<Batching>, experimental_type_conditioned_fetching: Option<bool>, - experimental_apollo_metrics_generation_mode: Option<ApolloMetricsGenerationMode>, experimental_query_planner_mode: Option<QueryPlannerMode>, + experimental_introspection_mode: Option<IntrospectionMode>, ) -> Result<Self, ConfigurationError> { let configuration = Self { validated_yaml: Default::default(), @@ -480,9 +476,8 @@ impl Configuration { cors: cors.unwrap_or_default(), limits: operation_limits.unwrap_or_default(), experimental_chaos: chaos.unwrap_or_default(), - experimental_apollo_metrics_generation_mode: - experimental_apollo_metrics_generation_mode.unwrap_or_default(), experimental_query_planner_mode: experimental_query_planner_mode.unwrap_or_default(), + experimental_introspection_mode: experimental_introspection_mode.unwrap_or_default(), plugins: UserPlugins { plugins: Some(plugins), }, @@ -583,53 +578,6 @@ impl Configuration { } } - if self.experimental_query_planner_mode == QueryPlannerMode::New - && self.experimental_apollo_metrics_generation_mode != ApolloMetricsGenerationMode::New - { - return Err(ConfigurationError::InvalidConfiguration { - message: "`experimental_query_planner_mode: new` requires `experimental_apollo_metrics_generation_mode: new`", - error: "either change to some other query planner mode, or change to new metrics generation".into() - }); - } - - let apollo_telemetry_config = match self.apollo_plugins.plugins.get("telemetry") { - Some(telemetry_config) => { - match serde_json::from_value::<crate::plugins::telemetry::config::Conf>( - telemetry_config.clone(), - ) { - Ok(conf) =>
Some(conf.apollo), - _ => None, - } - } - _ => None, - }; - - if let Some(config) = apollo_telemetry_config { - if matches!( - config.experimental_apollo_signature_normalization_algorithm, - ApolloSignatureNormalizationAlgorithm::Enhanced - ) && self.experimental_apollo_metrics_generation_mode - != ApolloMetricsGenerationMode::New - { - return Err(ConfigurationError::InvalidConfiguration { - message: "`experimental_apollo_signature_normalization_algorithm: enhanced` requires `experimental_apollo_metrics_generation_mode: new`", - error: "either change to the legacy signature normalization mode, or change to new metrics generation".into() - }); - } - - if matches!( - config.experimental_apollo_metrics_reference_mode, - ApolloMetricsReferenceMode::Extended - ) && self.experimental_apollo_metrics_generation_mode - != ApolloMetricsGenerationMode::New - { - return Err(ConfigurationError::InvalidConfiguration { - message: "`experimental_apollo_metrics_reference_mode: extended` requires `experimental_apollo_metrics_generation_mode: new`", - error: "either change to the standard reference generation mode, or change to new metrics generation".into() - }); - }; - } - Ok(self) } } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index e30ffefbf6..2a143d67b6 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -113,32 +113,6 @@ expression: "&schema" ], "type": "string" }, - "ApolloMetricsGenerationMode": { - "description": "Apollo usage report signature and referenced field generation modes.", - "oneOf": [ - { - "description": "Use the new Rust-based implementation.", - "enum": [ - "new" - ], - "type": "string" - }, - { - "description": "Use the old JavaScript-based implementation.", - "enum": [ - "legacy" - ], - "type": "string" - }, - { - "description": "Use Rust-based and Javascript-based implementations side by side, logging warnings if the implementations disagree.", - "enum": [ - "both" - ], - "type": "string" - } - ] - }, "ApolloMetricsReferenceMode": { "description": "Apollo usage report reference generation modes.", "oneOf": [ @@ -1766,14 +1740,6 @@ expression: "&schema" "$ref": "#/definitions/ErrorsConfiguration", "description": "#/definitions/ErrorsConfiguration" }, - "experimental_apollo_metrics_reference_mode": { - "$ref": "#/definitions/ApolloMetricsReferenceMode", - "description": "#/definitions/ApolloMetricsReferenceMode" - }, - "experimental_apollo_signature_normalization_algorithm": { - "$ref": "#/definitions/ApolloSignatureNormalizationAlgorithm", - "description": "#/definitions/ApolloSignatureNormalizationAlgorithm" - }, "experimental_local_field_metrics": { "default": false, "description": "Enable field metrics that are generated without FTV1 to be sent to Apollo Studio.", @@ -1796,6 +1762,10 @@ expression: "&schema" "$ref": "#/definitions/SamplerOption", "description": "#/definitions/SamplerOption" }, + "metrics_reference_mode": { + "$ref": "#/definitions/ApolloMetricsReferenceMode", + "description": "#/definitions/ApolloMetricsReferenceMode" + }, "send_headers": { "$ref": "#/definitions/ForwardHeaders", "description": "#/definitions/ForwardHeaders" @@ -1803,6 +1773,10 @@ expression: "&schema" "send_variable_values": { "$ref": 
"#/definitions/ForwardValues", "description": "#/definitions/ForwardValues" + }, + "signature_normalization_algorithm": { + "$ref": "#/definitions/ApolloSignatureNormalizationAlgorithm", + "description": "#/definitions/ApolloSignatureNormalizationAlgorithm" } }, "type": "object" @@ -3594,6 +3568,32 @@ expression: "&schema" }, "type": "object" }, + "IntrospectionMode": { + "description": "Which implementation of GraphQL schema introspection to use, if enabled", + "oneOf": [ + { + "description": "Use the new Rust-based implementation.", + "enum": [ + "new" + ], + "type": "string" + }, + { + "description": "Use the old JavaScript-based implementation.", + "enum": [ + "legacy" + ], + "type": "string" + }, + { + "description": "Use Rust-based and Javascript-based implementations side by side, logging warnings if the implementations disagree.", + "enum": [ + "both" + ], + "type": "string" + } + ] + }, "InvalidationEndpointConfig": { "additionalProperties": false, "properties": { @@ -8151,6 +8151,15 @@ expression: "&schema" "display_trace_id": { "$ref": "#/definitions/DisplayTraceIdFormat", "description": "#/definitions/DisplayTraceIdFormat" + }, + "span_attributes": { + "default": [], + "description": "List of span attributes to attach to the json log object", + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true } }, "type": "object" @@ -8302,14 +8311,14 @@ expression: "&schema" "$ref": "#/definitions/DemandControlConfig", "description": "#/definitions/DemandControlConfig" }, - "experimental_apollo_metrics_generation_mode": { - "$ref": "#/definitions/ApolloMetricsGenerationMode", - "description": "#/definitions/ApolloMetricsGenerationMode" - }, "experimental_chaos": { "$ref": "#/definitions/Chaos", "description": "#/definitions/Chaos" }, + "experimental_introspection_mode": { + "$ref": "#/definitions/IntrospectionMode", + "description": "#/definitions/IntrospectionMode" + }, "experimental_query_planner_mode": { "$ref": "#/definitions/QueryPlannerMode", "description": "#/definitions/QueryPlannerMode" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@apollo_telemetry_experimental.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@apollo_telemetry_experimental.router.yaml.snap new file mode 100644 index 0000000000..f9c70e95aa --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@apollo_telemetry_experimental.router.yaml.snap @@ -0,0 +1,9 @@ +--- +source: apollo-router/src/configuration/tests.rs +expression: new_config +--- +--- +telemetry: + apollo: + signature_normalization_algorithm: enhanced + metrics_reference_mode: extended diff --git a/apollo-router/src/configuration/testdata/migrations/apollo_telemetry_experimental.router.yaml b/apollo-router/src/configuration/testdata/migrations/apollo_telemetry_experimental.router.yaml new file mode 100644 index 0000000000..1949337902 --- /dev/null +++ b/apollo-router/src/configuration/testdata/migrations/apollo_telemetry_experimental.router.yaml @@ -0,0 +1,6 @@ +experimental_apollo_metrics_generation_mode: new + +telemetry: + apollo: + experimental_apollo_signature_normalization_algorithm: enhanced + experimental_apollo_metrics_reference_mode: extended \ No newline at end of file diff --git a/apollo-router/src/configuration/tests.rs b/apollo-router/src/configuration/tests.rs index 7367da2970..d1288df215 100644 --- 
a/apollo-router/src/configuration/tests.rs +++ b/apollo-router/src/configuration/tests.rs @@ -1148,51 +1148,3 @@ fn it_prevents_reuse_and_generate_query_fragments_simultaneously() { assert!(conf.supergraph.generate_query_fragments); assert_eq!(conf.supergraph.reuse_query_fragments, Some(false)); } - -#[test] -fn it_requires_rust_apollo_metrics_generation_for_enhanced_signature_normalization() { - let mut plugins_config = serde_json::Map::new(); - plugins_config.insert( - "telemetry".to_string(), - serde_json::json! {{ - "apollo": { - "experimental_apollo_signature_normalization_algorithm": "enhanced" - } - }}, - ); - - let error = Configuration::builder() - .experimental_apollo_metrics_generation_mode(ApolloMetricsGenerationMode::Both) - .apollo_plugins(plugins_config) - .build() - .expect_err("Must have an error because we have conflicting config options"); - - assert_eq!( - error.to_string(), - String::from("`experimental_apollo_signature_normalization_algorithm: enhanced` requires `experimental_apollo_metrics_generation_mode: new`: either change to the legacy signature normalization mode, or change to new metrics generation") - ); -} - -#[test] -fn it_requires_rust_apollo_metrics_generation_for_extended_references() { - let mut plugins_config = serde_json::Map::new(); - plugins_config.insert( - "telemetry".to_string(), - serde_json::json! {{ - "apollo": { - "experimental_apollo_metrics_reference_mode": "extended" - } - }}, - ); - - let error = Configuration::builder() - .experimental_apollo_metrics_generation_mode(ApolloMetricsGenerationMode::Both) - .apollo_plugins(plugins_config) - .build() - .expect_err("Must have an error because we have conflicting config options"); - - assert_eq!( - error.to_string(), - String::from("`experimental_apollo_metrics_reference_mode: extended` requires `experimental_apollo_metrics_generation_mode: new`: either change to the standard reference generation mode, or change to new metrics generation") - ); -} diff --git a/apollo-router/src/graphql/response.rs b/apollo-router/src/graphql/response.rs index 320b4c849d..6b44f1d2cc 100644 --- a/apollo-router/src/graphql/response.rs +++ b/apollo-router/src/graphql/response.rs @@ -9,6 +9,7 @@ use serde_json_bytes::Map; use crate::error::Error; use crate::error::FetchError; +use crate::graphql::IntoGraphQLErrors; use crate::json_ext::Object; use crate::json_ext::Path; use crate::json_ext::Value; @@ -244,6 +245,35 @@ impl IncrementalResponse { } } +impl From<apollo_compiler::execution::Response> for Response { + fn from(response: apollo_compiler::execution::Response) -> Response { + let apollo_compiler::execution::Response { + errors, + data, + extensions, + } = response; + Self { + errors: errors.into_graphql_errors().unwrap(), + data: match data { + apollo_compiler::execution::ResponseData::Object(map) => { + Some(serde_json_bytes::Value::Object(map)) + } + apollo_compiler::execution::ResponseData::Null => { + Some(serde_json_bytes::Value::Null) + } + apollo_compiler::execution::ResponseData::Absent => None, + }, + extensions, + label: None, + path: None, + has_next: None, + subscribed: None, + created_at: None, + incremental: Vec::new(), + } + } +} + #[cfg(test)] mod tests { use router_bridge::planner::Location; diff --git a/apollo-router/src/plugins/authentication/tests.rs b/apollo-router/src/plugins/authentication/tests.rs index cf638fddcb..258c324da7 100644 --- a/apollo-router/src/plugins/authentication/tests.rs +++ b/apollo-router/src/plugins/authentication/tests.rs @@ -177,10 +177,7 @@ async fn it_rejects_when_there_is_no_auth_header() {
.unwrap(); // Let's create a request with our operation name - let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) - .build() - .unwrap(); + let request_with_appropriate_name = supergraph::Request::canned_builder().build().unwrap(); // ...And call our service stack with it let mut service_response = test_harness @@ -214,7 +211,6 @@ async fn it_rejects_when_auth_prefix_is_missing() { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header(http::header::AUTHORIZATION, "invalid") .build() .unwrap(); @@ -251,7 +247,6 @@ async fn it_rejects_when_auth_prefix_has_no_jwt() { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header(http::header::AUTHORIZATION, "Bearer") .build() .unwrap(); @@ -288,7 +283,6 @@ async fn it_rejects_when_auth_prefix_has_invalid_format_jwt() { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header(http::header::AUTHORIZATION, "Bearer header.payload") .build() .unwrap(); @@ -327,7 +321,6 @@ async fn it_rejects_when_auth_prefix_has_correct_format_but_invalid_jwt() { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header( http::header::AUTHORIZATION, "Bearer header.payload.signature", @@ -367,7 +360,6 @@ async fn it_rejects_when_auth_prefix_has_correct_format_and_invalid_jwt() { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header( http::header::AUTHORIZATION, "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiIsImtpZCI6ImtleTEifQ.eyJleHAiOjEwMDAwMDAwMDAwLCJhbm90aGVyIGNsYWltIjoidGhpcyBpcyBhbm90aGVyIGNsYWltIn0.4GrmfxuUST96cs0YUC0DfLAG218m7vn8fO_ENfXnu5B", @@ -407,7 +399,6 @@ async fn it_accepts_when_auth_prefix_has_correct_format_and_valid_jwt() { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header( http::header::AUTHORIZATION, "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiIsImtpZCI6ImtleTEifQ.eyJleHAiOjEwMDAwMDAwMDAwLCJhbm90aGVyIGNsYWltIjoidGhpcyBpcyBhbm90aGVyIGNsYWltIn0.4GrmfxuUST96cs0YUC0DfLAG218m7vn8fO_ENfXnu5A", @@ -445,7 +436,6 @@ async fn it_accepts_when_auth_prefix_does_not_match_config_and_is_ignored() { let test_harness = build_a_test_harness(None, None, false, true).await; // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header(http::header::AUTHORIZATION, "Basic dXNlcjpwYXNzd29yZA==") .build() .unwrap(); @@ -481,7 +471,6 @@ async fn it_accepts_when_auth_prefix_has_correct_format_multiple_jwks_and_valid_ // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header( http::header::AUTHORIZATION, "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiIsImtpZCI6ImtleTEifQ.eyJleHAiOjEwMDAwMDAwMDAwLCJhbm90aGVyIGNsYWltIjoidGhpcyBpcyBhbm90aGVyIGNsYWltIn0.4GrmfxuUST96cs0YUC0DfLAG218m7vn8fO_ENfXnu5A", @@ -521,7 +510,6 @@ async fn 
it_accepts_when_auth_prefix_has_correct_format_and_valid_jwt_custom_aut // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header( "SOMETHING", "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiIsImtpZCI6ImtleTEifQ.eyJleHAiOjEwMDAwMDAwMDAwLCJhbm90aGVyIGNsYWltIjoidGhpcyBpcyBhbm90aGVyIGNsYWltIn0.4GrmfxuUST96cs0YUC0DfLAG218m7vn8fO_ENfXnu5A", @@ -561,7 +549,6 @@ async fn it_accepts_when_auth_prefix_has_correct_format_and_valid_jwt_custom_pre // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header( http::header::AUTHORIZATION, "SOMETHING eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiIsImtpZCI6ImtleTEifQ.eyJleHAiOjEwMDAwMDAwMDAwLCJhbm90aGVyIGNsYWltIjoidGhpcyBpcyBhbm90aGVyIGNsYWltIn0.4GrmfxuUST96cs0YUC0DfLAG218m7vn8fO_ENfXnu5A", @@ -600,7 +587,6 @@ async fn it_accepts_when_no_auth_prefix_and_valid_jwt_custom_prefix() { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header( http::header::AUTHORIZATION, "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiIsImtpZCI6ImtleTEifQ.eyJleHAiOjEwMDAwMDAwMDAwLCJhbm90aGVyIGNsYWltIjoidGhpcyBpcyBhbm90aGVyIGNsYWltIn0.4GrmfxuUST96cs0YUC0DfLAG218m7vn8fO_ENfXnu5A", @@ -702,7 +688,6 @@ async fn it_extracts_the_token_from_cookies() { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header( http::header::COOKIE, format!("a= b; c = d HttpOnly; authz = {token}; e = f"), @@ -799,7 +784,6 @@ async fn it_supports_multiple_sources() { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header("Authz2", format!("Bear {token}")) .build() .unwrap(); @@ -1001,7 +985,6 @@ async fn issuer_check() { .unwrap(); let request = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header(http::header::AUTHORIZATION, format!("Bearer {token}")) .build() .unwrap(); @@ -1039,7 +1022,6 @@ async fn issuer_check() { .unwrap(); let request = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header(http::header::AUTHORIZATION, format!("Bearer {token}")) .build() .unwrap(); @@ -1076,7 +1058,6 @@ async fn issuer_check() { .unwrap(); let request = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header(http::header::AUTHORIZATION, format!("Bearer {token}")) .build() .unwrap(); @@ -1108,7 +1089,6 @@ async fn issuer_check() { .unwrap(); let request = supergraph::Request::canned_builder() - .operation_name("me".to_string()) .header(http::header::AUTHORIZATION, format!("Bearer {token}")) .build() .unwrap(); diff --git a/apollo-router/src/plugins/cache/invalidation.rs b/apollo-router/src/plugins/cache/invalidation.rs index 77736f6598..a566dc6f33 100644 --- a/apollo-router/src/plugins/cache/invalidation.rs +++ b/apollo-router/src/plugins/cache/invalidation.rs @@ -3,7 +3,6 @@ use std::time::Instant; use fred::error::RedisError; use fred::types::Scanner; -use futures::SinkExt; use futures::StreamExt; use itertools::Itertools; use serde::Deserialize; @@ -17,23 +16,19 @@ use tracing::Instrument; use super::entity::Storage as EntityStorage; use crate::cache::redis::RedisCacheStorage; use crate::cache::redis::RedisKey; -use 
crate::notification::Handle; -use crate::notification::HandleStream; use crate::plugins::cache::entity::hash_entity_key; use crate::plugins::cache::entity::ENTITY_CACHE_VERSION; -use crate::Notify; + +const CHANNEL_SIZE: usize = 1024; #[derive(Clone)] pub(crate) struct Invalidation { #[allow(clippy::type_complexity)] - pub(super) handle: Handle< - InvalidationTopic, - ( - Vec, - InvalidationOrigin, - broadcast::Sender>, - ), - >, + pub(super) handle: tokio::sync::mpsc::Sender<( + Vec, + InvalidationOrigin, + broadcast::Sender>, + )>, } #[derive(Error, Debug, Clone)] @@ -73,15 +68,12 @@ pub(crate) enum InvalidationOrigin { impl Invalidation { pub(crate) async fn new(storage: Arc) -> Result { - let mut notify = Notify::new(None, None, None); - let (handle, _b) = notify.create_or_subscribe(InvalidationTopic, false).await?; - - let h = handle.clone(); + let (tx, rx) = tokio::sync::mpsc::channel(CHANNEL_SIZE); tokio::task::spawn(async move { - start(storage, h.into_stream()).await; + start(storage, rx).await; }); - Ok(Self { handle }) + Ok(Self { handle: tx }) } pub(crate) async fn invalidate( @@ -89,11 +81,11 @@ impl Invalidation { origin: InvalidationOrigin, requests: Vec, ) -> Result { - let mut sink = self.handle.clone().into_sink(); let (response_tx, mut response_rx) = broadcast::channel(2); - sink.send((requests, origin, response_tx.clone())) + self.handle + .send((requests, origin, response_tx.clone())) .await - .map_err(|e| format!("cannot send invalidation request: {}", e.message))?; + .map_err(|e| format!("cannot send invalidation request: {e}"))?; let result = response_rx .recv() @@ -114,16 +106,13 @@ impl Invalidation { #[allow(clippy::type_complexity)] async fn start( storage: Arc, - mut handle: HandleStream< - InvalidationTopic, - ( - Vec, - InvalidationOrigin, - broadcast::Sender>, - ), - >, + mut handle: tokio::sync::mpsc::Receiver<( + Vec, + InvalidationOrigin, + broadcast::Sender>, + )>, ) { - while let Some((requests, origin, response_tx)) = handle.next().await { + while let Some((requests, origin, response_tx)) = handle.recv().await { let origin = match origin { InvalidationOrigin::Endpoint => "endpoint", InvalidationOrigin::Extensions => "extensions", @@ -196,6 +185,7 @@ async fn handle_request( ); } } + scan_res.next()?; } } } diff --git a/apollo-router/src/plugins/cache/invalidation_endpoint.rs b/apollo-router/src/plugins/cache/invalidation_endpoint.rs index 561309c8fb..18af7763f1 100644 --- a/apollo-router/src/plugins/cache/invalidation_endpoint.rs +++ b/apollo-router/src/plugins/cache/invalidation_endpoint.rs @@ -203,31 +203,17 @@ fn valid_shared_key( mod tests { use std::collections::HashMap; - use tokio::sync::broadcast::Sender; - use tokio_stream::StreamExt; + use tokio::sync::broadcast; use tower::ServiceExt; use super::*; use crate::plugins::cache::invalidation::InvalidationError; - use crate::plugins::cache::invalidation::InvalidationTopic; - use crate::Notify; #[tokio::test] async fn test_invalidation_service_bad_shared_key() { - #[allow(clippy::type_complexity)] - let mut notify: Notify< - InvalidationTopic, - ( - Vec, - InvalidationOrigin, - Sender>, - ), - > = Notify::new(None, None, None); - let (handle, _b) = notify - .create_or_subscribe(InvalidationTopic, false) - .await - .unwrap(); + let (handle, _rx) = tokio::sync::mpsc::channel(128); let invalidation = Invalidation { handle }; + let config = Arc::new(SubgraphConfiguration { all: Subgraph { ttl: None, @@ -265,25 +251,14 @@ mod tests { #[tokio::test] async fn 
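The refactor above swaps the Notify pub/sub handle for a plain bounded channel: requests flow through an mpsc sender to a background task, and each request carries its own broadcast sender so the caller can await the reply. A minimal self-contained sketch of that pattern, assuming only tokio (the payload type, channel sizes, and names are illustrative, not the router's actual types):

use tokio::sync::{broadcast, mpsc};

// One request = a payload plus a dedicated reply sender.
type Request = (String, broadcast::Sender<Result<u64, String>>);

// Background task: drain the request channel, answer on each reply sender.
async fn worker(mut rx: mpsc::Receiver<Request>) {
    while let Some((payload, response_tx)) = rx.recv().await {
        // ... perform the actual invalidation work here ...
        let _ = response_tx.send(Ok(payload.len() as u64));
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<Request>(1024);
    tokio::spawn(worker(rx));

    let (response_tx, mut response_rx) = broadcast::channel(2);
    tx.send(("subgraph:test".into(), response_tx)).await.unwrap();
    println!("worker replied: {:?}", response_rx.recv().await.unwrap());
}
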
test_invalidation_service_good_sub_shared_key() { - #[allow(clippy::type_complexity)] - let mut notify: Notify< - InvalidationTopic, - ( - Vec, - InvalidationOrigin, - Sender>, - ), - > = Notify::new(None, None, None); - let (handle, _b) = notify - .create_or_subscribe(InvalidationTopic, false) - .await - .unwrap(); - let h = handle.clone(); - + let (handle, mut rx) = tokio::sync::mpsc::channel::<( + Vec, + InvalidationOrigin, + broadcast::Sender>, + )>(128); tokio::task::spawn(async move { - let mut handle = h.into_stream(); let mut called = false; - while let Some((requests, origin, response_tx)) = handle.next().await { + while let Some((requests, origin, response_tx)) = rx.recv().await { called = true; if requests != [ @@ -366,60 +341,16 @@ mod tests { .unwrap(); let res = service.oneshot(req).await.unwrap(); assert_eq!(res.response.status(), StatusCode::ACCEPTED); - let h = handle.clone(); - - tokio::task::spawn(async move { - let mut handle = h.into_stream(); - let mut called = false; - while let Some((requests, origin, response_tx)) = handle.next().await { - called = true; - if requests - != [ - InvalidationRequest::Subgraph { - subgraph: String::from("test"), - }, - InvalidationRequest::Type { - subgraph: String::from("test"), - r#type: String::from("Test"), - }, - ] - { - response_tx - .send(Err(InvalidationError::Custom(format!( - "it's not the right invalidation requests : {requests:?}" - )))) - .unwrap(); - return; - } - if origin != InvalidationOrigin::Endpoint { - response_tx - .send(Err(InvalidationError::Custom(format!( - "it's not the right invalidation origin : {origin:?}" - )))) - .unwrap(); - return; - } - response_tx.send(Ok(0)).unwrap(); - } - assert!(called); - }); } #[tokio::test] async fn test_invalidation_service_bad_shared_key_subgraph() { #[allow(clippy::type_complexity)] - let mut notify: Notify< - InvalidationTopic, - ( - Vec, - InvalidationOrigin, - Sender>, - ), - > = Notify::new(None, None, None); - let (handle, _b) = notify - .create_or_subscribe(InvalidationTopic, false) - .await - .unwrap(); + let (handle, _rx) = tokio::sync::mpsc::channel::<( + Vec, + InvalidationOrigin, + broadcast::Sender>, + )>(128); let invalidation = Invalidation { handle }; let config = Arc::new(SubgraphConfiguration { all: Subgraph { @@ -467,25 +398,16 @@ mod tests { #[tokio::test] async fn test_invalidation_service() { - #[allow(clippy::type_complexity)] - let mut notify: Notify< - InvalidationTopic, - ( - Vec, - InvalidationOrigin, - Sender>, - ), - > = Notify::new(None, None, None); - let (handle, _b) = notify - .create_or_subscribe(InvalidationTopic, false) - .await - .unwrap(); - let h = handle.clone(); + let (handle, mut rx) = tokio::sync::mpsc::channel::<( + Vec, + InvalidationOrigin, + broadcast::Sender>, + )>(128); + let invalidation = Invalidation { handle }; tokio::task::spawn(async move { - let mut handle = h.into_stream(); let mut called = false; - while let Some((requests, origin, response_tx)) = handle.next().await { + while let Some((requests, origin, response_tx)) = rx.recv().await { called = true; if requests != [ @@ -518,7 +440,6 @@ mod tests { assert!(called); }); - let invalidation = Invalidation { handle }; let config = Arc::new(SubgraphConfiguration { all: Subgraph { ttl: None, diff --git a/apollo-router/src/plugins/record_replay/record.rs b/apollo-router/src/plugins/record_replay/record.rs index f9dc97b52a..d1c0bc9829 100644 --- a/apollo-router/src/plugins/record_replay/record.rs +++ b/apollo-router/src/plugins/record_replay/record.rs @@ -22,9 +22,6 @@ use 
crate::services::router; use crate::services::router::body::RouterBody; use crate::services::subgraph; use crate::services::supergraph; -use crate::spec::query::Query; -use crate::spec::Schema; -use crate::Configuration; const RECORD_HEADER: &str = "x-apollo-router-record"; @@ -48,7 +45,6 @@ struct Record { enabled: bool, supergraph_sdl: Arc, storage_path: Arc, - schema: Arc, } register_plugin!("experimental", "record", Record); @@ -67,7 +63,6 @@ impl Plugin for Record { enabled: init.config.enabled, supergraph_sdl: init.supergraph_sdl.clone(), storage_path: storage_path.clone().into(), - schema: Arc::new(Schema::parse_arc(init.supergraph_sdl, &Default::default())?), }; if init.config.enabled { @@ -150,20 +145,11 @@ impl Plugin for Record { return service; } - let schema = self.schema.clone(); let supergraph_sdl = self.supergraph_sdl.clone(); ServiceBuilder::new() .map_request(move |req: supergraph::Request| { - if is_introspection( - req.supergraph_request - .body() - .query - .clone() - .unwrap_or_default(), - req.supergraph_request.body().operation_name.as_deref(), - schema.clone(), - ) { + if is_introspection(&req) { return req; } @@ -317,8 +303,18 @@ async fn write_file(dir: Arc, path: &PathBuf, contents: &[u8]) -> Result<( Ok(()) } -fn is_introspection(query: String, operation_name: Option<&str>, schema: Arc) -> bool { - Query::parse(query, operation_name, &schema, &Configuration::default()) - .map(|q| q.contains_introspection()) - .unwrap_or_default() +fn is_introspection(request: &supergraph::Request) -> bool { + request + .context + .unsupported_executable_document() + .is_some_and(|doc| { + doc.operations + .get(request.supergraph_request.body().operation_name.as_deref()) + .ok() + .is_some_and(|op| { + op.root_fields(&doc).all(|field| { + matches!(field.name.as_str(), "__typename" | "__schema" | "__type") + }) + }) + }) } diff --git a/apollo-router/src/plugins/telemetry/apollo.rs b/apollo-router/src/plugins/telemetry/apollo.rs index ecb8177e68..69780cbc9e 100644 --- a/apollo-router/src/plugins/telemetry/apollo.rs +++ b/apollo-router/src/plugins/telemetry/apollo.rs @@ -107,11 +107,10 @@ pub(crate) struct Config { pub(crate) errors: ErrorsConfiguration, /// Set the signature normalization algorithm to use when sending Apollo usage reports. - pub(crate) experimental_apollo_signature_normalization_algorithm: - ApolloSignatureNormalizationAlgorithm, + pub(crate) signature_normalization_algorithm: ApolloSignatureNormalizationAlgorithm, /// Set the Apollo usage report reference reporting mode to use. - pub(crate) experimental_apollo_metrics_reference_mode: ApolloMetricsReferenceMode, + pub(crate) metrics_reference_mode: ApolloMetricsReferenceMode, /// Enable field metrics that are generated without FTV1 to be sent to Apollo Studio. 
pub(crate) experimental_local_field_metrics: bool, @@ -215,10 +214,9 @@ impl Default for Config { send_variable_values: ForwardValues::None, batch_processor: BatchProcessorConfig::default(), errors: ErrorsConfiguration::default(), - experimental_apollo_signature_normalization_algorithm: - ApolloSignatureNormalizationAlgorithm::default(), + signature_normalization_algorithm: ApolloSignatureNormalizationAlgorithm::default(), experimental_local_field_metrics: false, - experimental_apollo_metrics_reference_mode: ApolloMetricsReferenceMode::default(), + metrics_reference_mode: ApolloMetricsReferenceMode::default(), } } } diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index dba6f207f4..4c9be01135 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -745,7 +745,7 @@ impl Conf { match configuration.apollo_plugins.plugins.get("telemetry") { Some(telemetry_config) => { match serde_json::from_value::(telemetry_config.clone()) { - Ok(conf) => conf.apollo.experimental_apollo_metrics_reference_mode, + Ok(conf) => conf.apollo.metrics_reference_mode, _ => ApolloMetricsReferenceMode::default(), } } @@ -759,10 +759,7 @@ impl Conf { match configuration.apollo_plugins.plugins.get("telemetry") { Some(telemetry_config) => { match serde_json::from_value::(telemetry_config.clone()) { - Ok(conf) => { - conf.apollo - .experimental_apollo_signature_normalization_algorithm - } + Ok(conf) => conf.apollo.signature_normalization_algorithm, _ => ApolloSignatureNormalizationAlgorithm::default(), } } diff --git a/apollo-router/src/plugins/telemetry/config_new/logging.rs b/apollo-router/src/plugins/telemetry/config_new/logging.rs index be9aeefdb4..db2dda6588 100644 --- a/apollo-router/src/plugins/telemetry/config_new/logging.rs +++ b/apollo-router/src/plugins/telemetry/config_new/logging.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; +use std::collections::HashSet; use std::io::IsTerminal; use std::time::Duration; @@ -339,6 +340,8 @@ pub(crate) struct JsonFormat { pub(crate) display_trace_id: DisplayTraceIdFormat, /// Include the span id (if any) with the log event. 
(default: true) pub(crate) display_span_id: bool, + /// List of span attributes to attach to the json log object + pub(crate) span_attributes: HashSet, } #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Eq)] @@ -389,6 +392,7 @@ impl Default for JsonFormat { display_resource: true, display_trace_id: DisplayTraceIdFormat::Bool(true), display_span_id: true, + span_attributes: HashSet::new(), } } } diff --git a/apollo-router/src/plugins/telemetry/formatters/json.rs b/apollo-router/src/plugins/telemetry/formatters/json.rs index b7952c2701..8f6551ce14 100644 --- a/apollo-router/src/plugins/telemetry/formatters/json.rs +++ b/apollo-router/src/plugins/telemetry/formatters/json.rs @@ -1,9 +1,11 @@ +use std::collections::HashMap; use std::collections::HashSet; use std::fmt; use std::io; use opentelemetry::sdk::Resource; use opentelemetry::Array; +use opentelemetry::Key; use opentelemetry::OrderMap; use opentelemetry::Value; use serde::ser::SerializeMap; @@ -283,6 +285,14 @@ where } } + if !self.config.span_attributes.is_empty() { + for (key, value) in + extract_span_attributes(ctx.lookup_current(), &self.config.span_attributes) + { + serializer.serialize_entry(key.as_str(), &AttributeValue::from(value))?; + } + } + let mut visitor = tracing_serde::SerdeMapVisitor::new(serializer); event.record(&mut visitor); @@ -318,7 +328,6 @@ where serializer.serialize_entry("dd.trace_id", &dd_trace_id)?; } } - if self.config.display_span_list && current_span.is_some() { serializer.serialize_entry( "spans", @@ -370,6 +379,66 @@ fn extract_dd_trace_id<'a, 'b, T: LookupSpan<'a>>(span: &SpanRef<'a, T>) -> Opti dd_trace_id } +fn extract_span_attributes<'a, 'b, Span>( + current: Option>, + include_attributes: &HashSet, +) -> HashMap +where + Span: for<'lookup> tracing_subscriber::registry::LookupSpan<'lookup>, +{ + let mut attributes = HashMap::new(); + if let Some(leaf_span) = ¤t { + for span in leaf_span.scope().from_root() { + let ext = span.extensions(); + + // Get otel attributes + { + let otel_attributes = ext + .get::() + .and_then(|otel_data| otel_data.builder.attributes.as_ref()); + if let Some(otel_attributes) = otel_attributes { + attributes.extend( + otel_attributes + .iter() + .filter(|(key, _)| { + let key_name = key.as_str(); + !key_name.starts_with(APOLLO_PRIVATE_PREFIX) + && include_attributes.contains(key_name) + }) + .map(|(key, val)| (key.clone(), val.clone())), + ); + } + } + // Get custom dynamic attributes + { + let custom_attributes = ext.get::().map(|attrs| attrs.attributes()); + if let Some(custom_attributes) = custom_attributes { + #[cfg(test)] + let custom_attributes: Vec<&opentelemetry::KeyValue> = { + let mut my_custom_attributes: Vec<&opentelemetry::KeyValue> = + custom_attributes.iter().collect(); + my_custom_attributes.sort_by_key(|kv| &kv.key); + my_custom_attributes + }; + #[allow(clippy::into_iter_on_ref)] + attributes.extend( + custom_attributes + .into_iter() + .filter(|kv| { + let key_name = kv.key.as_str(); + !key_name.starts_with(APOLLO_PRIVATE_PREFIX) + && include_attributes.contains(key_name) + }) + .map(|kv| (kv.key.clone(), kv.value.clone())), + ); + } + } + } + } + + attributes +} + struct WriteAdaptor<'a> { fmt_write: &'a mut dyn fmt::Write, } diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs b/apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs index 00d1e01eeb..47a13762d0 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs +++ b/apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs @@ -55,7 +55,7 @@ impl 
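The extract_span_attributes helper above walks the span scope from the root and keeps only attributes that are allow-listed in span_attributes and not Apollo-private. Reduced to its core, the filter looks like the sketch below (the APOLLO_PRIVATE_PREFIX value and the flat key/value representation are assumptions made for illustration):

use std::collections::{HashMap, HashSet};

// Assumed for this sketch: the router reserves a prefix for private keys.
const APOLLO_PRIVATE_PREFIX: &str = "apollo_private.";

// Keep only attributes that are explicitly requested and not private.
fn filter_attributes(
    attrs: &[(String, String)],
    include: &HashSet<String>,
) -> HashMap<String, String> {
    attrs
        .iter()
        .filter(|(key, _)| !key.starts_with(APOLLO_PRIVATE_PREFIX) && include.contains(key))
        .cloned()
        .collect()
}
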
MetricsConfigurator for Config { apollo_graph_ref: Some(reference), schema_id, batch_processor, - experimental_apollo_metrics_reference_mode, + metrics_reference_mode, .. } => { if !ENABLED.swap(true, Ordering::Relaxed) { @@ -69,7 +69,7 @@ impl MetricsConfigurator for Config { reference, schema_id, batch_processor, - *experimental_apollo_metrics_reference_mode, + *metrics_reference_mode, )?; // env variable EXPERIMENTAL_APOLLO_OTLP_METRICS_ENABLED will disappear without warning in future if std::env::var("EXPERIMENTAL_APOLLO_OTLP_METRICS_ENABLED") diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap b/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap index a4ce331e08..3d784242db 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap +++ b/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap @@ -1,5 +1,60 @@ --- -source: apollo-router/src/plugins/telemetry/metrics/apollo.rs +source: apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs expression: results --- -[] +[ + { + "request_id": "[REDACTED]", + "stats": { + "## GraphQLValidationFailure\n": { + "stats_with_context": { + "context": { + "client_name": "test_client", + "client_version": "1.0-test", + "operation_type": "query", + "operation_subtype": "", + "result": "" + }, + "query_latency_stats": { + "latency": { + "secs": 0, + "nanos": 100000000 + }, + "cache_hit": false, + "persisted_query_hit": null, + "cache_latency": null, + "root_error_stats": { + "children": {}, + "errors_count": 0, + "requests_with_errors_count": 0 + }, + "has_errors": true, + "public_cache_ttl_latency": null, + "private_cache_ttl_latency": null, + "registered_operation": false, + "forbidden_operation": false, + "without_field_instrumentation": false + }, + "limits_stats": { + "strategy": null, + "cost_estimated": null, + "cost_actual": null, + "depth": 0, + "height": 0, + "alias_count": 0, + "root_field_count": 0 + }, + "per_type_stat": {}, + "extended_references": { + "referenced_input_fields": {}, + "referenced_enums": {} + }, + "enum_response_references": {}, + "local_per_type_stat": {} + }, + "referenced_fields_by_type": {} + } + }, + "licensed_operation_count_by_type": null + } +] diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap b/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap index a4ce331e08..2f375f6382 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap +++ b/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap @@ -1,5 +1,60 @@ --- -source: apollo-router/src/plugins/telemetry/metrics/apollo.rs +source: apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs expression: results --- -[] +[ + { + "request_id": "[REDACTED]", + "stats": { + "## GraphQLParseFailure\n": { + 
"stats_with_context": { + "context": { + "client_name": "test_client", + "client_version": "1.0-test", + "operation_type": "query", + "operation_subtype": "", + "result": "" + }, + "query_latency_stats": { + "latency": { + "secs": 0, + "nanos": 100000000 + }, + "cache_hit": false, + "persisted_query_hit": null, + "cache_latency": null, + "root_error_stats": { + "children": {}, + "errors_count": 0, + "requests_with_errors_count": 0 + }, + "has_errors": true, + "public_cache_ttl_latency": null, + "private_cache_ttl_latency": null, + "registered_operation": false, + "forbidden_operation": false, + "without_field_instrumentation": false + }, + "limits_stats": { + "strategy": null, + "cost_estimated": null, + "cost_actual": null, + "depth": 0, + "height": 0, + "alias_count": 0, + "root_field_count": 0 + }, + "per_type_stat": {}, + "extended_references": { + "referenced_input_fields": {}, + "referenced_enums": {} + }, + "enum_response_references": {}, + "local_per_type_stat": {} + }, + "referenced_fields_by_type": {} + } + }, + "licensed_operation_count_by_type": null + } +] diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap b/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap index a4ce331e08..3d784242db 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap +++ b/apollo-router/src/plugins/telemetry/metrics/apollo/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap @@ -1,5 +1,60 @@ --- -source: apollo-router/src/plugins/telemetry/metrics/apollo.rs +source: apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs expression: results --- -[] +[ + { + "request_id": "[REDACTED]", + "stats": { + "## GraphQLValidationFailure\n": { + "stats_with_context": { + "context": { + "client_name": "test_client", + "client_version": "1.0-test", + "operation_type": "query", + "operation_subtype": "", + "result": "" + }, + "query_latency_stats": { + "latency": { + "secs": 0, + "nanos": 100000000 + }, + "cache_hit": false, + "persisted_query_hit": null, + "cache_latency": null, + "root_error_stats": { + "children": {}, + "errors_count": 0, + "requests_with_errors_count": 0 + }, + "has_errors": true, + "public_cache_ttl_latency": null, + "private_cache_ttl_latency": null, + "registered_operation": false, + "forbidden_operation": false, + "without_field_instrumentation": false + }, + "limits_stats": { + "strategy": null, + "cost_estimated": null, + "cost_actual": null, + "depth": 0, + "height": 0, + "alias_count": 0, + "root_field_count": 0 + }, + "per_type_stat": {}, + "extended_references": { + "referenced_input_fields": {}, + "referenced_enums": {} + }, + "enum_response_references": {}, + "local_per_type_stat": {} + }, + "referenced_fields_by_type": {} + } + }, + "licensed_operation_count_by_type": null + } +] diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 345d1936ee..7fdc8cf496 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -519,6 +519,8 @@ impl Plugin for Telemetry { .map(|u| { u.stats_report_key == "## GraphQLValidationFailure\n" || u.stats_report_key == "## GraphQLParseFailure\n" + || 
u.stats_report_key + == "## GraphQLUnknownOperationName\n" }) .unwrap_or(false) }) { diff --git a/apollo-router/src/plugins/telemetry/otel/layer.rs b/apollo-router/src/plugins/telemetry/otel/layer.rs index e1d20ec739..866bf50a35 100644 --- a/apollo-router/src/plugins/telemetry/otel/layer.rs +++ b/apollo-router/src/plugins/telemetry/otel/layer.rs @@ -1101,7 +1101,6 @@ where attributes.insert(OTEL_ORIGINAL_NAME.into(), builder.name.into()); builder.name = forced_span_name.into(); } - // Assign end time, build and start span, drop span to export builder .with_end_time(SystemTime::now()) diff --git a/apollo-router/src/plugins/telemetry/otel/tracer.rs b/apollo-router/src/plugins/telemetry/otel/tracer.rs index 8a6c7402bc..463fd8cb2c 100644 --- a/apollo-router/src/plugins/telemetry/otel/tracer.rs +++ b/apollo-router/src/plugins/telemetry/otel/tracer.rs @@ -16,6 +16,7 @@ use opentelemetry_sdk::trace::Tracer as SdkTracer; use opentelemetry_sdk::trace::TracerProvider as SdkTracerProvider; use super::OtelData; +use crate::plugins::telemetry::tracing::datadog_exporter::DatadogTraceState; /// An interface for authors of OpenTelemetry SDKs to build pre-sampled tracers. /// @@ -158,7 +159,12 @@ fn process_sampling_result( decision: SamplingDecision::RecordAndSample, trace_state, .. - } => Some((trace_flags | TraceFlags::SAMPLED, trace_state.clone())), + } => Some(( + trace_flags | TraceFlags::SAMPLED, + trace_state + .with_priority_sampling(true) + .with_measuring(true), + )), } } diff --git a/apollo-router/src/plugins/telemetry/reload.rs b/apollo-router/src/plugins/telemetry/reload.rs index 2ca69191c9..0f529efd2f 100644 --- a/apollo-router/src/plugins/telemetry/reload.rs +++ b/apollo-router/src/plugins/telemetry/reload.rs @@ -35,6 +35,7 @@ use crate::plugins::telemetry::formatters::FilteringFormatter; use crate::plugins::telemetry::otel; use crate::plugins::telemetry::otel::OpenTelemetryLayer; use crate::plugins::telemetry::otel::PreSampledTracer; +use crate::plugins::telemetry::tracing::datadog_exporter::DatadogTraceState; use crate::plugins::telemetry::tracing::reload::ReloadTracer; use crate::tracer::TraceId; @@ -140,7 +141,9 @@ pub(crate) fn prepare_context(context: Context) -> Context { tracer.new_span_id(), TraceFlags::default(), false, - TraceState::default(), + TraceState::default() + .with_measuring(true) + .with_priority_sampling(true), ); return context.with_remote_span_context(span_context); } diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo.rs b/apollo-router/src/plugins/telemetry/tracing/apollo.rs index a3cafdd5cc..b4f6589e37 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo.rs @@ -45,7 +45,7 @@ impl TracingConfigurator for Config { .batch_config(&self.batch_processor) .errors_configuration(&self.errors) .use_legacy_request_span(matches!(spans_config.mode, SpanMode::Deprecated)) - .metrics_reference_mode(self.experimental_apollo_metrics_reference_mode) + .metrics_reference_mode(self.metrics_reference_mode) .build()?; Ok(builder.with_span_processor( BatchSpanProcessor::builder(exporter, opentelemetry::runtime::Tokio) diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog.rs b/apollo-router/src/plugins/telemetry/tracing/datadog.rs index e360ee5cb3..4574b529ff 100644 --- a/apollo-router/src/plugins/telemetry/tracing/datadog.rs +++ b/apollo-router/src/plugins/telemetry/tracing/datadog.rs @@ -39,8 +39,7 @@ use crate::plugins::telemetry::consts::SUBGRAPH_SPAN_NAME; use 
crate::plugins::telemetry::consts::SUPERGRAPH_SPAN_NAME; use crate::plugins::telemetry::endpoint::UriEndpoint; use crate::plugins::telemetry::tracing::datadog_exporter; -use crate::plugins::telemetry::tracing::datadog_exporter::propagator::TRACE_STATE_MEASURE; -use crate::plugins::telemetry::tracing::datadog_exporter::propagator::TRACE_STATE_TRUE_VALUE; +use crate::plugins::telemetry::tracing::datadog_exporter::DatadogTraceState; use crate::plugins::telemetry::tracing::BatchProcessorConfig; use crate::plugins::telemetry::tracing::SpanProcessorExt; use crate::plugins::telemetry::tracing::TracingConfigurator; @@ -255,19 +254,17 @@ impl SpanExporter for ExporterWrapper { }; // Unfortunately trace state is immutable, so we have to create a new one - if let Some(true) = self.span_metrics.get(final_span_name) { - let new_trace_state = span - .span_context - .trace_state() - .insert(TRACE_STATE_MEASURE, TRACE_STATE_TRUE_VALUE) - .expect("valid trace state"); - span.span_context = SpanContext::new( - span.span_context.trace_id(), - span.span_context.span_id(), - span.span_context.trace_flags(), - span.span_context.is_remote(), - new_trace_state, - ) + if let Some(setting) = self.span_metrics.get(final_span_name) { + if *setting != span.span_context.trace_state().measuring_enabled() { + let new_trace_state = span.span_context.trace_state().with_measuring(*setting); + span.span_context = SpanContext::new( + span.span_context.trace_id(), + span.span_context.span_id(), + span.span_context.trace_flags(), + span.span_context.is_remote(), + new_trace_state, + ) + } } // Set the span kind https://github.com/DataDog/dd-trace-go/blob/main/ddtrace/ext/span_kind.go diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index fc4ccce41d..4d94335e81 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -1,5 +1,6 @@ //! 
Calls out to nodejs query planner +use std::cmp::Ordering; use std::collections::HashMap; use std::fmt::Debug; use std::fmt::Write; @@ -7,6 +8,7 @@ use std::sync::Arc; use std::time::Instant; use apollo_compiler::ast; +use apollo_compiler::execution::InputCoercionError; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_federation::error::FederationError; @@ -16,20 +18,19 @@ use futures::future::BoxFuture; use opentelemetry_api::metrics::MeterProvider as _; use opentelemetry_api::metrics::ObservableGauge; use opentelemetry_api::KeyValue; +use router_bridge::introspect::IntrospectionError; use router_bridge::planner::PlanOptions; use router_bridge::planner::PlanSuccess; use router_bridge::planner::Planner; use router_bridge::planner::UsageReporting; use serde::Deserialize; -use serde_json_bytes::Map; use serde_json_bytes::Value; use tower::Service; use super::PlanNode; use super::QueryKey; use crate::apollo_studio_interop::generate_usage_reporting; -use crate::apollo_studio_interop::UsageReportingComparisonResult; -use crate::configuration::ApolloMetricsGenerationMode; +use crate::configuration::IntrospectionMode; use crate::configuration::QueryPlannerMode; use crate::error::PlanErrors; use crate::error::QueryPlannerError; @@ -297,8 +298,6 @@ impl PlannerMode { let plan = result?; // Dummy value overwritten below in `BrigeQueryPlanner::plan` - // `Configuration::validate` ensures that we only take this path - // when we also have `ApolloMetricsGenerationMode::New`` let usage_reporting = UsageReporting { stats_report_key: Default::default(), referenced_fields_by_type: Default::default(), @@ -308,6 +307,7 @@ impl PlannerMode { if let Some(node) = &mut root_node { init_query_plan_root_node(node)?; } + Ok(PlanSuccess { usage_reporting, data: QueryPlanResult { @@ -315,6 +315,11 @@ impl PlannerMode { query_plan: QueryPlan { node: root_node.map(Arc::new), }, + evaluated_plan_count: plan + .statistics + .evaluated_plan_count + .clone() + .into_inner() as u64, }, }) } @@ -484,19 +489,226 @@ impl BridgeQueryPlanner { }) } - async fn introspection(&self, query: String) -> Result { - match self.introspection.as_ref() { - Some(introspection) => { - let response = introspection - .execute(query) + async fn introspection( + &self, + key: QueryKey, + doc: ParsedDocument, + ) -> Result { + let Some(introspection) = &self.introspection else { + return Ok(QueryPlannerContent::IntrospectionDisabled); + }; + let mode = self.configuration.experimental_introspection_mode; + let response = if mode != IntrospectionMode::New && doc.executable.operations.len() > 1 { + // TODO: add an operation_name parameter to router-bridge to fix this? 
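The IntrospectionMode::Both arm below runs the new Rust introspection alongside the JS implementation, records whether they agree, and still serves the JS result. The general shape of that dual-run pattern, as a hedged sketch (record stands in for the metrics counter; all names here are hypothetical):

// Run the new implementation next to the legacy one, record agreement,
// and keep returning the legacy result until the new path is trusted.
fn run_both<T: PartialEq, E>(
    legacy: Result<T, E>,
    new: Result<T, E>,
    record: impl FnOnce(bool),
) -> Result<T, E> {
    let matched = match (&legacy, &new) {
        (Ok(a), Ok(b)) => a == b,
        (Err(_), Err(_)) => true,
        _ => false,
    };
    record(matched);
    legacy
}
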
+ let error = graphql::Error::builder() + .message( + "Schema introspection is currently not supported \ + with multiple operations in the same document", + ) + .extension_code("INTROSPECTION_WITH_MULTIPLE_OPERATIONS") + .build(); + return Ok(QueryPlannerContent::Response { + response: Box::new(graphql::Response::builder().error(error).build()), + }); + } else { + match mode { + IntrospectionMode::Legacy => introspection + .execute(key.filtered_query) .await - .map_err(QueryPlannerError::Introspection)?; + .map_err(QueryPlannerError::Introspection)?, + IntrospectionMode::New => self.rust_introspection(&key, &doc)?, + IntrospectionMode::Both => { + let rust_result = match self.rust_introspection(&key, &doc) { + Ok(response) => { + if response.errors.is_empty() { + Ok(response) + } else { + Err(QueryPlannerError::Introspection(IntrospectionError { + message: Some( + response + .errors + .into_iter() + .map(|e| e.to_string()) + .collect::>() + .join(", "), + ), + })) + } + } + Err(e) => Err(e), + }; + let js_result = introspection + .execute(key.filtered_query) + .await + .map_err(QueryPlannerError::Introspection); + self.compare_introspection_responses(js_result.clone(), rust_result); + js_result? + } + } + }; - Ok(QueryPlannerContent::Response { - response: Box::new(response), - }) + Ok(QueryPlannerContent::Response { + response: Box::new(response), + }) + } + + fn rust_introspection( + &self, + key: &QueryKey, + doc: &ParsedDocument, + ) -> Result { + let schema = self.schema.api_schema(); + let operation = doc.get_operation(key.operation_name.as_deref())?; + let variable_values = Default::default(); + let variable_values = + apollo_compiler::execution::coerce_variable_values(schema, operation, &variable_values) + .map_err(|e| { + let message = match &e { + InputCoercionError::SuspectedValidationBug(e) => &e.message, + InputCoercionError::ValueError { message, .. } => message, + }; + QueryPlannerError::Introspection(IntrospectionError { + message: Some(message.clone()), + }) + })?; + let response = apollo_compiler::execution::execute_introspection_only_query( + schema, + &doc.executable, + operation, + &variable_values, + ); + Ok(response.into()) + } + + fn compare_introspection_responses( + &self, + mut js_result: Result, + mut rust_result: Result, + ) { + let is_matched; + match (&mut js_result, &mut rust_result) { + (Err(_), Err(_)) => { + is_matched = true; + } + (Err(err), Ok(_)) => { + is_matched = false; + tracing::warn!("JS introspection error: {err}") + } + (Ok(_), Err(err)) => { + is_matched = false; + tracing::warn!("Rust introspection error: {err}") + } + (Ok(js_response), Ok(rust_response)) => { + if let (Some(js_data), Some(rust_data)) = + (&mut js_response.data, &mut rust_response.data) + { + json_sort_arrays(js_data); + json_sort_arrays(rust_data); + } + is_matched = js_response.data == rust_response.data; + if is_matched { + tracing::debug!("Introspection match! 🎉") + } else { + tracing::debug!("Introspection mismatch"); + tracing::trace!("Introspection diff:\n{}", { + let rust = rust_response + .data + .as_ref() + .map(|d| serde_json::to_string_pretty(&d).unwrap()) + .unwrap_or_default(); + let js = js_response + .data + .as_ref() + .map(|d| serde_json::to_string_pretty(&d).unwrap()) + .unwrap_or_default(); + let diff = similar::TextDiff::from_lines(&js, &rust); + diff.unified_diff() + .context_radius(10) + .header("JS", "Rust") + .to_string() + }) + } + } + } + + u64_counter!( + "apollo.router.operations.introspection.both", + "Comparing JS v.s. 
Rust introspection", + 1, + "generation.is_matched" = is_matched, + "generation.js_error" = js_result.is_err(), + "generation.rust_error" = rust_result.is_err() + ); + + fn json_sort_arrays(value: &mut Value) { + match value { + Value::Array(array) => { + for item in array.iter_mut() { + json_sort_arrays(item) + } + array.sort_by(json_compare) + } + Value::Object(object) => { + for (_key, value) in object { + json_sort_arrays(value) + } + } + Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_) => {} + } + } + + fn json_compare(a: &Value, b: &Value) -> Ordering { + match (a, b) { + (Value::Null, Value::Null) => Ordering::Equal, + (Value::Bool(a), Value::Bool(b)) => a.cmp(b), + (Value::Number(a), Value::Number(b)) => { + a.as_f64().unwrap().total_cmp(&b.as_f64().unwrap()) + } + (Value::String(a), Value::String(b)) => a.cmp(b), + (Value::Array(a), Value::Array(b)) => iter_cmp(a, b, json_compare), + (Value::Object(a), Value::Object(b)) => { + iter_cmp(a, b, |(key_a, a), (key_b, b)| { + debug_assert_eq!(key_a, key_b); // Response object keys are in selection set order + json_compare(a, b) + }) + } + _ => json_discriminant(a).cmp(&json_discriminant(b)), + } + } + + // TODO: use `Iterator::cmp_by` when available: + // https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.cmp_by + // https://github.com/rust-lang/rust/issues/64295 + fn iter_cmp( + a: impl IntoIterator, + b: impl IntoIterator, + cmp: impl Fn(T, T) -> Ordering, + ) -> Ordering { + use itertools::Itertools; + for either_or_both in a.into_iter().zip_longest(b) { + match either_or_both { + itertools::EitherOrBoth::Both(a, b) => { + let ordering = cmp(a, b); + if ordering != Ordering::Equal { + return ordering; + } + } + itertools::EitherOrBoth::Left(_) => return Ordering::Less, + itertools::EitherOrBoth::Right(_) => return Ordering::Greater, + } + } + Ordering::Equal + } + + fn json_discriminant(value: &Value) -> u8 { + match value { + Value::Null => 0, + Value::Bool(_) => 1, + Value::Number(_) => 2, + Value::String(_) => 3, + Value::Array(_) => 4, + Value::Object(_) => 5, } - None => Ok(QueryPlannerContent::IntrospectionDisabled), } } @@ -530,138 +742,48 @@ impl BridgeQueryPlanner { ) .await?; - // the `statsReportKey` field should match the original query instead of the filtered query, to index them all under the same query - let operation_signature = if matches!( - self.configuration - .experimental_apollo_metrics_generation_mode, - ApolloMetricsGenerationMode::Legacy | ApolloMetricsGenerationMode::Both - ) && original_query != filtered_query - { - Some( - self.planner - .js_for_api_schema_and_introspection_and_operation_signature() - .operation_signature(original_query.clone(), operation.clone()) - .await - .map_err(QueryPlannerError::RouterBridgeError)?, - ) - } else { - None - }; - match plan_success { PlanSuccess { data: QueryPlanResult { query_plan: QueryPlan { node: Some(node) }, formatted_query_plan, + evaluated_plan_count, }, mut usage_reporting, } => { - if let Some(sig) = operation_signature { - usage_reporting.stats_report_key = sig; - } - - if matches!( - self.configuration - .experimental_apollo_metrics_generation_mode, - ApolloMetricsGenerationMode::New | ApolloMetricsGenerationMode::Both - ) { - // If the query is filtered, we want to generate the signature using the original query and generate the - // reference using the filtered query. To do this, we need to re-parse the original query here. 
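A usage note on the json_sort_arrays/json_compare helpers above: canonicalizing nested array order is what makes the two introspection responses comparable. A small approximation using serde_json rather than the router's serde_json_bytes (the string-based sort key is a simplification of json_compare):

use serde_json::{json, Value};

// Recursively sort every array so comparison ignores element order.
fn sort_arrays(value: &mut Value) {
    match value {
        Value::Array(array) => {
            for item in array.iter_mut() {
                sort_arrays(item);
            }
            // Crude but total: order elements by their serialized form.
            array.sort_by_key(|v| v.to_string());
        }
        Value::Object(object) => {
            for (_key, v) in object.iter_mut() {
                sort_arrays(v);
            }
        }
        _ => {}
    }
}

fn main() {
    let mut js = json!({ "types": [{ "name": "B" }, { "name": "A" }] });
    let mut rust = json!({ "types": [{ "name": "A" }, { "name": "B" }] });
    sort_arrays(&mut js);
    sort_arrays(&mut rust);
    assert_eq!(js, rust); // equal once array order is canonical
}
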
- let signature_doc = if original_query != filtered_query { - Query::parse_document( - &original_query, - operation.clone().as_deref(), - &self.schema, - &self.configuration, - ) - .unwrap_or(doc.clone()) - } else { - doc.clone() - }; - - let generated_usage_reporting = generate_usage_reporting( - &signature_doc.executable, - &doc.executable, - &operation, - self.schema.supergraph_schema(), - &self.signature_normalization_algorithm, - ); - - // Ignore comparison if the operation name is an empty string since there is a known issue where - // router behaviour is incorrect in that case, and it also generates incorrect usage reports. - // https://github.com/apollographql/router/issues/4837 - let is_empty_operation_name = operation.map_or(false, |s| s.is_empty()); - let is_in_both_metrics_mode = matches!( - self.configuration - .experimental_apollo_metrics_generation_mode, - ApolloMetricsGenerationMode::Both - ); - if !is_empty_operation_name && is_in_both_metrics_mode { - let comparison_result = generated_usage_reporting.compare(&usage_reporting); - - if matches!( - comparison_result, - UsageReportingComparisonResult::StatsReportKeyNotEqual - | UsageReportingComparisonResult::BothNotEqual - ) { - u64_counter!( - "apollo.router.operations.telemetry.studio.signature", - "The match status of the Apollo reporting signature generated by the JS implementation vs the Rust implementation", - 1, - "generation.is_matched" = false - ); - tracing::debug!( - "Different signatures generated between router and router-bridge.\nQuery:\n{}\nRouter:\n{}\nRouter Bridge:\n{}", - filtered_query, - generated_usage_reporting.result.stats_report_key, - usage_reporting.stats_report_key, - ); - } else { - u64_counter!( - "apollo.router.operations.telemetry.studio.signature", - "The match status of the Apollo reporting signature generated by the JS implementation vs the Rust implementation", - 1, - "generation.is_matched" = true - ); - } + // If the query is filtered, we want to generate the signature using the original query and generate the + // reference using the filtered query. To do this, we need to re-parse the original query here. 
+ let signature_doc = if original_query != filtered_query { + Query::parse_document( + &original_query, + operation.clone().as_deref(), + &self.schema, + &self.configuration, + ) + .unwrap_or(doc.clone()) + } else { + doc.clone() + }; - if matches!( - comparison_result, - UsageReportingComparisonResult::ReferencedFieldsNotEqual - | UsageReportingComparisonResult::BothNotEqual - ) { - u64_counter!( - "apollo.router.operations.telemetry.studio.references", - "The match status of the Apollo reporting references generated by the JS implementation vs the Rust implementation", - 1, - "generation.is_matched" = false - ); - tracing::debug!( - "Different referenced fields generated between router and router-bridge.\nQuery:\n{}\nRouter:\n{:?}\nRouter Bridge:\n{:?}", - filtered_query, - generated_usage_reporting.result.referenced_fields_by_type, - usage_reporting.referenced_fields_by_type, - ); - } else { - u64_counter!( - "apollo.router.operations.telemetry.studio.references", - "The match status of the Apollo reporting references generated by the JS implementation vs the Rust implementation", - 1, - "generation.is_matched" = true - ); - } - } else if matches!( - self.configuration - .experimental_apollo_metrics_generation_mode, - ApolloMetricsGenerationMode::New - ) { - usage_reporting.stats_report_key = - generated_usage_reporting.result.stats_report_key; - usage_reporting.referenced_fields_by_type = - generated_usage_reporting.result.referenced_fields_by_type; - } - } + u64_histogram!( + "apollo.router.query_planning.plan.evaluated_plans", + "Number of query plans evaluated for a query before choosing the best one", + evaluated_plan_count + ); + + let generated_usage_reporting = generate_usage_reporting( + &signature_doc.executable, + &doc.executable, + &operation, + self.schema.supergraph_schema(), + &self.signature_normalization_algorithm, + ); + + usage_reporting.stats_report_key = + generated_usage_reporting.result.stats_report_key; + usage_reporting.referenced_fields_by_type = + generated_usage_reporting.result.referenced_fields_by_type; Ok(QueryPlannerContent::Plan { plan: Arc::new(super::QueryPlan { @@ -681,13 +803,9 @@ impl BridgeQueryPlanner { query_plan: QueryPlan { node: None }, .. 
}, - mut usage_reporting, + usage_reporting, } => { failfast_debug!("empty query plan"); - if let Some(sig) = operation_signature { - usage_reporting.stats_report_key = sig; - } - Err(QueryPlannerError::EmptyPlan(usage_reporting)) } } @@ -877,42 +995,83 @@ impl BridgeQueryPlanner { selections.unauthorized.paths = unauthorized_paths; } - if selections.contains_introspection() { - // It can happen if you have a statically skipped query like { get @skip(if: true) { id name }} because it will be statically filtered with {} - if selections - .operations - .first() - .map(|op| op.selection_set.is_empty()) - .unwrap_or_default() - { - return Ok(QueryPlannerContent::Response { - response: Box::new( - graphql::Response::builder() - .data(Value::Object(Default::default())) - .build(), - ), - }); + if selections + .operation(key.operation_name.as_deref()) + .is_some_and(|op| op.selection_set.is_empty()) + { + // All selections have @skip(true) or @include(false) + // Return an empty response now to avoid dealing with an empty query plan later + return Ok(QueryPlannerContent::Response { + response: Box::new( + graphql::Response::builder() + .data(Value::Object(Default::default())) + .build(), + ), + }); + } + + let operation = doc + .executable + .operations + .get(key.operation_name.as_deref()) + .ok(); + let mut has_root_typename = false; + let mut has_schema_introspection = false; + let mut has_other_root_fields = false; + if let Some(operation) = operation { + for field in operation.root_fields(&doc.executable) { + match field.name.as_str() { + "__typename" => has_root_typename = true, + "__schema" | "__type" if operation.is_query() => { + has_schema_introspection = true + } + _ => has_other_root_fields = true, + } } - // If we have only one operation containing only the root field `__typename` - // (possibly aliased or repeated). (This does mean we fail to properly support - // {"query": "query A {__typename} query B{somethingElse}", "operationName":"A"}.) 
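The root-field classification above is a single pass over the operation's root fields; distilled into a standalone sketch (plain strings stand in for operation.root_fields(&doc)):

#[derive(Default, Debug)]
struct RootFields {
    has_root_typename: bool,
    has_schema_introspection: bool,
    has_other_root_fields: bool,
}

fn classify<'a>(root_fields: impl IntoIterator<Item = &'a str>, is_query: bool) -> RootFields {
    let mut found = RootFields::default();
    for name in root_fields {
        match name {
            "__typename" => found.has_root_typename = true,
            "__schema" | "__type" if is_query => found.has_schema_introspection = true,
            _ => found.has_other_root_fields = true,
        }
    }
    found
}

fn main() {
    // Mixing __schema with a concrete field is what triggers the
    // MIXED_INTROSPECTION error handled below.
    let mixed = classify(["__schema", "me"], true);
    assert!(mixed.has_schema_introspection && mixed.has_other_root_fields);
}
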
- if let Some(output_keys) = selections - .operations - .first() - .and_then(|op| op.is_only_typenames_with_output_keys()) - { - let operation_name = selections.operations[0].kind().to_string(); - let data: Value = Value::Object(Map::from_iter( - output_keys - .into_iter() - .map(|key| (key, Value::String(operation_name.clone().into()))), - )); + if has_root_typename && !has_schema_introspection && !has_other_root_fields { + // Fast path for __typename alone + if operation + .selection_set + .selections + .iter() + .all(|sel| sel.as_field().is_some_and(|f| f.name == "__typename")) + { + let root_type_name: serde_json_bytes::ByteString = + operation.object_type().as_str().into(); + let data = Value::Object( + operation + .root_fields(&doc.executable) + .filter(|field| field.name == "__typename") + .map(|field| { + ( + field.response_key().as_str().into(), + Value::String(root_type_name.clone()), + ) + }) + .collect(), + ); + return Ok(QueryPlannerContent::Response { + response: Box::new(graphql::Response::builder().data(data).build()), + }); + } else { + // fragments might use @include or @skip + } + } + } else { + // Should be unreachable as QueryAnalysisLayer would have returned an error + } + + if has_schema_introspection { + if has_other_root_fields { + let error = graphql::Error::builder() + .message("Mixed queries with both schema introspection and concrete fields are not supported") + .extension_code("MIXED_INTROSPECTION") + .build(); return Ok(QueryPlannerContent::Response { - response: Box::new(graphql::Response::builder().data(data).build()), + response: Box::new(graphql::Response::builder().error(error).build()), }); - } else { - return self.introspection(key.original_query).await; } + return self.introspection(key, doc).await; } if key.filtered_query != key.original_query { @@ -949,6 +1108,7 @@ impl BridgeQueryPlanner { pub struct QueryPlanResult { pub(super) formatted_query_plan: Option>, pub(super) query_plan: QueryPlan, + pub(super) evaluated_plan_count: u64, } impl QueryPlanResult { @@ -1700,4 +1860,23 @@ mod tests { "init.is_success" = false ); } + + #[test(tokio::test)] + async fn test_evaluated_plans_histogram() { + async { + let _ = plan( + EXAMPLE_SCHEMA, + include_str!("testdata/query.graphql"), + include_str!("testdata/query.graphql"), + None, + PlanOptions::default(), + ) + .await + .unwrap(); + + assert_histogram_exists!("apollo.router.query_planning.plan.evaluated_plans", u64); + } + .with_metrics() + .await; + } } diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index 20b2f75342..f6270381cb 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -207,7 +207,7 @@ where _, )| WarmUpCachingQueryKey { query: query.clone(), - operation: operation.clone(), + operation_name: operation.clone(), hash: Some(hash.clone()), metadata: metadata.clone(), plan_options: plan_options.clone(), @@ -252,7 +252,7 @@ where for query in queries { all_cache_keys.push(WarmUpCachingQueryKey { query, - operation: None, + operation_name: None, hash: None, metadata: CacheKeyMetadata::default(), plan_options: PlanOptions::default(), @@ -269,7 +269,7 @@ where let mut reused = 0usize; for WarmUpCachingQueryKey { mut query, - operation, + operation_name, hash, metadata, plan_options, @@ -278,8 +278,8 @@ where } in all_cache_keys { let context = Context::new(); - let doc = match query_analysis - .parse_document(&query, operation.as_deref()) 
+ let (doc, _operation_def) = match query_analysis + .parse_document(&query, operation_name.as_deref()) .await { Ok(doc) => doc, @@ -288,7 +288,7 @@ where let caching_key = CachingQueryKey { query: query.clone(), - operation: operation.clone(), + operation: operation_name.clone(), hash: doc.hash.clone(), schema_id: Arc::clone(&self.schema.schema_id), metadata, @@ -322,8 +322,8 @@ where }) .await; if entry.is_first() { - let doc = match query_analysis - .parse_document(&query, operation.as_deref()) + let (doc, _operation_def) = match query_analysis + .parse_document(&query, operation_name.as_deref()) .await { Ok(doc) => doc, @@ -348,7 +348,7 @@ where let request = QueryPlannerRequest { query, - operation_name: operation, + operation_name, context: context.clone(), }; @@ -546,7 +546,7 @@ where } } - // This will be overridden when running in ApolloMetricsGenerationMode::New mode + // This will be overridden by the Rust usage reporting implementation if let Some(QueryPlannerContent::Plan { plan, .. }) = &content { context.extensions().with_lock(|mut lock| { lock.insert::>(plan.usage_reporting.clone()) @@ -685,7 +685,7 @@ impl Hash for CachingQueryKey { #[derive(Debug, Clone, Hash, PartialEq, Eq)] pub(crate) struct WarmUpCachingQueryKey { pub(crate) query: String, - pub(crate) operation: Option, + pub(crate) operation_name: Option, pub(crate) hash: Option>, pub(crate) metadata: CacheKeyMetadata, pub(crate) plan_options: PlanOptions, diff --git a/apollo-router/src/services/execution/service.rs b/apollo-router/src/services/execution/service.rs index 9486e50ec1..1abcb0c5d3 100644 --- a/apollo-router/src/services/execution/service.rs +++ b/apollo-router/src/services/execution/service.rs @@ -190,7 +190,7 @@ impl ExecutionService { let mut nullified_paths: Vec = vec![]; let metrics_ref_mode = match &self.apollo_telemetry_config { - Some(conf) => conf.experimental_apollo_metrics_reference_mode, + Some(conf) => conf.metrics_reference_mode, _ => ApolloMetricsReferenceMode::default(), }; diff --git a/apollo-router/src/services/layers/query_analysis.rs b/apollo-router/src/services/layers/query_analysis.rs index 9bcdfcceae..d4280df414 100644 --- a/apollo-router/src/services/layers/query_analysis.rs +++ b/apollo-router/src/services/layers/query_analysis.rs @@ -5,8 +5,10 @@ use std::hash::Hash; use std::sync::Arc; use apollo_compiler::ast; +use apollo_compiler::executable::Operation; use apollo_compiler::validation::Valid; use apollo_compiler::ExecutableDocument; +use apollo_compiler::Node; use http::StatusCode; use lru::LruCache; use router_bridge::planner::UsageReporting; @@ -77,7 +79,7 @@ impl QueryAnalysisLayer { &self, query: &str, operation_name: Option<&str>, - ) -> Result { + ) -> Result<(ParsedDocument, Node), SpecError> { let query = query.to_string(); let operation_name = operation_name.map(|o| o.to_string()); let schema = self.schema.clone(); @@ -89,12 +91,14 @@ impl QueryAnalysisLayer { task::spawn_blocking(move || { span.in_scope(|| { - Query::parse_document( + let doc = Query::parse_document( &query, operation_name.as_deref(), schema.as_ref(), conf.as_ref(), - ) + )?; + let operation = doc.get_operation(operation_name.as_deref())?.clone(); + Ok((doc, operation)) }) }) .await @@ -146,70 +150,48 @@ impl QueryAnalysisLayer { .cloned(); let res = match entry { - None => { - match self.parse_document(&query, op_name.as_deref()).await { - Err(errors) => { - (*self.cache.lock().await).put( - QueryAnalysisKey { - query, - operation_name: op_name, - }, - Err(errors.clone()), - ); - let errors = match 
errors.into_graphql_errors() { - Ok(v) => v, - Err(errors) => vec![Error::builder() - .message(errors.to_string()) - .extension_code(errors.extension_code()) - .build()], - }; + None => match self.parse_document(&query, op_name.as_deref()).await { + Err(errors) => { + (*self.cache.lock().await).put( + QueryAnalysisKey { + query, + operation_name: op_name.clone(), + }, + Err(errors.clone()), + ); + Err(errors) + } + Ok((doc, operation)) => { + let context = Context::new(); - return Err(SupergraphResponse::builder() - .errors(errors) - .status_code(StatusCode::BAD_REQUEST) - .context(request.context) - .build() - .expect("response is valid")); + if self.enable_authorization_directives { + AuthorizationPlugin::query_analysis( + &doc, + op_name.as_deref(), + &self.schema, + &context, + ); } - Ok(doc) => { - let context = Context::new(); - - let operation = doc.executable.operations.get(op_name.as_deref()).ok(); - let operation_name = operation.as_ref().and_then(|operation| { - operation.name.as_ref().map(|s| s.as_str().to_owned()) - }); - if self.enable_authorization_directives { - AuthorizationPlugin::query_analysis( - &doc, - operation_name.as_deref(), - &self.schema, - &context, - ); - } + context + .insert(OPERATION_NAME, operation.name.clone()) + .expect("cannot insert operation name into context; this is a bug"); + let operation_kind = OperationKind::from(operation.operation_type); + context + .insert(OPERATION_KIND, operation_kind) + .expect("cannot insert operation kind in the context; this is a bug"); - context - .insert(OPERATION_NAME, operation_name) - .expect("cannot insert operation name into context; this is a bug"); - let operation_kind = - operation.map(|op| OperationKind::from(op.operation_type)); - // FIXME: I think we should not add an operation kind by default. If it's an invalid graphql operation for example it might be useful to detect there isn't operation_kind - context - .insert(OPERATION_KIND, operation_kind.unwrap_or_default()) - .expect("cannot insert operation kind in the context; this is a bug"); + (*self.cache.lock().await).put( + QueryAnalysisKey { + query, + operation_name: op_name.clone(), + }, + Ok((context.clone(), doc.clone())), + ); - (*self.cache.lock().await).put( - QueryAnalysisKey { - query, - operation_name: op_name.clone(), - }, - Ok((context.clone(), doc.clone())), - ); - - Ok((context, doc)) - } + Ok((context, doc)) } - } + }, Some(c) => c, }; @@ -250,8 +232,15 @@ impl QueryAnalysisLayer { referenced_fields_by_type: HashMap::new(), })) }); + let errors = match errors.into_graphql_errors() { + Ok(v) => v, + Err(errors) => vec![Error::builder() + .message(errors.to_string()) + .extension_code(errors.extension_code()) + .build()], + }; Err(SupergraphResponse::builder() - .errors(errors.into_graphql_errors().unwrap_or_default()) + .errors(errors) .status_code(StatusCode::BAD_REQUEST) .context(request.context) .build() @@ -270,6 +259,26 @@ pub(crate) struct ParsedDocumentInner { pub(crate) hash: Arc, } +impl ParsedDocumentInner { + pub(crate) fn get_operation( + &self, + operation_name: Option<&str>, + ) -> Result<&Node, SpecError> { + if let Ok(operation) = self.executable.operations.get(operation_name) { + Ok(operation) + } else if let Some(name) = operation_name { + Err(SpecError::UnknownOperation(name.to_owned())) + } else if self.executable.operations.is_empty() { + // Maybe not reachable? 
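For reference, the operation-resolution rules that get_operation encodes here and in the arms that follow (Ok for a resolvable operation, otherwise one of the new SpecError variants) distill to this self-contained sketch, with a simplified error enum and (name, operation) pairs standing in for the document:

#[derive(Debug, PartialEq)]
enum ResolveError {
    UnknownOperation(String),
    NoOperation,
    MultipleOperationsWithoutName,
}

fn resolve<'a>(
    operations: &'a [(Option<&'a str>, &'a str)],
    requested: Option<&str>,
) -> Result<&'a str, ResolveError> {
    match requested {
        // An explicit name must match exactly one operation.
        Some(name) => operations
            .iter()
            .find(|(n, _)| *n == Some(name))
            .map(|(_, op)| *op)
            .ok_or_else(|| ResolveError::UnknownOperation(name.to_owned())),
        // With no name, the document must contain exactly one operation.
        None => match operations {
            [] => Err(ResolveError::NoOperation),
            [(_, op)] => Ok(*op),
            _ => Err(ResolveError::MultipleOperationsWithoutName),
        },
    }
}
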
+ // A valid document is non-empty and has no unused fragments + Err(SpecError::NoOperation) + } else { + debug_assert!(self.executable.operations.len() > 1); + Err(SpecError::MultipleOperationWithoutOperationName) + } + } +} + impl Display for ParsedDocumentInner { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) diff --git a/apollo-router/src/spec/mod.rs b/apollo-router/src/spec/mod.rs index 06229783ff..786eb8a056 100644 --- a/apollo-router/src/spec/mod.rs +++ b/apollo-router/src/spec/mod.rs @@ -52,6 +52,10 @@ pub(crate) enum SpecError { ValidationError(ValidationErrors), /// Unknown operation named "{0}" UnknownOperation(String), + /// Must provide operation name if query contains multiple operations. + MultipleOperationWithoutOperationName, + /// Must provide an operation. + NoOperation, /// subscription operation is not supported SubscriptionNotSupported, /// query hashing failed: {0} @@ -84,6 +88,8 @@ impl ErrorExtension for SpecError { SpecError::ParseError(_) => "PARSING_ERROR", SpecError::ValidationError(_) => "GRAPHQL_VALIDATION_FAILED", SpecError::UnknownOperation(_) => "GRAPHQL_VALIDATION_FAILED", + SpecError::MultipleOperationWithoutOperationName => "GRAPHQL_VALIDATION_FAILED", + SpecError::NoOperation => "GRAPHQL_VALIDATION_FAILED", SpecError::SubscriptionNotSupported => "SUBSCRIPTION_NOT_SUPPORTED", SpecError::QueryHashing(_) => "QUERY_HASHING", } diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index d40b247b4d..21f2914035 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -15,7 +15,6 @@ use indexmap::IndexSet; use serde::Deserialize; use serde::Serialize; use serde_json_bytes::ByteString; -use tower::BoxError; use tracing::level_filters::LevelFilter; use self::change::QueryHashVisitor; @@ -312,12 +311,13 @@ impl Query { })) } + #[cfg(test)] pub(crate) fn parse( query: impl Into, operation_name: Option<&str>, schema: &Schema, configuration: &Configuration, - ) -> Result { + ) -> Result { let query = query.into(); let doc = Self::parse_document(&query, operation_name, schema, configuration)?; @@ -995,10 +995,6 @@ impl Query { } } - pub(crate) fn contains_introspection(&self) -> bool { - self.operations.iter().any(Operation::is_introspection) - } - pub(crate) fn variable_value<'a>( &'a self, operation_name: Option<&str>, @@ -1158,43 +1154,6 @@ impl Operation { }) } - /// Checks to see if this is a query or mutation containing only - /// `__typename` at the root level (possibly more than one time, possibly - /// with aliases). If so, returns Some with a Vec of the output keys - /// corresponding. - pub(crate) fn is_only_typenames_with_output_keys(&self) -> Option> { - if self.selection_set.is_empty() { - None - } else { - let output_keys: Vec = self - .selection_set - .iter() - .filter_map(|s| s.output_key_if_typename_field()) - .collect(); - if output_keys.len() == self.selection_set.len() { - Some(output_keys) - } else { - None - } - } - } - - fn is_introspection(&self) -> bool { - // If the only field is `__typename` it's considered as an introspection query - if self.is_only_typenames_with_output_keys().is_some() { - return true; - } - self.selection_set.iter().all(|sel| match sel { - Selection::Field { name, .. 
    } => { - let name = name.as_str(); - // `__typename` can only be resolved in runtime, - // so this query cannot be seen as an introspection query - name == "__schema" || name == "__type" - } - _ => false, - }) - } - pub(crate) fn kind(&self) -> &OperationKind { &self.kind } diff --git a/apollo-router/src/spec/query/tests.rs b/apollo-router/src/spec/query/tests.rs index d05aa767b6..11f078af5d 100644 --- a/apollo-router/src/spec/query/tests.rs +++ b/apollo-router/src/spec/query/tests.rs @@ -5306,67 +5306,6 @@ fn fragment_on_interface() { .test(); } -#[test] -fn parse_introspection_query() { - let schema = "type Query { - foo: String - stuff: Bar - array: [Bar] - baz: String - } - type Bar { - bar: String - baz: String - }"; - - let schema = with_supergraph_boilerplate(schema, "Query"); - let schema = Schema::parse(&schema, &Default::default()).expect("could not parse schema"); - - let query = "{ - __type(name: \"Bar\") { - name - fields { - name - type { - name - } - } - } - }"; - assert!(Query::parse(query, None, &schema, &Default::default()) - .unwrap() - .operations - .first() - .unwrap() - .is_introspection()); - - let query = "query { - __schema { - queryType { - name - } - } - }"; - - assert!(Query::parse(query, None, &schema, &Default::default()) - .unwrap() - .operations - .first() - .unwrap() - .is_introspection()); - - let query = "query { - __typename - }"; - - assert!(Query::parse(query, None, &schema, &Default::default()) - .unwrap() - .operations - .first() - .unwrap() - .is_introspection()); -} - #[test] fn fragment_on_union() { let schema = "type Query { diff --git a/apollo-router/src/spec/selection.rs b/apollo-router/src/spec/selection.rs index e06145e051..0c0cd545b8 100644 --- a/apollo-router/src/spec/selection.rs +++ b/apollo-router/src/spec/selection.rs @@ -194,15 +194,6 @@ impl Selection { matches!(self, Selection::Field {name, ..} if name.as_str() == TYPENAME) } - pub(crate) fn output_key_if_typename_field(&self) -> Option<ByteString> { - match self { - Selection::Field { name, alias, ..
    
    } if name.as_str() == TYPENAME => { - alias.as_ref().or(Some(name)).cloned() - } - _ => None, - } - } - pub(crate) fn contains_error_path(&self, path: &[PathElement], fragments: &Fragments) -> bool { match (path.first(), self) { (None, _) => true, diff --git a/apollo-router/src/uplink/license_enforcement.rs b/apollo-router/src/uplink/license_enforcement.rs index 743fbbe543..1f20719436 100644 --- a/apollo-router/src/uplink/license_enforcement.rs +++ b/apollo-router/src/uplink/license_enforcement.rs @@ -388,7 +388,7 @@ impl LicenseEnforcementReport { .name("Demand control plugin") .build(), ConfigurationRestriction::builder() - .path("$.telemetry.apollo.experimental_apollo_metrics_reference_mode") + .path("$.telemetry.apollo.metrics_reference_mode") .value("extended") .name("Apollo metrics extended references") .build(), diff --git a/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap b/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap index baa48d4a8a..cf517de1b5 100644 --- a/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap +++ b/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap @@ -58,4 +58,4 @@ Configuration yaml: .demand_control * Apollo metrics extended references - .telemetry.apollo.experimental_apollo_metrics_reference_mode + .telemetry.apollo.metrics_reference_mode diff --git a/apollo-router/src/uplink/testdata/restricted.router.yaml b/apollo-router/src/uplink/testdata/restricted.router.yaml index b354a9a239..fbdc5a0957 100644 --- a/apollo-router/src/uplink/testdata/restricted.router.yaml +++ b/apollo-router/src/uplink/testdata/restricted.router.yaml @@ -69,7 +69,7 @@ preview_entity_cache: telemetry: apollo: - experimental_apollo_metrics_reference_mode: extended + metrics_reference_mode: extended instrumentation: spans: router: diff --git a/apollo-router/tests/common.rs b/apollo-router/tests/common.rs index 35a1115495..beccfcdddd 100644 --- a/apollo-router/tests/common.rs +++ b/apollo-router/tests/common.rs @@ -87,6 +87,7 @@ pub struct IntegrationTest { _subgraph_overrides: HashMap<String, String>, bind_address: Arc<Mutex<Option<SocketAddr>>>, redis_namespace: String, + log: String, } impl IntegrationTest { @@ -278,6 +279,7 @@ impl IntegrationTest { collect_stdio: Option<tokio::sync::oneshot::Sender<String>>, supergraph: Option<PathBuf>, mut subgraph_overrides: HashMap<String, String>, + log: Option<String>, ) -> Self { let redis_namespace = Uuid::new_v4().to_string(); let telemetry = telemetry.unwrap_or_default(); @@ -346,6 +348,7 @@ impl IntegrationTest { _tracer_provider_subgraph: tracer_provider_subgraph, telemetry, redis_namespace, + log: log.unwrap_or_else(|| "error,apollo_router=info".to_owned()), } } @@ -380,15 +383,15 @@ } router - .args([ + .args(dbg!([ "--hr", "--config", &self.test_config_location.to_string_lossy(), "--supergraph", &self.test_schema_location.to_string_lossy(), "--log", - "error,apollo_router=info", - ]) + &self.log, + ])) .stdout(Stdio::piped()); let mut router = router.spawn().expect("router should start"); diff --git a/apollo-router/tests/fixtures/introspect_full_schema.graphql b/apollo-router/tests/fixtures/introspect_full_schema.graphql new file mode 100644 index 0000000000..a0a3100c4f --- /dev/null +++ b/apollo-router/tests/fixtures/introspect_full_schema.graphql @@ -0,0 +1,98 @@ +query IntrospectionQuery { + __schema { + queryType { + name + } + mutationType
    
{ + name + } + subscriptionType { + name + } + types { + ...FullType + } + directives { + name + description + locations + args(includeDeprecated: true) { + ...InputValue + } + } + } +} +fragment FullType on __Type { + kind + name + description + fields(includeDeprecated: true) { + name + description + args(includeDeprecated: true) { + ...InputValue + } + type { + ...TypeRef + } + isDeprecated + deprecationReason + } + inputFields(includeDeprecated: true) { + ...InputValue + } + interfaces { + ...TypeRef + } + enumValues(includeDeprecated: true) { + name + description + isDeprecated + deprecationReason + } + possibleTypes { + ...TypeRef + } +} +fragment InputValue on __InputValue { + name + description + type { + ...TypeRef + } + defaultValue + isDeprecated + deprecationReason +} +fragment TypeRef on __Type { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + } + } + } + } + } + } + } +} diff --git a/apollo-router/tests/integration/fixtures/query_planner_redis_config_update_query_planner_mode.router.yaml b/apollo-router/tests/integration/fixtures/query_planner_redis_config_update_query_planner_mode.router.yaml index 704cf8fb60..195306ed62 100644 --- a/apollo-router/tests/integration/fixtures/query_planner_redis_config_update_query_planner_mode.router.yaml +++ b/apollo-router/tests/integration/fixtures/query_planner_redis_config_update_query_planner_mode.router.yaml @@ -8,5 +8,4 @@ supergraph: - redis://localhost:6379 ttl: 10s -experimental_query_planner_mode: new -experimental_apollo_metrics_generation_mode: new \ No newline at end of file +experimental_query_planner_mode: new \ No newline at end of file diff --git a/apollo-router/tests/integration/introspection.rs b/apollo-router/tests/integration/introspection.rs new file mode 100644 index 0000000000..38fe62a70d --- /dev/null +++ b/apollo-router/tests/integration/introspection.rs @@ -0,0 +1,256 @@ +use apollo_router::plugin::test::MockSubgraph; +use apollo_router::services::supergraph::Request; +use serde_json::json; +use tower::ServiceExt; + +use crate::integration::IntegrationTest; + +#[tokio::test] +async fn simple_legacy_mode() { + let request = Request::fake_builder() + .query("{ __schema { queryType { name } } }") + .build() + .unwrap(); + let response = make_request(request, "legacy").await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "__schema": { + "queryType": { + "name": "Query" + } + } + } + } + "###); +} + +#[tokio::test] +async fn simple_new_mode() { + let request = Request::fake_builder() + .query("{ __schema { queryType { name } } }") + .build() + .unwrap(); + let response = make_request(request, "new").await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "__schema": { + "queryType": { + "name": "Query" + } + } + } + } + "###); +} + +#[tokio::test] +async fn top_level_inline_fragment() { + let request = Request::fake_builder() + .query("{ ... { __schema { queryType { name } } } }") + .build() + .unwrap(); + let response = make_request(request, "legacy").await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "__schema": { + "queryType": { + "name": "Query" + } + } + } + } + "###); +} + +#[tokio::test] +async fn variable() { + let request = Request::fake_builder() + .query( + r#" + query($d: Boolean!) 
{ + __type(name: "Query") { + fields(includeDeprecated: $d) { + name + } + } + } + "#, + ) + .variable("d", true) + .build() + .unwrap(); + let response = make_request(request, "legacy").await; + insta::assert_json_snapshot!(response, @r###" + { + "errors": [ + { + "message": "introspection error : Variable \"$d\" of required type \"Boolean!\" was not provided.", + "extensions": { + "code": "INTROSPECTION_ERROR" + } + } + ] + } + "###); +} + +#[tokio::test] +async fn two_operations() { + let request = Request::fake_builder() + .query( + r#" + query ThisOp { __schema { queryType { name } } } + query OtherOp { me { id } } + "#, + ) + .operation_name("ThisOp") + .build() + .unwrap(); + let response = make_request(request, "legacy").await; + insta::assert_json_snapshot!(response, @r###" + { + "errors": [ + { + "message": "Schema introspection is currently not supported with multiple operations in the same document", + "extensions": { + "code": "INTROSPECTION_WITH_MULTIPLE_OPERATIONS" + } + } + ] + } + "###); +} + +#[tokio::test] +async fn operation_name_error() { + let request = Request::fake_builder() + .query( + r#" + query ThisOp { me { id } } + query OtherOp { me { id } } + "#, + ) + .build() + .unwrap(); + let response = make_request(request, "legacy").await; + insta::assert_json_snapshot!(response, @r###" + { + "errors": [ + { + "message": "Must provide operation name if query contains multiple operations.", + "extensions": { + "code": "GRAPHQL_VALIDATION_FAILED" + } + } + ] + } + "###); + + let request = Request::fake_builder() + .query("query ThisOp { me { id } }") + .operation_name("NonExistentOp") + .build() + .unwrap(); + let response = make_request(request, "legacy").await; + insta::assert_json_snapshot!(response, @r###" + { + "errors": [ + { + "message": "Unknown operation named \"NonExistentOp\"", + "extensions": { + "code": "GRAPHQL_VALIDATION_FAILED" + } + } + ] + } + "###); +} + +#[tokio::test] +async fn mixed() { + let request = Request::fake_builder() + .query( + r#"{ + __schema { queryType { name } } + me { id } + }"#, + ) + .build() + .unwrap(); + let response = make_request(request, "legacy").await; + insta::assert_json_snapshot!(response, @r###" + { + "errors": [ + { + "message": "Mixed queries with both schema introspection and concrete fields are not supported", + "extensions": { + "code": "MIXED_INTROSPECTION" + } + } + ] + } + "###); +} + +async fn make_request(request: Request, mode: &str) -> apollo_router::graphql::Response { + apollo_router::TestHarness::builder() + .configuration_json(json!({ + "experimental_introspection_mode": mode, + "supergraph": { + "introspection": true, + }, + "include_subgraph_errors": { + "all": true, + }, + })) + .unwrap() + .subgraph_hook(|subgraph_name, default| match subgraph_name { + "accounts" => MockSubgraph::builder() + .with_json( + json!({"query": "{me{id}}"}), + json!({"data": {"me": {"id": 1}}}), + ) + .build() + .boxed(), + _ => default, + }) + .build_supergraph() + .await + .unwrap() + .oneshot(request) + .await + .unwrap() + .next_response() + .await + .unwrap() +} + +#[tokio::test] +async fn both_mode_integration() { + let mut router = IntegrationTest::builder() + .config( + " + experimental_introspection_mode: both + supergraph: + introspection: true + ", + ) + .supergraph("../examples/graphql/local.graphql") + .log("error,apollo_router=info,apollo_router::query_planner=debug") + .build() + .await; + router.start().await; + router.assert_started().await; + router + .execute_query(&json!({ + "query": 
    include_str!("../fixtures/introspect_full_schema.graphql"), + })) + .await; + // TODO: should be a match after https://apollographql.atlassian.net/browse/ROUTER-703 + // router.assert_log_contains("Introspection match! 🎉").await; + router.assert_log_contains("Introspection mismatch").await; + router.graceful_shutdown().await; +} diff --git a/apollo-router/tests/integration/mod.rs b/apollo-router/tests/integration/mod.rs index f4c840d9e4..c383b5348f 100644 --- a/apollo-router/tests/integration/mod.rs +++ b/apollo-router/tests/integration/mod.rs @@ -6,11 +6,14 @@ pub(crate) use common::IntegrationTest; mod coprocessor; mod docs; mod file_upload; +mod introspection; mod lifecycle; mod operation_limits; +mod operation_name; mod query_planner; mod subgraph_response; mod traffic_shaping; +mod typename; #[cfg(any(not(feature = "ci"), all(target_arch = "x86_64", target_os = "linux")))] mod redis; diff --git a/apollo-router/tests/integration/operation_name.rs b/apollo-router/tests/integration/operation_name.rs new file mode 100644 index 0000000000..1359b54398 --- /dev/null +++ b/apollo-router/tests/integration/operation_name.rs @@ -0,0 +1,202 @@ +use apollo_router::plugin::test::MockSubgraph; +use apollo_router::services::supergraph::Request; +use serde_json::json; +use tower::ServiceExt; + +#[tokio::test] +async fn empty_document() { + let request = Request::fake_builder() + .query("# intentionally left blank") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "errors": [ + { + "message": "Syntax Error: Unexpected <EOF>.", + "extensions": { + "code": "GRAPHQL_PARSE_FAILED" + } + } + ] + } + "###); +} + +#[tokio::test] +async fn zero_operation() { + let request = Request::fake_builder() + .query("fragment F on Query { me { id }}") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "errors": [ + { + "message": "Fragment \"F\" is never used.", + "locations": [ + { + "line": 1, + "column": 1 + } + ], + "extensions": { + "code": "GRAPHQL_VALIDATION_FAILED" + } + } + ] + } + "###); +} + +#[tokio::test] +async fn anonymous_operation() { + let request = Request::fake_builder() + .query("{ me { id } }") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "me": { + "id": 1 + } + } + } + "###); +} + +#[tokio::test] +async fn named_operation() { + let request = Request::fake_builder() + .query("query Op { me { id } }") + .operation_name("Op") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "me": { + "id": 1 + } + } + } + "###); +} + +#[tokio::test] +async fn two_named_operations() { + let request = Request::fake_builder() + .query( + r#" + query Op { me { id } } + query OtherOp { me { name } } + "#, + ) + .operation_name("Op") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "me": { + "id": 1 + } + } + } + "###); +} + +#[tokio::test] +async fn missing_operation_name() { + let request = Request::fake_builder() + .query( + r#" + query Op { me { id } } + query OtherOp { me { name } } + "#, + ) + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "errors": [ + { + "message": "Must provide operation name if query contains multiple operations.",
    
"extensions": { + "code": "GRAPHQL_VALIDATION_FAILED" + } + } + ] + } + "###); +} + +#[tokio::test] +async fn incorrect_operation_name() { + let request = Request::fake_builder() + .query( + r#" + query Op { me { id } } + query OtherOp { me { name } } + "#, + ) + .operation_name("SecretThirdOp") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "errors": [ + { + "message": "Unknown operation named \"SecretThirdOp\"", + "extensions": { + "code": "GRAPHQL_VALIDATION_FAILED" + } + } + ] + } + "###); +} + +async fn make_request(request: Request) -> apollo_router::graphql::Response { + apollo_router::TestHarness::builder() + .configuration_json(json!({ + "include_subgraph_errors": { + "all": true, + }, + })) + .unwrap() + .subgraph_hook(|subgraph_name, default| match subgraph_name { + "accounts" => MockSubgraph::builder() + .with_json( + json!({"query": "{me{id}}"}), + json!({"data": {"me": {"id": 1}}}), + ) + .with_json( + json!({ + "query": "query Op__accounts__0{me{id}}", + "operationName": "Op__accounts__0", + }), + json!({"data": {"me": {"id": 1}}}), + ) + .build() + .boxed(), + _ => default, + }) + .build_supergraph() + .await + .unwrap() + .oneshot(request) + .await + .unwrap() + .next_response() + .await + .unwrap() +} diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__introspection__simple.snap b/apollo-router/tests/integration/snapshots/integration_tests__integration__introspection__simple.snap new file mode 100644 index 0000000000..9ba33edac1 --- /dev/null +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__introspection__simple.snap @@ -0,0 +1,17 @@ +--- +source: apollo-router/tests/integration/introspection.rs +expression: response +--- +errors: + - message: "Field \"__schema\" of type \"Query\" must have a selection of subfields. Did you mean \"__schema { ... }\"?" + locations: + - line: 1 + column: 3 + extensions: + code: GRAPHQL_VALIDATION_FAILED + - message: "Cannot query field \"query\" on type \"__Schema\"." 
+ locations: + - line: 1 + column: 14 + extensions: + code: GRAPHQL_VALIDATION_FAILED diff --git a/apollo-router/tests/integration/telemetry/datadog.rs b/apollo-router/tests/integration/telemetry/datadog.rs index 613242ca39..d2d3f58d25 100644 --- a/apollo-router/tests/integration/telemetry/datadog.rs +++ b/apollo-router/tests/integration/telemetry/datadog.rs @@ -1,5 +1,6 @@ extern crate core; +use std::collections::HashMap; use std::collections::HashSet; use std::time::Duration; @@ -51,6 +52,7 @@ async fn test_default_span_names() -> Result<(), BoxError> { .unwrap(), id.to_datadog() ); + router.graceful_shutdown().await; TraceSpec::builder() .services(["client", "router", "subgraph"].into()) .span_names( @@ -72,7 +74,6 @@ async fn test_default_span_names() -> Result<(), BoxError> { .build() .validate_trace(id) .await?; - router.graceful_shutdown().await; Ok(()) } @@ -103,6 +104,7 @@ async fn test_override_span_names() -> Result<(), BoxError> { .unwrap(), id.to_datadog() ); + router.graceful_shutdown().await; TraceSpec::builder() .services(["client", "router", "subgraph"].into()) .span_names( @@ -124,7 +126,6 @@ async fn test_override_span_names() -> Result<(), BoxError> { .build() .validate_trace(id) .await?; - router.graceful_shutdown().await; Ok(()) } @@ -155,6 +156,7 @@ async fn test_override_span_names_late() -> Result<(), BoxError> { .unwrap(), id.to_datadog() ); + router.graceful_shutdown().await; TraceSpec::builder() .services(["client", "router", "subgraph"].into()) .span_names( @@ -176,7 +178,6 @@ async fn test_override_span_names_late() -> Result<(), BoxError> { .build() .validate_trace(id) .await?; - router.graceful_shutdown().await; Ok(()) } @@ -205,6 +206,7 @@ async fn test_basic() -> Result<(), BoxError> { .unwrap(), id.to_datadog() ); + router.graceful_shutdown().await; TraceSpec::builder() .operation_name("ExampleQuery") .services(["client", "router", "subgraph"].into()) @@ -239,7 +241,74 @@ async fn test_basic() -> Result<(), BoxError> { .build() .validate_trace(id) .await?; + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_with_parent_span() -> Result<(), BoxError> { + if !graph_os_enabled() { + return Ok(()); + } + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Datadog) + .config(include_str!("fixtures/datadog.router.yaml")) + .build() + .await; + + router.start().await; + router.assert_started().await; + + let query = json!({"query":"query ExampleQuery {topProducts{name}}","variables":{}}); + let mut headers = HashMap::new(); + headers.insert( + "traceparent".to_string(), + String::from("00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"), + ); + let (id, result) = router.execute_query_with_headers(&query, headers).await; + assert_eq!( + result + .headers() + .get("apollo-custom-trace-id") + .unwrap() + .to_str() + .unwrap(), + id.to_datadog() + ); router.graceful_shutdown().await; + TraceSpec::builder() + .operation_name("ExampleQuery") + .services(["client", "router", "subgraph"].into()) + .span_names( + [ + "query_planning", + "client_request", + "ExampleQuery__products__0", + "products", + "fetch", + "/", + "execution", + "ExampleQuery", + "subgraph server", + "parse_query", + ] + .into(), + ) + .measured_spans( + [ + "query_planning", + "subgraph", + "http_request", + "subgraph_request", + "router", + "execution", + "supergraph", + "parse_query", + ] + .into(), + ) + .build() + .validate_trace(id) + .await?; Ok(()) } @@ -314,6 +383,7 @@ async fn test_resource_mapping_override() -> Result<(), BoxError> { 
.get("apollo-custom-trace-id") .unwrap() .is_empty()); + router.graceful_shutdown().await; TraceSpec::builder() .services(["client", "router", "subgraph"].into()) .span_names( @@ -334,7 +404,6 @@ async fn test_resource_mapping_override() -> Result<(), BoxError> { .build() .validate_trace(id) .await?; - router.graceful_shutdown().await; Ok(()) } @@ -359,6 +428,7 @@ async fn test_span_metrics() -> Result<(), BoxError> { .get("apollo-custom-trace-id") .unwrap() .is_empty()); + router.graceful_shutdown().await; TraceSpec::builder() .operation_name("ExampleQuery") .services(["client", "router", "subgraph"].into()) @@ -381,7 +451,6 @@ async fn test_span_metrics() -> Result<(), BoxError> { .build() .validate_trace(id) .await?; - router.graceful_shutdown().await; Ok(()) } @@ -426,12 +495,12 @@ impl TraceSpec { .await?; tracing::debug!("{}", serde_json::to_string_pretty(&trace)?); self.verify_trace_participants(&trace)?; + self.verify_spans_present(&trace)?; + self.validate_measured_spans(&trace)?; self.verify_operation_name(&trace)?; self.verify_priority_sampled(&trace)?; self.verify_version(&trace)?; - self.verify_spans_present(&trace)?; self.validate_span_kinds(&trace)?; - self.validate_measured_spans(&trace)?; Ok(()) } @@ -581,6 +650,8 @@ impl TraceSpec { fn verify_priority_sampled(&self, trace: &Value) -> Result<(), BoxError> { let binding = trace.select_path("$.._sampling_priority_v1")?; let sampling_priority = binding.first(); + // having this priority set to 1.0 everytime is not a problem as we're doing pre sampling in the full telemetry stack + // So basically if the trace was not sampled it wouldn't get to this stage and so nothing would be sent assert_eq!( sampling_priority .expect("sampling priority expected") diff --git a/apollo-router/tests/integration/telemetry/fixtures/datadog.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/datadog.router.yaml index f95964ae6d..d6ecc66607 100644 --- a/apollo-router/tests/integration/telemetry/fixtures/datadog.router.yaml +++ b/apollo-router/tests/integration/telemetry/fixtures/datadog.router.yaml @@ -5,6 +5,9 @@ telemetry: enabled: true header_name: apollo-custom-trace-id format: datadog + propagation: + trace_context: true + jaeger: true common: service_name: router resource: diff --git a/apollo-router/tests/integration/telemetry/fixtures/json.span_attributes.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/json.span_attributes.router.yaml new file mode 100644 index 0000000000..324a22780d --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/json.span_attributes.router.yaml @@ -0,0 +1,136 @@ +telemetry: + instrumentation: + spans: + mode: spec_compliant + router: + attributes: + should_not_log: + static: hello + too_big: + static: true + another_one: + static: foo + graphql.operation.name: + operation_name: string + supergraph: + attributes: + graphql.document: true + subgraph: + attributes: + too_big: + static: "nope" + events: + router: + # Standard events + request: info + response: info + error: info + + # Custom events + my.disabled_request_event: + message: "my event message" + level: off + on: request + attributes: + http.request.body.size: true + # Only log when the x-log-request header is `log` + condition: + eq: + - "log" + - request_header: "x-log-request" + my.request_event: + message: "my event message" + level: info + on: request + attributes: + http.request.body.size: true + my.response_event: + message: "my response event message" + level: info + on: response + attributes: + 
http.response.body.size: true + static_one: + static: test + supergraph: + # Standard events + request: info + response: info + error: info + + # Custom events + my.disabled_request.event: + message: "my event message" + level: off + on: request + # Only log when the x-log-request header is `log` + condition: + eq: + - "log" + - request_header: "x-log-request" + my.request.event: + message: "my event message" + level: info + on: request + # Only log when the x-log-request header is `log` + condition: + eq: + - "log" + - request_header: "x-log-request" + my.response_event: + message: "my response event message" + level: warn + on: response + condition: + eq: + - "log" + - response_header: "x-log-request" + subgraph: + # Standard events + request: info + response: warn + error: error + + # Custom events + my.disabled_request.event: + message: "my event message" + level: off + on: request + my.request.event: + message: "my event message" + level: info + on: request + my.response.event: + message: "my response event message" + level: error + on: response + attributes: + subgraph.name: true + response_status: + subgraph_response_status: code + "my.custom.attribute": + subgraph_response_data: "$.*" + default: "missing" + exporters: + tracing: + propagation: + trace_context: true + jaeger: + enabled: true + batch_processor: + scheduled_delay: 100ms + agent: + endpoint: default + logging: + experimental_when_header: + - name: content-type + value: "application/json" + body: true + stdout: + format: + json: + display_span_list: false + display_current_span: false + span_attributes: + - graphql.document + - too_big diff --git a/apollo-router/tests/integration/telemetry/logging.rs b/apollo-router/tests/integration/telemetry/logging.rs index 74cecef1c5..a50f0fa20e 100644 --- a/apollo-router/tests/integration/telemetry/logging.rs +++ b/apollo-router/tests/integration/telemetry/logging.rs @@ -36,6 +36,47 @@ async fn test_json() -> Result<(), BoxError> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn test_json_promote_span_attributes() -> Result<(), BoxError> { + if !graph_os_enabled() { + eprintln!("test skipped"); + return Ok(()); + } + + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Jaeger) + .config(include_str!("fixtures/json.span_attributes.router.yaml")) + .build() + .await; + + router.start().await; + router.assert_started().await; + + let query = json!({"query":"query ExampleQuery {topProducts{name}}","variables":{}}); + router.execute_query(&query).await; + router.assert_log_contains("trace_id").await; + router.execute_query(&query).await; + router.assert_log_contains("span_id").await; + router.execute_query(&query).await; + router.assert_log_contains(r#""static_one":"test""#).await; + router.execute_query(&query).await; + router.assert_log_contains(r#""response_status":200"#).await; + router.execute_query(&query).await; + router.assert_log_contains(r#""too_big":true"#).await; + router.execute_query(&query).await; + router.assert_log_contains(r#""too_big":"nope""#).await; + router.execute_query(&query).await; + router + .assert_log_contains(r#""graphql.document":"query ExampleQuery {topProducts{name}}""#) + .await; + router.execute_query(&query).await; + router.assert_log_not_contains(r#""should_not_log""#).await; + router.assert_log_not_contains(r#""another_one""#).await; + router.graceful_shutdown().await; + + Ok(()) +} + #[tokio::test(flavor = "multi_thread")] async fn test_json_uuid_format() -> Result<(), BoxError> { if !graph_os_enabled() { diff --git 
a/apollo-router/tests/integration/typename.rs b/apollo-router/tests/integration/typename.rs new file mode 100644 index 0000000000..4331333594 --- /dev/null +++ b/apollo-router/tests/integration/typename.rs @@ -0,0 +1,130 @@ +use apollo_router::services::supergraph::Request; +use serde_json::json; +use tower::ServiceExt; + +const SCHEMA: &str = r#" +schema + @core(feature: "https://specs.apollo.dev/core/v0.1"), + @core(feature: "https://specs.apollo.dev/join/v0.1") +{ + query: MyQuery + mutation: MyMutation +} + +directive @core(feature: String!) repeatable on SCHEMA + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet) on FIELD_DEFINITION + +directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on OBJECT | INTERFACE + +directive @join__owner(graph: join__Graph!) on OBJECT | INTERFACE + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +scalar join__FieldSet + +enum join__Graph { + ACCOUNTS @join__graph(name: "accounts" url: "http://localhost:4001") + INVENTORY @join__graph(name: "inventory" url: "http://localhost:4004") + PRODUCTS @join__graph(name: "products" url: "http://localhost:4003") + REVIEWS @join__graph(name: "reviews" url: "http://localhost:4002") +} + +type MyMutation { + createThing: String +} + +type MyQuery { + thing: String +} +"#; + +#[tokio::test] +async fn basic() { + let request = Request::fake_builder() + .query("{ __typename }") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "__typename": "MyQuery" + } + } + "###); +} + +#[tokio::test] +async fn aliased() { + let request = Request::fake_builder() + .query("{ n: __typename }") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "n": "MyQuery" + } + } + "###); +} + +#[tokio::test] +async fn mutation() { + let request = Request::fake_builder() + .query("mutation { __typename }") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "__typename": "MyMutation" + } + } + "###); +} + +#[tokio::test] +async fn two_named_operations() { + let request = Request::fake_builder() + .query( + r#" + mutation Op { __typename } + query OtherOp { __typename } + "#, + ) + .operation_name("OtherOp") + .build() + .unwrap(); + let response = make_request(request).await; + insta::assert_json_snapshot!(response, @r###" + { + "data": { + "__typename": "MyQuery" + } + } + "###); +} + +async fn make_request(request: Request) -> apollo_router::graphql::Response { + apollo_router::TestHarness::builder() + .configuration_json(json!({ + "include_subgraph_errors": { + "all": true, + }, + })) + .unwrap() + .schema(SCHEMA) + .build_supergraph() + .await + .unwrap() + .oneshot(request) + .await + .unwrap() + .next_response() + .await + .unwrap() +} diff --git a/dockerfiles/Dockerfile.router b/dockerfiles/Dockerfile.router index eba93272a4..2a2751fede 100644 --- a/dockerfiles/Dockerfile.router +++ b/dockerfiles/Dockerfile.router @@ -65,7 +65,7 @@ RUN \ \nset -e \ \n \ \nif [ -f "/usr/bin/heaptrack" ]; then \ -\n exec heaptrack -o /dist/data/router_heaptrack /dist/router "$@" \ +\n exec heaptrack -o /dist/data/$(hostname)/router_heaptrack /dist/router "$@" \ \nelse \ \n exec /dist/router "$@" \ \nfi \ diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 
83c20bb40e..fc25cbba4f 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.53.0 + image: ghcr.io/apollographql/router:v1.54.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 94900947d1..fd27332b78 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.53.0 + image: ghcr.io/apollographql/router:v1.54.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index f719e3de99..4129159c1b 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.53.0 + image: ghcr.io/apollographql/router:v1.54.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/configuration/operation-limits.mdx b/docs/source/configuration/operation-limits.mdx index 48551fa971..2f24ce620b 100644 --- a/docs/source/configuration/operation-limits.mdx +++ b/docs/source/configuration/operation-limits.mdx @@ -34,6 +34,7 @@ limits: Each limit takes an integer value. You can define any combination of [supported limits](#supported-limits). ## Supported limits + ### `max_depth` Limits the deepest nesting of selection sets in an operation, including fields in fragments. @@ -149,3 +150,92 @@ Whenever your router rejects a request because it exceeds an operation limit, th ``` If you run your router in [`warn_only` mode](#warn_only-mode), the router logs the limit violation but executes the operation as normal, returning a 200 status code with the expected response. + +## Using telemetry to set operation limits + +Router telemetry can help you set operation limits, especially when you have a large number of existing operations. You can measure incoming operations over a fixed duration, then use the captured data as a baseline configuration. + +### Logging values + +To log limit information about every operation, you can configure the router with a [custom event](/router/configuration/telemetry/instrumentation/events#custom-events) to log the values of aliases, depth, height, and root_fields for each operation: + +```yaml title="router.yaml" +telemetry: + instrumentation: + events: + supergraph: + OPERATION_LIMIT_INFO: + message: operation limit info + on: response + level: info + attributes: + graphql.operation.name: true + query.aliases: + query: aliases + query.depth: + query: depth + query.height: + query: height + query.root_fields: + query: root_fields +``` + + + +For a large amount of traffic, you may prefer to collect and export metrics to your APM instead. 
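    To make the four logged values concrete, here is a self-contained Rust sketch that computes them for the operation `{ topProducts { name reviews { author: user { name } } } }`. This is an illustration under stated assumptions, not router code: the `Field` type is hypothetical, and `height` is simplified here to counting distinct field names.

    ```rust
    use std::collections::HashSet;

    // Hypothetical operation representation; not the router's internal types.
    struct Field {
        name: &'static str,
        alias: Option<&'static str>,
        selections: Vec<Field>,
    }

    // Deepest nesting of selection sets.
    fn depth(fields: &[Field]) -> usize {
        fields.iter().map(|f| 1 + depth(&f.selections)).max().unwrap_or(0)
    }

    // Number of aliased fields anywhere in the operation.
    fn aliases(fields: &[Field]) -> usize {
        fields.iter().map(|f| usize::from(f.alias.is_some()) + aliases(&f.selections)).sum()
    }

    // Simplified "height": the number of distinct field names used.
    fn height(fields: &[Field], seen: &mut HashSet<&'static str>) -> usize {
        for f in fields {
            seen.insert(f.name);
            height(&f.selections, seen);
        }
        seen.len()
    }

    fn main() {
        let leaf = |name| Field { name, alias: None, selections: vec![] };
        // { topProducts { name reviews { author: user { name } } } }
        let op = vec![Field {
            name: "topProducts",
            alias: None,
            selections: vec![
                leaf("name"),
                Field {
                    name: "reviews",
                    alias: None,
                    selections: vec![Field {
                        name: "user",
                        alias: Some("author"),
                        selections: vec![leaf("name")],
                    }],
                },
            ],
        }];
        println!("root_fields = {}", op.len());                    // 1
        println!("depth = {}", depth(&op));                        // 4
        println!("aliases = {}", aliases(&op));                    // 1
        println!("height = {}", height(&op, &mut HashSet::new())); // 4
    }
    ```

    By this sketch's counting, a `max_depth: 3` limit would reject the operation; logging or charting these values across real traffic, as described above, gives you a defensible baseline for choosing such thresholds.
    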
+ + + +### Collecting metrics + +To capture and view metrics to help set your operation limits, you can configure the router to collect [custom metrics](/router/configuration/telemetry/instrumentation/instruments#custom-instruments) on the values of aliases, depth, height, and root_fields for each operation: + +```yaml title="router.yaml" +telemetry: + exporters: + metrics: + common: + views: + # Define a custom view because operation limits are different than the default latency-oriented view of OpenTelemetry + - name: oplimits.* + aggregation: + histogram: + buckets: + - 0 + - 5 + - 10 + - 25 + - 50 + - 100 + - 500 + - 1000 + instrumentation: + instruments: + supergraph: + oplimits.aliases: + value: + query: aliases + type: histogram + unit: number + description: "Aliases for an operation" + oplimits.depth: + value: + query: depth + type: histogram + unit: number + description: "Depth for an operation" + oplimits.height: + value: + query: height + type: histogram + unit: number + description: "Height for an operation" + oplimits.root_fields: + value: + query: root_fields + type: histogram + unit: number + description: "Root fields for an operation" +``` + +You should also configure the router to [export metrics](/router/configuration/telemetry/exporters/metrics/overview) to your APM tool. diff --git a/docs/source/configuration/overview.mdx b/docs/source/configuration/overview.mdx index d4bd85955b..13e6991a6a 100644 --- a/docs/source/configuration/overview.mdx +++ b/docs/source/configuration/overview.mdx @@ -598,21 +598,27 @@ supergraph: - + + +The router supports enhanced operation signature normalization in the following versions: + +- [General availability](/resources/product-launch-stages/#general-availability) in v1.54.0 and later +- [Experimental](/resources/product-launch-stages/#experimental-features) in v1.49.0 to v1.53.0 + + -Beginning in v1.49.0, the router supports enhanced operation signature normalization. Apollo's legacy operation signature algorithm removes information about certain fields, such as input objects and aliases. This removal means some operations may have the same normalized signature though they are distinct operations. Enhanced normalization incorporates [input types](#input-types) and [aliases](#aliases) in signature generation. It also includes other improvements that make it more likely that two operations that only vary slightly have the same signature. -Configure enhanced operation signature normalization in `router.yaml` with the `telemetry.apollo.experimental_apollo_signature_normalization_algorithm` option: +Configure enhanced operation signature normalization in `router.yaml` with the `telemetry.apollo.signature_normalization_algorithm` option: ```yaml title="router.yaml" telemetry: apollo: - experimental_apollo_signature_normalization_algorithm: enhanced # Default is legacy + signature_normalization_algorithm: enhanced # Default is legacy ``` Once you enable this configuration, operations with enhanced signatures might appear with different operation IDs than they did previously in GraphOS Studio. 
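    For intuition about why the enhanced algorithm distinguishes operations that the legacy one conflates, consider this small sketch. The `mock_legacy_signature` helper is hypothetical and only mimics the alias-dropping behavior described above; it is not Apollo's actual signature algorithm.

    ```rust
    // Crude stand-in for legacy normalization: drop "alias:" tokens so aliased
    // and unaliased selections collapse to the same signature.
    fn mock_legacy_signature(operation: &str) -> String {
        operation
            .split_whitespace()
            .filter(|token| !token.ends_with(':'))
            .collect::<Vec<_>>()
            .join(" ")
    }

    fn main() {
        let a = "query Q { user { id } }";
        let b = "query Q { renamed: user { id } }";
        // Legacy-style signatures collide even though the operations differ:
        assert_eq!(mock_legacy_signature(a), mock_legacy_signature(b));
        // An enhanced-style signature that keeps aliases would tell them apart:
        assert_ne!(a, b);
        println!("shared legacy-style signature: {}", mock_legacy_signature(a));
    }
    ```
    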
    @@ -761,20 +767,28 @@ query AliasedQuery { - + + +The router supports extended reference reporting in the following versions: + +- [General availability](/resources/product-launch-stages/#general-availability) in v1.54.0 and later +- [Experimental](/resources/product-launch-stages/#experimental-features) in v1.50.0 to v1.53.0 + + + -Beginning in v1.50.0, you can configure the router to report enum and input object references for enhanced insights and operation checks. +You can configure the router to report enum and input object references for enhanced insights and operation checks. Apollo's legacy reference reporting doesn't include data about enum values and input object fields, meaning you can't view enum and input object field usage in GraphOS Studio. Legacy reporting can also cause [inaccurate operation checks](#enhanced-operation-checks). -Configure extended reference reporting in `router.yaml` with the `telemetry.apollo.experimental_apollo_metrics_reference_mode` option like so: +Configure extended reference reporting in `router.yaml` with the `telemetry.apollo.metrics_reference_mode` option like so: ```yaml title="router.yaml" telemetry: apollo: - experimental_apollo_metrics_reference_mode: extended # Default is legacy + metrics_reference_mode: extended # Default is legacy ``` #### Configuration effect timing diff --git a/docs/source/configuration/telemetry/exporters/logging/stdout.mdx b/docs/source/configuration/telemetry/exporters/logging/stdout.mdx index 36848ec241..a85e8162a6 100644 --- a/docs/source/configuration/telemetry/exporters/logging/stdout.mdx +++ b/docs/source/configuration/telemetry/exporters/logging/stdout.mdx @@ -140,7 +140,7 @@ telemetry: display_current_span: true display_service_name: true display_service_namespace: true - display_trace_id: true + display_trace_id: true # true|false|open_telemetry|hexadecimal|decimal|datadog|uuid display_span_id: true ``` @@ -164,7 +164,7 @@ Example `text` output: | `display_timestamp` | `true`\|`false` | `true` | The timestamp of when the event was raised. | | `display_service_name` | `true`\|`false` | `false` | The service name as configured in metrics common. | | `display_service_namespace` | `true`\|`false` | `false` | The service namespace as configured in metrics common. | -| `display_trace_id` | `true`\|`false` | `false` | The trace id of the span in which the event was raised. | +| `display_trace_id` | `true`\|`false`\|`open_telemetry`\|`hexadecimal`\|`decimal`\|`datadog`\|`uuid` | `false` | The trace id of the span in which the event was raised. | | `display_span_id` | `true`\|`false` | `false` | The span ID of the span in which the event was raised. | | `display_span_list` | `true`\|`false` | `true` | A list of all spans to root in which the event was raised and all of their attributes. | | `display_current_span` | `true`\|`false` | `true` | The span in which the event was raised and all of its attributes. | @@ -225,7 +225,7 @@ telemetry: display_current_span: true display_span_list: true display_resource: true - display_trace_id: true + display_trace_id: true # true|false|open_telemetry|hexadecimal|decimal|datadog|uuid display_span_id: true ``` @@ -335,6 +335,44 @@ Example output with a list of spans: } ``` +#### `span_attributes` + +The `telemetry.exporters.logging.stdout.format.json.span_attributes` option allows you to display a subset of all span attributes. It takes an array of span attribute names to log.
    
    + +When `span_attributes` is specified, the router searches for the first attribute in the list of span attributes from the root span to the current span and attaches it to the outermost JSON object for the log event. + +If you set the same attribute name for different spans at different levels, the router chooses the attributes of child spans before the attributes of parent spans. + +For example, you can display just the `span_attr_1` span attribute: + +```yaml title="router.yaml" +telemetry: + exporters: + logging: + stdout: + enabled: true + format: + json: + display_span_list: false + span_attributes: + - span_attr_1 +``` + +Example output with the promoted span attribute: + +```json +{ + "timestamp": "2023-10-30T14:09:34.771388Z", + "level": "INFO", + "fields": { + "event_attr_1": "event_attr_1", + "event_attr_2": "event_attr_2" + }, + "target": "event_target", + "span_attr_1": "span_attr_1" +} +``` + #### `display_resource` The `telemetry.logging.stdout.format.json.display_resource` option configures whether resources configured in `router.yaml` are displayed in log messages. By default, `display_resource` is `true`. @@ -373,7 +411,8 @@ telemetry: | `display_timestamp` | `true`\|`false` | `true` | `timestamp` | The timestamp of when the event was raised. | | `display_span_list` | `true`\|`false` | `true` | `spans` | A list of all spans to root in which the event was raised and all of their attributes. | | `display_resource` | `true`\|`false` | `true` | `resource` | The resource as configured in tracing common. | -| `display_trace_id` | `true`\|`false` | `true` | `trace_id` | The trace id of the span in which the event was raised. | +| `display_trace_id` | `true`\|`false`\|`open_telemetry`\|`hexadecimal`\|`decimal`\|`datadog`\|`uuid` | `true` | `trace_id` | The trace id of the span in which the event was raised. | | `display_span_id` | `true`\|`false` | `true` | `span_id` | The span id of the span in which the event was raised. | +| `span_attributes` | `[string]` | `[]` | `*` | List of span attributes to attach to the JSON log object. | diff --git a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx index d29cbf1fca..828a764f24 100644 --- a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx +++ b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx @@ -66,6 +66,7 @@ The coprocessor operations metric has the following attributes: - `apollo.router.query_planning.plan.duration` - Histogram of plan durations isolated to query planning time only. - `apollo.router.query_planning.total.duration` - Histogram of plan durations including queue time. - `apollo.router.query_planning.queued` - A gauge of the number of queued plan requests. +- `apollo.router.query_planning.plan.evaluated_plans` - Histogram of the number of evaluated query plans. - `apollo.router.v8.heap.used` - heap memory used by V8, in bytes. - `apollo.router.v8.heap.total` - total heap allocated by V8, in bytes.
    
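    The child-over-parent lookup that the `span_attributes` documentation above describes can be pictured in a few lines of Rust. This is a sketch under my own assumptions about the data shapes, not the router's internals:

    ```rust
    use std::collections::HashMap;

    // Walk the span stack from the current (innermost) span up to the root and
    // return the first value found, so child spans win over parent spans.
    fn promoted_attribute<'a>(
        span_stack: &'a [HashMap<String, String>], // root first, current span last
        name: &str,
    ) -> Option<&'a String> {
        span_stack.iter().rev().find_map(|span| span.get(name))
    }

    fn main() {
        let router = HashMap::from([("too_big".to_string(), "true".to_string())]);
        let subgraph = HashMap::from([("too_big".to_string(), "nope".to_string())]);
        // With the json.span_attributes fixture above, the subgraph span's "nope"
        // is promoted onto the JSON log object rather than the router span's value.
        let stack = vec![router, subgraph];
        assert_eq!(promoted_attribute(&stack, "too_big").unwrap().as_str(), "nope");
    }
    ```

    This matches the logging test above, which expects both `"too_big":true` (from a router-level event) and `"too_big":"nope"` (once the subgraph span is the innermost span carrying that attribute).
    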
diff --git a/examples/add-timestamp-header/rhai/src/main.rs b/examples/add-timestamp-header/rhai/src/main.rs index 3769cca26c..0c52e70815 100644 --- a/examples/add-timestamp-header/rhai/src/main.rs +++ b/examples/add-timestamp-header/rhai/src/main.rs @@ -59,7 +59,7 @@ mod tests { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) + .operation_name("TopProducts".to_string()) .build() .unwrap(); diff --git a/examples/jwt-claims/rhai/src/main.rs b/examples/jwt-claims/rhai/src/main.rs index 0c0f328be6..7e2a312e9c 100644 --- a/examples/jwt-claims/rhai/src/main.rs +++ b/examples/jwt-claims/rhai/src/main.rs @@ -119,7 +119,7 @@ mod tests { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) + .operation_name("TopProducts".to_string()) .header( http::header::AUTHORIZATION, "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiIsImtpZCI6ImtleTEifQ.eyJleHAiOjEwMDAwMDAwMDAwLCJhbm90aGVyIGNsYWltIjoidGhpcyBpcyBhbm90aGVyIGNsYWltIn0.4GrmfxuUST96cs0YUC0DfLAG218m7vn8fO_ENfXnu5A", diff --git a/examples/logging/rhai/src/main.rs b/examples/logging/rhai/src/main.rs index b64e76e49b..74629a5d5a 100644 --- a/examples/logging/rhai/src/main.rs +++ b/examples/logging/rhai/src/main.rs @@ -59,7 +59,7 @@ mod tests { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) + .operation_name("TopProducts".to_string()) .build() .unwrap(); diff --git a/examples/op-name-to-header/rhai/src/main.rs b/examples/op-name-to-header/rhai/src/main.rs index 614f95adcf..b4c7b5b54b 100644 --- a/examples/op-name-to-header/rhai/src/main.rs +++ b/examples/op-name-to-header/rhai/src/main.rs @@ -1,7 +1,7 @@ //! % curl -v \ //! --header 'content-type: application/json' \ //! --url 'http://127.0.0.1:4000' \ -//! --data '{"operationName": "me", "query":"query Query {\n me {\n name\n }\n}"}' +//! 
--data '{"operationName": "TopProduct", "query":"query TopProduct { topProducts { name } }"}' use anyhow::Result; @@ -39,7 +39,7 @@ mod tests { .headers() .get("X-operation-name") .expect("X-operation-name is present"), - "me" + "TopProducts" ); Ok(supergraph::Response::fake_builder() .data(expected_mock_response_data) @@ -66,7 +66,7 @@ mod tests { // Let's create a request with our operation name let request_with_appropriate_name = supergraph::Request::canned_builder() - .operation_name("me".to_string()) + .operation_name("TopProducts".to_string()) .build() .unwrap(); diff --git a/examples/supergraph-sdl/rust/Cargo.toml b/examples/supergraph-sdl/rust/Cargo.toml index 18e0c2d85c..065990a8ef 100644 --- a/examples/supergraph-sdl/rust/Cargo.toml +++ b/examples/supergraph-sdl/rust/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] anyhow = "1" -apollo-compiler = "=1.0.0-beta.20" +apollo-compiler = "=1.0.0-beta.21" apollo-router = { path = "../../../apollo-router" } async-trait = "0.1" tower = { version = "0.4", features = ["full"] } diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index f01ccac721..dfbb4c9725 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -20,7 +20,7 @@ reqwest = { workspace = true, features = ["json", "blocking"] } serde_json.workspace = true tokio.workspace = true # note: this dependency should _always_ be pinned, prefix the version with an `=` -router-bridge = "=0.6.0+v2.9.0" +router-bridge = "=0.6.1+v2.9.0" [dev-dependencies] anyhow = "1" diff --git a/fuzz/subgraph/Cargo.toml b/fuzz/subgraph/Cargo.toml index e04f35066e..0e8ce3b5c8 100644 --- a/fuzz/subgraph/Cargo.toml +++ b/fuzz/subgraph/Cargo.toml @@ -11,7 +11,6 @@ env_logger = "0.10" futures = "0.3.17" lazy_static = "1.4.0" log = "0.4.16" -moka = { version = "0.8.5", features = ["future"] } rand = { version = "0.8.5", features = ["std_rng"] } serde_json = "1.0.79" tokio = { version = "1.22.0", features = ["time", "full"] } diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 40ba75ba53..50f1886874 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.53.0 +version: 1.54.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v1.53.0" +appVersion: "v1.54.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index e962582547..39e6238055 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.53.0](https://img.shields.io/badge/Version-1.53.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.53.0](https://img.shields.io/badge/AppVersion-v1.53.0-informational?style=flat-square) +![Version: 1.54.0](https://img.shields.io/badge/Version-1.54.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.54.0](https://img.shields.io/badge/AppVersion-v1.54.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.54.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.54.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -79,6 +79,7 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | probes.readiness | object | `{"initialDelaySeconds":0}` | Configure readiness probe | | replicaCount | int | `1` | | | resources | object | `{}` | | +| restartPolicy | string | `"Always"` | Sets the restart policy of pods | | rollingUpdate | object | `{}` | Sets the [rolling update strategy parameters](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment). Can take absolute values or % values. | | router | object | `{"args":["--hot-reload"],"configuration":{"health_check":{"listen":"0.0.0.0:8088"},"supergraph":{"listen":"0.0.0.0:4000"}}}` | See https://www.apollographql.com/docs/router/configuration/overview/#yaml-config-file for yaml structure | | securityContext | object | `{}` | | diff --git a/helm/chart/router/templates/deployment.yaml b/helm/chart/router/templates/deployment.yaml index 3268dd443e..19557a6b5e 100644 --- a/helm/chart/router/templates/deployment.yaml +++ b/helm/chart/router/templates/deployment.yaml @@ -48,6 +48,9 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} serviceAccountName: {{ include "router.serviceAccountName" . 
}} + {{- if .Values.restartPolicy }} + restartPolicy: {{.Values.restartPolicy}} + {{- end }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} containers: diff --git a/helm/chart/router/values.yaml b/helm/chart/router/values.yaml index b4d437bb7c..8540dd2f56 100644 --- a/helm/chart/router/values.yaml +++ b/helm/chart/router/values.yaml @@ -253,3 +253,6 @@ probes: # -- Sets the [topology spread constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) for Deployment pods topologySpreadConstraints: [] + +# -- Sets the restart policy of pods +restartPolicy: Always diff --git a/licenses.html b/licenses.html index 37df6f6803..e50ef97a86 100644 --- a/licenses.html +++ b/licenses.html @@ -44,12 +44,12 @@

    Third Party Licenses

    Overview of licenses:

    -  • Apache License 2.0 (468)
    -  • MIT License (163)
    +  • Apache License 2.0 (432)
    +  • MIT License (146)
       • BSD 3-Clause "New" or "Revised" License (11)
       • ISC License (8)
    -  • BSD 2-Clause "Simplified" License (5)
       • Mozilla Public License 2.0 (5)
    +  • BSD 2-Clause "Simplified" License (4)
       • Elastic License 2.0 (3)
       • Creative Commons Zero v1.0 Universal (2)
       • OpenSSL License (1)
    @@ -65,7 +65,6 @@
    

    Used by:

       • aws-config
       • aws-credential-types
       • aws-runtime
    -  • aws-sigv4
       • aws-smithy-async
       • aws-smithy-http
       • aws-smithy-json
    @@ -1706,416 +1705,20 @@
    

    Used by:

        http://www.apache.org/licenses/LICENSE-2.0
    -   Unless required by applicable law or agreed to in writing, software
    -   distributed under the License is distributed on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -   See the License for the specific language governing permissions and
    -   limitations under the License.
    -
    -
    
    -
    -Apache License 2.0
    -
    -Used by:
    -
    
    -                                 Apache License
    -                           Version 2.0, January 2004
    -                        https://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -   1. Definitions.
    -
    -      "License" shall mean the terms and conditions for use, reproduction,
    -      and distribution as defined by Sections 1 through 9 of this document.
    -
    -      "Licensor" shall mean the copyright owner or entity authorized by
    -      the copyright owner that is granting the License.
    -
    -      "Legal Entity" shall mean the union of the acting entity and all
    -      other entities that control, are controlled by, or are under common
    -      control with that entity. For the purposes of this definition,
    -      "control" means (i) the power, direct or indirect, to cause the
    -      direction or management of such entity, whether by contract or
    -      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -      outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -      "You" (or "Your") shall mean an individual or Legal Entity
    -      exercising permissions granted by this License.
    -
    -      "Source" form shall mean the preferred form for making modifications,
    -      including but not limited to software source code, documentation
    -      source, and configuration files.
    -
    -      "Object" form shall mean any form resulting from mechanical
    -      transformation or translation of a Source form, including but
    -      not limited to compiled object code, generated documentation,
    -      and conversions to other media types.
    -
    -      "Work" shall mean the work of authorship, whether in Source or
    -      Object form, made available under the License, as indicated by a
    -      copyright notice that is included in or attached to the work
    -      (an example is provided in the Appendix below).
    -
    -      "Derivative Works" shall mean any work, whether in Source or Object
    -      form, that is based on (or derived from) the Work and for which the
    -      editorial revisions, annotations, elaborations, or other modifications
    -      represent, as a whole, an original work of authorship. For the purposes
    -      of this License, Derivative Works shall not include works that remain
    -      separable from, or merely link (or bind by name) to the interfaces of,
    -      the Work and Derivative Works thereof.
    -
    -      "Contribution" shall mean any work of authorship, including
    -      the original version of the Work and any modifications or additions
    -      to that Work or Derivative Works thereof, that is intentionally
    -      submitted to Licensor for inclusion in the Work by the copyright owner
    -      or by an individual or Legal Entity authorized to submit on behalf of
    -      the copyright owner. For the purposes of this definition, "submitted"
    -      means any form of electronic, verbal, or written communication sent
    -      to the Licensor or its representatives, including but not limited to
    -      communication on electronic mailing lists, source code control systems,
    -      and issue tracking systems that are managed by, or on behalf of, the
    -      Licensor for the purpose of discussing and improving the Work, but
    -      excluding communication that is conspicuously marked or otherwise
    -      designated in writing by the copyright owner as "Not a Contribution."
    -
    -      "Contributor" shall mean Licensor and any individual or Legal Entity
    -      on behalf of whom a Contribution has been received by Licensor and
    -      subsequently incorporated within the Work.
    -
    -   2. Grant of Copyright License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      copyright license to reproduce, prepare Derivative Works of,
    -      publicly display, publicly perform, sublicense, and distribute the
    -      Work and such Derivative Works in Source or Object form.
    -
    -   3. Grant of Patent License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      (except as stated in this section) patent license to make, have made,
    -      use, offer to sell, sell, import, and otherwise transfer the Work,
    -      where such license applies only to those patent claims licensable
    -      by such Contributor that are necessarily infringed by their
    -      Contribution(s) alone or by combination of their Contribution(s)
    -      with the Work to which such Contribution(s) was submitted. If You
    -      institute patent litigation against any entity (including a
    -      cross-claim or counterclaim in a lawsuit) alleging that the Work
    -      or a Contribution incorporated within the Work constitutes direct
    -      or contributory patent infringement, then any patent licenses
    -      granted to You under this License for that Work shall terminate
    -      as of the date such litigation is filed.
    -
    -   4. Redistribution. You may reproduce and distribute copies of the
    -      Work or Derivative Works thereof in any medium, with or without
    -      modifications, and in Source or Object form, provided that You
    -      meet the following conditions:
    -
    -      (a) You must give any other recipients of the Work or
    -          Derivative Works a copy of this License; and
    -
    -      (b) You must cause any modified files to carry prominent notices
    -          stating that You changed the files; and
    -
    -      (c) You must retain, in the Source form of any Derivative Works
    -          that You distribute, all copyright, patent, trademark, and
    -          attribution notices from the Source form of the Work,
    -          excluding those notices that do not pertain to any part of
    -          the Derivative Works; and
    -
    -      (d) If the Work includes a "NOTICE" text file as part of its
    -          distribution, then any Derivative Works that You distribute must
    -          include a readable copy of the attribution notices contained
    -          within such NOTICE file, excluding those notices that do not
    -          pertain to any part of the Derivative Works, in at least one
    -          of the following places: within a NOTICE text file distributed
    -          as part of the Derivative Works; within the Source form or
    -          documentation, if provided along with the Derivative Works; or,
    -          within a display generated by the Derivative Works, if and
    -          wherever such third-party notices normally appear. The contents
    -          of the NOTICE file are for informational purposes only and
    -          do not modify the License. You may add Your own attribution
    -          notices within Derivative Works that You distribute, alongside
    -          or as an addendum to the NOTICE text from the Work, provided
    -          that such additional attribution notices cannot be construed
    -          as modifying the License.
    -
    -      You may add Your own copyright statement to Your modifications and
    -      may provide additional or different license terms and conditions
    -      for use, reproduction, or distribution of Your modifications, or
    -      for any such Derivative Works as a whole, provided Your use,
    -      reproduction, and distribution of the Work otherwise complies with
    -      the conditions stated in this License.
    -
    -   5. Submission of Contributions. Unless You explicitly state otherwise,
    -      any Contribution intentionally submitted for inclusion in the Work
    -      by You to the Licensor shall be under the terms and conditions of
    -      this License, without any additional terms or conditions.
    -      Notwithstanding the above, nothing herein shall supersede or modify
    -      the terms of any separate license agreement you may have executed
    -      with Licensor regarding such Contributions.
    -
    -   6. Trademarks. This License does not grant permission to use the trade
    -      names, trademarks, service marks, or product names of the Licensor,
    -      except as required for reasonable and customary use in describing the
    -      origin of the Work and reproducing the content of the NOTICE file.
    -
    -   7. Disclaimer of Warranty. Unless required by applicable law or
    -      agreed to in writing, Licensor provides the Work (and each
    -      Contributor provides its Contributions) on an "AS IS" BASIS,
    -      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -      implied, including, without limitation, any warranties or conditions
    -      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -      PARTICULAR PURPOSE. You are solely responsible for determining the
    -      appropriateness of using or redistributing the Work and assume any
    -      risks associated with Your exercise of permissions under this License.
    -
    -   8. Limitation of Liability. In no event and under no legal theory,
    -      whether in tort (including negligence), contract, or otherwise,
    -      unless required by applicable law (such as deliberate and grossly
    -      negligent acts) or agreed to in writing, shall any Contributor be
    -      liable to You for damages, including any direct, indirect, special,
    -      incidental, or consequential damages of any character arising as a
    -      result of this License or out of the use or inability to use the
    -      Work (including but not limited to damages for loss of goodwill,
    -      work stoppage, computer failure or malfunction, or any and all
    -      other commercial damages or losses), even if such Contributor
    -      has been advised of the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing
    -      the Work or Derivative Works thereof, You may choose to offer,
    -      and charge a fee for, acceptance of support, warranty, indemnity,
    -      or other liability obligations and/or rights consistent with this
    -      License. However, in accepting such obligations, You may act only
    -      on Your own behalf and on Your sole responsibility, not on behalf
    -      of any other Contributor, and only if You agree to indemnify,
    -      defend, and hold each Contributor harmless for any liability
    -      incurred by, or claims asserted against, such Contributor by reason
    -      of your accepting any such warranty or additional liability.
    -
    -   END OF TERMS AND CONDITIONS
    -
    -
-
-
-Apache License 2.0
-
-Used by:
-
-                                 Apache License
    -                           Version 2.0, January 2004
    -                        http://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -   1. Definitions.
    -
    -      "License" shall mean the terms and conditions for use, reproduction,
    -      and distribution as defined by Sections 1 through 9 of this document.
    -
    -      "Licensor" shall mean the copyright owner or entity authorized by
    -      the copyright owner that is granting the License.
    -
    -      "Legal Entity" shall mean the union of the acting entity and all
    -      other entities that control, are controlled by, or are under common
    -      control with that entity. For the purposes of this definition,
    -      "control" means (i) the power, direct or indirect, to cause the
    -      direction or management of such entity, whether by contract or
    -      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -      outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -      "You" (or "Your") shall mean an individual or Legal Entity
    -      exercising permissions granted by this License.
    -
    -      "Source" form shall mean the preferred form for making modifications,
    -      including but not limited to software source code, documentation
    -      source, and configuration files.
    -
    -      "Object" form shall mean any form resulting from mechanical
    -      transformation or translation of a Source form, including but
    -      not limited to compiled object code, generated documentation,
    -      and conversions to other media types.
    -
    -      "Work" shall mean the work of authorship, whether in Source or
    -      Object form, made available under the License, as indicated by a
    -      copyright notice that is included in or attached to the work
    -      (an example is provided in the Appendix below).
    -
    -      "Derivative Works" shall mean any work, whether in Source or Object
    -      form, that is based on (or derived from) the Work and for which the
    -      editorial revisions, annotations, elaborations, or other modifications
    -      represent, as a whole, an original work of authorship. For the purposes
    -      of this License, Derivative Works shall not include works that remain
    -      separable from, or merely link (or bind by name) to the interfaces of,
    -      the Work and Derivative Works thereof.
    -
    -      "Contribution" shall mean any work of authorship, including
    -      the original version of the Work and any modifications or additions
    -      to that Work or Derivative Works thereof, that is intentionally
    -      submitted to Licensor for inclusion in the Work by the copyright owner
    -      or by an individual or Legal Entity authorized to submit on behalf of
    -      the copyright owner. For the purposes of this definition, "submitted"
    -      means any form of electronic, verbal, or written communication sent
    -      to the Licensor or its representatives, including but not limited to
    -      communication on electronic mailing lists, source code control systems,
    -      and issue tracking systems that are managed by, or on behalf of, the
    -      Licensor for the purpose of discussing and improving the Work, but
    -      excluding communication that is conspicuously marked or otherwise
    -      designated in writing by the copyright owner as "Not a Contribution."
    -
    -      "Contributor" shall mean Licensor and any individual or Legal Entity
    -      on behalf of whom a Contribution has been received by Licensor and
    -      subsequently incorporated within the Work.
    -
    -   2. Grant of Copyright License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      copyright license to reproduce, prepare Derivative Works of,
    -      publicly display, publicly perform, sublicense, and distribute the
    -      Work and such Derivative Works in Source or Object form.
    -
    -   3. Grant of Patent License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      (except as stated in this section) patent license to make, have made,
    -      use, offer to sell, sell, import, and otherwise transfer the Work,
    -      where such license applies only to those patent claims licensable
    -      by such Contributor that are necessarily infringed by their
    -      Contribution(s) alone or by combination of their Contribution(s)
    -      with the Work to which such Contribution(s) was submitted. If You
    -      institute patent litigation against any entity (including a
    -      cross-claim or counterclaim in a lawsuit) alleging that the Work
    -      or a Contribution incorporated within the Work constitutes direct
    -      or contributory patent infringement, then any patent licenses
    -      granted to You under this License for that Work shall terminate
    -      as of the date such litigation is filed.
    -
    -   4. Redistribution. You may reproduce and distribute copies of the
    -      Work or Derivative Works thereof in any medium, with or without
    -      modifications, and in Source or Object form, provided that You
    -      meet the following conditions:
    -
    -      (a) You must give any other recipients of the Work or
    -          Derivative Works a copy of this License; and
    -
    -      (b) You must cause any modified files to carry prominent notices
    -          stating that You changed the files; and
    -
    -      (c) You must retain, in the Source form of any Derivative Works
    -          that You distribute, all copyright, patent, trademark, and
    -          attribution notices from the Source form of the Work,
    -          excluding those notices that do not pertain to any part of
    -          the Derivative Works; and
    -
    -      (d) If the Work includes a "NOTICE" text file as part of its
    -          distribution, then any Derivative Works that You distribute must
    -          include a readable copy of the attribution notices contained
    -          within such NOTICE file, excluding those notices that do not
    -          pertain to any part of the Derivative Works, in at least one
    -          of the following places: within a NOTICE text file distributed
    -          as part of the Derivative Works; within the Source form or
    -          documentation, if provided along with the Derivative Works; or,
    -          within a display generated by the Derivative Works, if and
    -          wherever such third-party notices normally appear. The contents
    -          of the NOTICE file are for informational purposes only and
    -          do not modify the License. You may add Your own attribution
    -          notices within Derivative Works that You distribute, alongside
    -          or as an addendum to the NOTICE text from the Work, provided
    -          that such additional attribution notices cannot be construed
    -          as modifying the License.
    -
    -      You may add Your own copyright statement to Your modifications and
    -      may provide additional or different license terms and conditions
    -      for use, reproduction, or distribution of Your modifications, or
    -      for any such Derivative Works as a whole, provided Your use,
    -      reproduction, and distribution of the Work otherwise complies with
    -      the conditions stated in this License.
    -
    -   5. Submission of Contributions. Unless You explicitly state otherwise,
    -      any Contribution intentionally submitted for inclusion in the Work
    -      by You to the Licensor shall be under the terms and conditions of
    -      this License, without any additional terms or conditions.
    -      Notwithstanding the above, nothing herein shall supersede or modify
    -      the terms of any separate license agreement you may have executed
    -      with Licensor regarding such Contributions.
    -
    -   6. Trademarks. This License does not grant permission to use the trade
    -      names, trademarks, service marks, or product names of the Licensor,
    -      except as required for reasonable and customary use in describing the
    -      origin of the Work and reproducing the content of the NOTICE file.
    -
    -   7. Disclaimer of Warranty. Unless required by applicable law or
    -      agreed to in writing, Licensor provides the Work (and each
    -      Contributor provides its Contributions) on an "AS IS" BASIS,
    -      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -      implied, including, without limitation, any warranties or conditions
    -      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -      PARTICULAR PURPOSE. You are solely responsible for determining the
    -      appropriateness of using or redistributing the Work and assume any
    -      risks associated with Your exercise of permissions under this License.
    -
    -   8. Limitation of Liability. In no event and under no legal theory,
    -      whether in tort (including negligence), contract, or otherwise,
    -      unless required by applicable law (such as deliberate and grossly
    -      negligent acts) or agreed to in writing, shall any Contributor be
    -      liable to You for damages, including any direct, indirect, special,
    -      incidental, or consequential damages of any character arising as a
    -      result of this License or out of the use or inability to use the
    -      Work (including but not limited to damages for loss of goodwill,
    -      work stoppage, computer failure or malfunction, or any and all
    -      other commercial damages or losses), even if such Contributor
    -      has been advised of the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing
    -      the Work or Derivative Works thereof, You may choose to offer,
    -      and charge a fee for, acceptance of support, warranty, indemnity,
    -      or other liability obligations and/or rights consistent with this
    -      License. However, in accepting such obligations, You may act only
    -      on Your own behalf and on Your sole responsibility, not on behalf
    -      of any other Contributor, and only if You agree to indemnify,
    -      defend, and hold each Contributor harmless for any liability
    -      incurred by, or claims asserted against, such Contributor by reason
    -      of your accepting any such warranty or additional liability.
    -
    -   END OF TERMS AND CONDITIONS
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
     
  • Apache License 2.0

    Used by:

                                     Apache License
                                Version 2.0, January 2004
    @@ -2293,38 +1896,40 @@ 

    Used by:

    of your accepting any such warranty or additional liability.

    END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!) The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-   Copyright (c) Microsoft Corporation.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
  • Apache License 2.0

    Used by:

                                     Apache License
                                Version 2.0, January 2004
    @@ -2514,7 +2119,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2020 - 2022 Tatsuya Kawano + Copyright (c) Microsoft Corporation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -3175,7 +2780,6 @@

    Used by:

  • clap_builder
  • clap_derive
  • clap_lex
-  • opentelemetry-proto
                                 Apache License
                            Version 2.0, January 2004
@@ -4244,7 +3848,6 @@ 

Used by:

  • is_terminal_polyfill
  • quick-error
  • resolv-conf
-  • scheduled-thread-pool
  • serde_spanned
  • tokio-io-timeout
  • toml
@@ -5482,7 +5085,6 @@

    Used by:

  • utf-8
  • utf8parse
  • wasm-streams
-  • zerocopy

                                 Apache License
                             Version 2.0, January 2004
    @@ -8402,8 +8004,6 @@ 

    Used by:

  • arc-swap
  • async-channel
  • async-compression
-  • async-io
-  • async-lock
  • autocfg
  • backtrace
  • base64
@@ -8414,11 +8014,8 @@

    Used by:

  • bstr
  • bumpalo
  • bytes-utils
-  • camino
  • cc
  • cfg-if
-  • cfg-if
-  • ci_info
  • cmake
  • concurrent-queue
  • const-random
@@ -8427,17 +8024,13 @@

    Used by:

  • core-foundation-sys
  • countme
  • crossbeam-channel
-  • crossbeam-epoch
-  • crossbeam-utils
  • crossbeam-utils
  • debugid
  • derivative
  • derive_arbitrary
  • displaydoc
  • either
-  • envmnt
  • equivalent
-  • error-chain
  • event-listener
  • fastrand
  • fastrand
@@ -8447,12 +8040,10 @@

    Used by:

  • fnv
  • form_urlencoded
  • fraction
-  • fsio
  • futures-lite
  • futures-timer
  • gimli
  • git2
-  • glob
  • hashbrown
  • hashbrown
  • hdrhistogram
@@ -8469,7 +8060,6 @@

    Used by:

  • indexmap
  • indexmap
  • inventory
-  • io-lifetimes
  • ipconfig
  • itertools
  • itertools
@@ -8482,12 +8072,10 @@

    Used by:

  • libz-ng-sys
  • libz-sys
  • linux-raw-sys
-  • linux-raw-sys
  • lock_api
  • log
  • maplit
  • match_cfg
-  • maybe-uninit
  • mime
  • mockall
  • mockall_derive
@@ -8534,7 +8122,6 @@

    Used by:

  • rustc_version
  • rustc_version
  • rustix
-  • rustix
  • rustls
  • rustls-native-certs
  • rustls-pemfile
@@ -8549,10 +8136,8 @@

    Used by:

  • shellexpand
  • signal-hook-registry
  • similar
-  • skeptic
  • smallvec
  • socket2
-  • socket2
  • stable_deref_trait
  • syn
  • tempfile
@@ -8577,7 +8162,6 @@

    Used by:

  • waker-fn
  • wasi
  • wasi
-  • wasi
  • wasm-bindgen
  • wasm-bindgen-backend
  • wasm-bindgen-futures
@@ -10029,7 +9613,6 @@

    Apache License 2.0

    Used by:

    @@ -10635,7 +10218,6 @@

    Apache License 2.0

    Used by:

                                  Apache License
    @@ -11258,53 +10840,6 @@ 

    Used by:

    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-
-
-Apache License 2.0
-
-Used by:
-
-# Contributing
    -
    -## License
    -
    -Licensed under either of
    -
    - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
    - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
    -
    -at your option.
    -
    -### Contribution
    -
    -Unless you explicitly state otherwise, any contribution intentionally submitted
    -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
    -additional terms or conditions.
    -
    -
-
-
-Apache License 2.0
-
-Used by:
-
-../../LICENSE-APACHE
    -
-
-
-Apache License 2.0
-
-Used by:
-
-// Licensed under the Apache License, Version 2.0
    -// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
    -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
    -// All files in the project carrying such notice may not be copied, modified, or distributed
    -// except according to those terms.
     
@@ -11941,10 +11476,14 @@

    Used by:

    Apache License 2.0

    Used by:

+  • apollo-compiler
+  • apollo-parser
+  • apollo-smith
    • async-graphql-axum
    • async-graphql-derive
    • async-graphql-parser
    • async-graphql-value
+  • buildstructor
    • deno-proc-macro-rules
    • deno-proc-macro-rules-macros
    • dunce
@@ -11960,6 +11499,7 @@

      Used by:

    • num-cmp
    • prost
    • rhai_codegen
+  • serde_derive_default
    • siphasher
    • system-configuration
    • system-configuration-sys
@@ -12021,139 +11561,32 @@

      Used by:

    5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

    6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-
-Apache License 2.0
-
-Used by:
-
-Copyright 2021 Oliver Giersch
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
    -
-
-
-Apache License 2.0
-
-Used by:
-
-Copyright [2022] [Bryn Cooke]
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
    -
-
-
-Apache License 2.0
-
-Used by:
-
-Copyright [2023] [Bryn Cooke]
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
    -
-
-
-Apache License 2.0
-
-Used by:
-
-Licensed under the Apache License, Version 2.0
    -<LICENSE-APACHE or
    -http://www.apache.org/licenses/LICENSE-2.0> or the MIT
    -license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
    -at your option. All files in the project carrying such
    -notice may not be copied, modified, or distributed except
    -according to those terms.
    -
    -
-
-
-Apache License 2.0
-
-Used by:
-
-MIT OR Apache-2.0
    -
-
-
-Apache License 2.0
-
-Used by:
-
-MIT OR Apache-2.0
    -
    -
-
-
-Apache License 2.0
-
-Used by:
-
-MIT or Apache-2.0
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work.
    +
    +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!)  The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
    +
    +Copyright [yyyy] [name of copyright owner]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
     
@@ -12187,37 +11620,6 @@

    Used by:

    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-BSD 2-Clause "Simplified" License
-
-Used by:
-
-Copyright (c) 2015, Nick Fitzgerald
    -All rights reserved.
    -
    -Redistribution and use in source and binary forms, with or without modification,
    -are permitted provided that the following conditions are met:
    -
    -1. Redistributions of source code must retain the above copyright notice, this
    -   list of conditions and the following disclaimer.
    -
    -2. Redistributions in binary form must reproduce the above copyright notice,
    -   this list of conditions and the following disclaimer in the documentation
    -   and/or other materials provided with the distribution.
    -
    -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
    -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
    -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
    -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     
@@ -13011,6 +12413,36 @@

    Used by:

    // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+ISC License
+
+Used by:
+
+// Copyright 2021 Brian Smith.
    +//
    +// Permission to use, copy, modify, and/or distribute this software for any
    +// purpose with or without fee is hereby granted, provided that the above
    +// copyright notice and this permission notice appear in all copies.
    +//
    +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
    +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR
    +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    +
    +#[test]
    +fn cert_without_extensions_test() {
    +    // Check the certificate is valid with
    +    // `openssl x509 -in cert_without_extensions.der -inform DER -text -noout`
    +    const CERT_WITHOUT_EXTENSIONS_DER: &[u8] = include_bytes!("cert_without_extensions.der");
    +
    +    assert!(webpki::EndEntityCert::try_from(CERT_WITHOUT_EXTENSIONS_DER).is_ok());
    +}
     
@@ -13080,7 +12512,6 @@

    ISC License

    Used by:

    ISC License:
     
    @@ -13090,33 +12521,6 @@ 

    Used by:

    Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-
-MIT License
-
-Used by:
-
-// Copyright (c) 2019 Nuclear Furnace
    -//
    -// Permission is hereby granted, free of charge, to any person obtaining a copy
    -// of this software and associated documentation files (the "Software"), to deal
    -// in the Software without restriction, including without limitation the rights
    -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -// copies of the Software, and to permit persons to whom the Software is
    -// furnished to do so, subject to the following conditions:
    -//
    -// The above copyright notice and this permission notice shall be included in all
    -// copies or substantial portions of the Software.
    -//
    -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -// SOFTWARE.
     
@@ -13599,7 +13003,6 @@

    MIT License

    Used by:

    Copyright (c) 2017 Gilad Naaman
     
    @@ -13830,66 +13233,6 @@ 

    Used by:

    shall be included in all copies or substantial portions of the Software.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
-
-
-MIT License
-
-Used by:
-
-Copyright (c) 2019 Carl Lerche
    -
    -Permission is hereby granted, free of charge, to any
    -person obtaining a copy of this software and associated
    -documentation files (the "Software"), to deal in the
    -Software without restriction, including without
    -limitation the rights to use, copy, modify, merge,
    -publish, distribute, sublicense, and/or sell copies of
    -the Software, and to permit persons to whom the Software
    -is furnished to do so, subject to the following
    -conditions:
    -
    -The above copyright notice and this permission notice
    -shall be included in all copies or substantial portions
    -of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
    -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
    -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
    -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
    -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    -DEALINGS IN THE SOFTWARE.
    -
    -Copyright (c) 2018 David Tolnay
    -
    -Permission is hereby granted, free of charge, to any
    -person obtaining a copy of this software and associated
    -documentation files (the "Software"), to deal in the
    -Software without restriction, including without
    -limitation the rights to use, copy, modify, merge,
    -publish, distribute, sublicense, and/or sell copies of
    -the Software, and to permit persons to whom the Software
    -is furnished to do so, subject to the following
    -conditions:
    -
    -The above copyright notice and this permission notice
    -shall be included in all copies or substantial portions
    -of the Software.
    -
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
     ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
     TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    @@ -14896,6 +14239,8 @@ 

    Used by:

    MIT License

    Used by:

-
-
-MIT License
-
-Used by:
-
-The MIT License
    -
    -Copyright 2015 Google Inc. All rights reserved.
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in
    -all copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    -THE SOFTWARE.
     
@@ -15238,37 +14553,6 @@

    Used by:

    MIT License

    Used by:

-The MIT License (MIT)
    -
    -Copyright (c) 2014 Mathijs van de Nes
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    -
-
-
-MIT License
-
-Used by:
-
@@ -15361,34 +14645,6 @@

    Used by:

    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    THE SOFTWARE.
-
-
-MIT License
-
-Used by:
-
-The MIT License (MIT)
    -
    -Copyright (c) 2015 Gerd Zellweger
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in
    -all copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    -THE SOFTWARE.
  • MIT License

    @@ -15822,25 +15078,6 @@

    Used by:

    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    THE SOFTWARE.
-
-
-MIT License
-
-Used by:
-
-This project is dual-licensed under the Unlicense and MIT licenses.
    -
    -You may use this code under the terms of either license.
     
@@ -16238,6 +15475,7 @@

    Used by:

    Mozilla Public License 2.0

    Used by:

    Mozilla Public License Version 2.0
    @@ -16620,8 +15858,8 @@ 

    Used by:

    Mozilla Public License 2.0

    Used by:

    Mozilla Public License Version 2.0
     ==================================
    @@ -16996,35 +16234,6 @@ 

    Used by:

    This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0.
-
-
-Mozilla Public License 2.0
-
-Used by:
-
-This packge contains a modified version of ca-bundle.crt:
    -
    -ca-bundle.crt -- Bundle of CA Root Certificates
    -
    -Certificate data from Mozilla as of: Thu Nov  3 19:04:19 2011#
    -This is a bundle of X.509 certificates of public Certificate Authorities
    -(CA). These were automatically extracted from Mozilla's root certificates
    -file (certdata.txt).  This file can be found in the mozilla source tree:
    -http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1#
    -It contains the certificates in PEM format and therefore
    -can be directly used with curl / libcurl / php_curl, or with
    -an Apache+mod_ssl webserver for SSL client authentication.
    -Just configure this file as the SSLCACertificateFile.#
    -
    -***** BEGIN LICENSE BLOCK *****
    -This Source Code Form is subject to the terms of the Mozilla Public License,
    -v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
    -one at http://mozilla.org/MPL/2.0/.
    -
    -***** END LICENSE BLOCK *****
    -@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
     
@@ -17095,26 +16304,50 @@

    Used by:

    UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
     
    -Unicode Data Files include all data files under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
    -
    -Unicode Data Files do not include PDF online code charts under the directory http://www.unicode.org/Public/.
    -
    -Software includes any source code published in the Unicode Standard or under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
    +See Terms of Use <https://www.unicode.org/copyright.html>
    +for definitions of Unicode Inc.’s Data Files and Software.
     
    -NOTICE TO USER: Carefully read the following legal agreement. BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE.
    +NOTICE TO USER: Carefully read the following legal agreement.
    +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
    +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
    +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
    +TERMS AND CONDITIONS OF THIS AGREEMENT.
    +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
    +THE DATA FILES OR SOFTWARE.
     
     COPYRIGHT AND PERMISSION NOTICE
     
    -Copyright © 1991-2016 Unicode, Inc. All rights reserved. Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
    +Copyright © 1991-2022 Unicode, Inc. All rights reserved.
    +Distributed under the Terms of Use in https://www.unicode.org/copyright.html.
     
    -Permission is hereby granted, free of charge, to any person obtaining a copy of the Unicode data files and any associated documentation (the "Data Files") or Unicode software and any associated documentation (the "Software") to deal in the Data Files or Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, and/or sell copies of the Data Files or Software, and to permit persons to whom the Data Files or Software are furnished to do so, provided that either
    -
    -     (a) this copyright and permission notice appear with all copies of the Data Files or Software, or
    -     (b) this copyright and permission notice appear in associated Documentation.
    -
    -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    -
    -Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in these Data Files or Software without prior written authorization of the copyright holder.
    +Permission is hereby granted, free of charge, to any person obtaining
    +a copy of the Unicode data files and any associated documentation
    +(the "Data Files") or Unicode software and any associated documentation
    +(the "Software") to deal in the Data Files or Software
    +without restriction, including without limitation the rights to use,
    +copy, modify, merge, publish, distribute, and/or sell copies of
    +the Data Files or Software, and to permit persons to whom the Data Files
    +or Software are furnished to do so, provided that either
    +(a) this copyright and permission notice appear with all copies
    +of the Data Files or Software, or
    +(b) this copyright and permission notice appear in associated
    +Documentation.
    +
    +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
    +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
    +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    +NONINFRINGEMENT OF THIRD PARTY RIGHTS.
    +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
    +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
    +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
    +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
    +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
    +PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    +
    +Except as contained in this notice, the name of a copyright holder
    +shall not be used in advertising or otherwise to promote the sale,
    +use or other dealings in these Data Files or Software without prior
    +written authorization of the copyright holder.
     
diff --git a/scripts/install.sh b/scripts/install.sh
index f250d420a1..0447b4c144 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa

 # Router version defined in apollo-router's Cargo.toml
 # Note: Change this line manually during the release steps.
-PACKAGE_VERSION="v1.53.0"
+PACKAGE_VERSION="v1.54.0"

 download_binary() {
     downloader --check
diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock
index 37b00586cd..819a7106ac 100644
--- a/xtask/Cargo.lock
+++ b/xtask/Cargo.lock
@@ -529,6 +529,17 @@ dependencies = [
  "wasi 0.9.0+wasi-snapshot-preview1",
 ]

+[[package]]
+name = "getrandom"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi 0.11.0+wasi-snapshot-preview1",
+]
+
 [[package]]
 name = "gimli"
 version = "0.27.3"
@@ -1051,7 +1062,7 @@ version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
 dependencies = [
- "getrandom",
+ "getrandom 0.1.16",
  "libc",
  "rand_chacha",
  "rand_core",
@@ -1074,7 +1085,7 @@ version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
 dependencies = [
- "getrandom",
+ "getrandom 0.1.16",
 ]

 [[package]]
@@ -1184,12 +1195,27 @@ dependencies = [
  "cc",
  "libc",
  "once_cell",
- "spin",
- "untrusted",
+ "spin 0.5.2",
+ "untrusted 0.7.1",
  "web-sys",
  "winapi",
 ]

+[[package]]
+name = "ring"
+version = "0.17.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "getrandom 0.2.15",
+ "libc",
+ "spin 0.9.8",
+ "untrusted 0.9.0",
+ "windows-sys 0.52.0",
+]
+
 [[package]]
 name = "rustc-demangle"
 version = "0.1.23"
@@ -1211,12 +1237,12 @@ dependencies = [

 [[package]]
 name = "rustls"
-version = "0.21.6"
+version = "0.21.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb"
+checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
 dependencies = [
  "log",
- "ring",
+ "ring 0.17.8",
  "rustls-webpki",
  "sct",
 ]
@@ -1244,12 +1270,12 @@

 [[package]]
 name = "rustls-webpki"
-version = "0.101.4"
+version = "0.101.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d"
+checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765"
 dependencies = [
- "ring",
- "untrusted",
+ "ring 0.17.8",
+ "untrusted 0.9.0",
 ]

 [[package]]
@@ -1288,8 +1314,8 @@ version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4"
 dependencies = [
- "ring",
- "untrusted",
+ "ring 0.16.20",
+ "untrusted 0.7.1",
 ]

 [[package]]
@@ -1440,6 +1466,12 @@ version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"

+[[package]]
+name = "spin"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
+
 [[package]]
 name = "strsim"
 version = "0.11.0"
@@ -1703,6 +1735,12 @@ version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"

+[[package]]
+name = "untrusted"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
+
 [[package]]
 name = "url"
 version = "2.4.0"
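With these updates the lockfile intentionally pins two major versions of getrandom, ring, spin, and untrusted side by side, which Cargo disambiguates with the "name version" strings visible in the dependency lists above. To audit duplicates like these locally, cargo's built-in tree view is enough; a sketch, run from the xtask directory (the package spec is illustrative):

    cargo tree --duplicates
    cargo tree --invert getrandom@0.1.16

--duplicates lists every crate locked at more than one version; --invert flips the tree to show which dependents still pull in the older getrandom.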