diff --git a/.apigentools-info b/.apigentools-info index e7a274eae570f..67fde5cdb5950 100644 --- a/.apigentools-info +++ b/.apigentools-info @@ -4,13 +4,13 @@ "spec_versions": { "v1": { "apigentools_version": "1.6.6", - "regenerated": "2024-05-03 14:27:52.024982", - "spec_repo_commit": "27a03d2c" + "regenerated": "2024-05-09 08:10:38.432153", + "spec_repo_commit": "f983a01b" }, "v2": { "apigentools_version": "1.6.6", - "regenerated": "2024-05-03 14:27:59.973440", - "spec_repo_commit": "27a03d2c" + "regenerated": "2024-05-09 08:10:46.472041", + "spec_repo_commit": "f983a01b" } } } \ No newline at end of file diff --git a/Makefile b/Makefile index c517cb60e5b99..c98f00009f05b 100644 --- a/Makefile +++ b/Makefile @@ -130,7 +130,7 @@ placeholders: hugpython update_pre_build hugpython: local/etc/requirements3.txt @${PY3} -m venv --clear $@ && . $@/bin/activate && $@/bin/pip install --upgrade pip wheel && $@/bin/pip install -r $<;\ if [[ "$(CI_COMMIT_REF_NAME)" != "" ]]; then \ - $@/bin/pip install https://binaries.ddbuild.io/dd-source/python/assetlib-0.0.28306535-py3-none-any.whl; \ + $@/bin/pip install https://binaries.ddbuild.io/dd-source/python/assetlib-0.0.33893004-py3-none-any.whl; \ fi update_pre_build: hugpython diff --git a/assets/scripts/datadog-docs.js b/assets/scripts/datadog-docs.js index d14dc8b439ea7..9b4be60914f70 100644 --- a/assets/scripts/datadog-docs.js +++ b/assets/scripts/datadog-docs.js @@ -411,11 +411,8 @@ window.addEventListener( 'popstate', function (event) { setMobileNav(); - if (event.state) { - loadPage(window.location.href); - closeNav(); - getPathElement(); - } - }, - false + loadPage(window.location.href); + closeNav(); + getPathElement(); + } ); diff --git a/config/_default/menus/main.en.yaml b/config/_default/menus/main.en.yaml index d0bae423d9ae6..2e61f73b33b86 100644 --- a/config/_default/menus/main.en.yaml +++ b/config/_default/menus/main.en.yaml @@ -901,25 +901,25 @@ menu: parent: dev_tools_service_check identifier: dev_tools_service_check_api weight: 603 - - name: IDE Integrations - url: developers/ide_integrations/ + - name: IDE Plugins + url: developers/ide_plugins/ parent: dev_tools - identifier: ide_integrations + identifier: ide_plugins weight: 7 - name: JetBrains IDEs - url: developers/ide_integrations/idea/ - parent: ide_integrations - identifier: ide_integrations_idea + url: developers/ide_plugins/idea/ + parent: ide_plugins + identifier: ide_plugins_idea weight: 701 - name: VS Code - url: developers/ide_integrations/vscode/ - parent: ide_integrations - identifier: ide_integrations_vscode + url: developers/ide_plugins/vscode/ + parent: ide_plugins + identifier: ide_plugins_vscode weight: 702 - name: Visual Studio - url: developers/ide_integrations/visual_studio/ - parent: ide_integrations - identifier: ide_integrations_visual_studio + url: developers/ide_plugins/visual_studio/ + parent: ide_plugins + identifier: ide_plugins_visual_studio weight: 703 - name: Community url: developers/community/ @@ -1109,11 +1109,21 @@ menu: identifier: cloudcraft_api_budgets_export parent: cloudcraft_api_budgets weight: 1 + - name: Teams + url: cloudcraft/api/teams/ + identifier: cloudcraft_api_teams + parent: cloudcraft_api + weight: 5 + - name: List teams + url: cloudcraft/api/teams/#list-teams + identifier: cloudcraft_api_teams_list + parent: cloudcraft_api_teams + weight: 1 - name: Users url: cloudcraft/api/users/ identifier: cloudcraft_api_users parent: cloudcraft_api - weight: 5 + weight: 6 - name: Get user profile url: cloudcraft/api/users/#get-user-profile 
identifier: cloudcraft_api_users_info @@ -1344,66 +1354,71 @@ menu: parent: monitor_types identifier: monitor_types_auditlogs weight: 205 + - name: Change + url: monitors/types/change-alert/ + parent: monitor_types + identifier: monitor_types_change + weight: 206 - name: CI url: monitors/types/ci/ parent: monitor_types identifier: monitor_types_ci - weight: 206 + weight: 207 - name: Cloud Cost url: monitors/types/cloud_cost/ parent: monitor_types identifier: monitor_types_cloud_cost - weight: 207 + weight: 208 - name: Composite url: monitors/types/composite/ parent: monitor_types identifier: monitor_types_composite - weight: 208 + weight: 209 - name: Database Monitoring url: monitors/types/database_monitoring/ parent: monitor_types identifier: monitor_types_database_monitoring - weight: 209 + weight: 210 - name: Error Tracking url: monitors/types/error_tracking/ parent: monitor_types identifier: monitor_types_error_tracking - weight: 210 + weight: 211 - name: Event url: monitors/types/event/ parent: monitor_types identifier: monitor_types_event - weight: 211 + weight: 212 - name: Forecast url: monitors/types/forecasts/ parent: monitor_types identifier: monitor_types_forecasts - weight: 212 + weight: 213 - name: Integration url: monitors/types/integration/ parent: monitor_types identifier: monitor_types_integration - weight: 213 + weight: 214 - name: Live Process url: monitors/types/process/ parent: monitor_types identifier: monitor_types_process - weight: 214 + weight: 215 - name: Logs url: monitors/types/log/ parent: monitor_types identifier: monitor_types_log - weight: 215 + weight: 216 - name: Network url: monitors/types/network/ parent: monitor_types identifier: monitor_types_network - weight: 216 + weight: 217 - name: Network Performance url: monitors/types/network_performance/ parent: monitor_types identifier: monitor_types_network_performance - weight: 217 + weight: 218 - name: Outlier url: monitors/types/outlier/ parent: monitor_types @@ -1413,27 +1428,27 @@ menu: url: monitors/types/process_check/ parent: monitor_types identifier: monitor_types_process_check - weight: 219 + weight: 220 - name: Real User Monitoring url: monitors/types/real_user_monitoring/ parent: monitor_types identifier: monitor_types_rum - weight: 220 + weight: 221 - name: Service Check url: monitors/types/service_check/ parent: monitor_types identifier: monitor_types_service_check - weight: 221 + weight: 222 - name: SLO Alerts url: monitors/types/slo/ parent: monitor_types identifier: monitor_types_slo - weight: 222 + weight: 223 - name: Watchdog url: monitors/types/watchdog/ parent: monitor_types identifier: monitor_types_watchdog - weight: 223 + weight: 224 - name: Notifications url: monitors/notify/ parent: alerting @@ -1835,6 +1850,11 @@ menu: url: error_tracking/rum parent: error_tracking identifier: error_tracking_rum + weight: 6 + - name: Troubleshooting + url: error_tracking/troubleshooting + parent: error_tracking + identifier: error_tracking_troubleshooting weight: 7 - name: Service Level Objectives url: service_management/service_level_objectives/ @@ -2963,6 +2983,11 @@ menu: parent: tracing_error_tracking identifier: tracing_error_tracking_exception_replay weight: 1005 + - name: Troubleshooting + url: error_tracking/troubleshooting + parent: tracing_error_tracking + identifier: tracing_error_tracking_troubleshooting + weight: 1006 - name: Data Security url: tracing/configure_data_security/ parent: tracing @@ -4192,6 +4217,11 @@ menu: parent: log_management_error_tracking identifier: 
log_management_error_tracking_suspect_commits weight: 607 + - name: Troubleshooting + url: error_tracking/troubleshooting + parent: log_management_error_tracking + identifier: log_management_error_tracking_troubleshooting + weight: 609 - name: Guides url: logs/guide/ parent: log_management @@ -5249,6 +5279,11 @@ menu: parent: rum_error_tracking identifier: rum_error_tracking_suspect_commits weight: 807 + - name: Troubleshooting + url: error_tracking/troubleshooting + parent: rum_error_tracking + identifier: rum_error_tracking_troubleshooting + weight: 808 - name: Guides url: real_user_monitoring/guide/ parent: rum diff --git a/config/_default/menus/main.fr.yaml b/config/_default/menus/main.fr.yaml index ae0fa696bffc2..e7d192aed8061 100644 --- a/config/_default/menus/main.fr.yaml +++ b/config/_default/menus/main.fr.yaml @@ -876,25 +876,25 @@ menu: parent: dev_tools_service_check identifier: dev_tools_service_check_api weight: 603 - - name: IDE Integrations - url: developers/ide_integrations/ + - name: IDE Plugins + url: developers/ide_plugins/ parent: dev_tools - identifier: ide_integrations + identifier: ide_plugins weight: 7 - name: IntelliJ IDEA - url: developers/ide_integrations/idea/ - parent: ide_integrations - identifier: ide_integrations_idea + url: developers/ide_plugins/idea/ + parent: ide_plugins + identifier: ide_plugins_idea weight: 701 - name: VS Code - url: developers/ide_integrations/vscode/ - parent: ide_integrations - identifier: ide_integrations_vscode + url: developers/ide_plugins/vscode/ + parent: ide_plugins + identifier: ide_plugins_vscode weight: 702 - name: Visual Studio - url: developers/ide_integrations/visual_studio/ - parent: ide_integrations - identifier: ide_integrations_visual_studio + url: developers/ide_plugins/visual_studio/ + parent: ide_plugins + identifier: ide_plugins_visual_studio weight: 703 - name: Communauté url: developers/community/ diff --git a/config/_default/menus/main.ja.yaml b/config/_default/menus/main.ja.yaml index c417921b907c9..0c72be0a834e4 100644 --- a/config/_default/menus/main.ja.yaml +++ b/config/_default/menus/main.ja.yaml @@ -877,24 +877,24 @@ menu: identifier: dev_tools_service_check_api weight: 603 - name: IDE インテグレーション - url: developers/ide_integrations/ + url: developers/ide_plugins/ parent: dev_tools - identifier: ide_integrations + identifier: ide_plugins weight: 7 - name: IntelliJ IDEA - url: developers/ide_integrations/idea/ - parent: ide_integrations - identifier: ide_integrations_idea + url: developers/ide_plugins/idea/ + parent: ide_plugins + identifier: ide_plugins_idea weight: 701 - name: VS Code - url: developers/ide_integrations/vscode/ - parent: ide_integrations - identifier: ide_integrations_vscode + url: developers/ide_plugins/vscode/ + parent: ide_plugins + identifier: ide_plugins_vscode weight: 702 - name: Visual Studio - url: developers/ide_integrations/visual_studio/ - parent: ide_integrations - identifier: ide_integrations_visual_studio + url: developers/ide_plugins/visual_studio/ + parent: ide_plugins + identifier: ide_plugins_visual_studio weight: 703 - name: コミュニティ url: developers/community/ diff --git a/config/_default/menus/main.ko.yaml b/config/_default/menus/main.ko.yaml index a98155f4b27dd..9b95036a17021 100644 --- a/config/_default/menus/main.ko.yaml +++ b/config/_default/menus/main.ko.yaml @@ -896,25 +896,25 @@ menu: parent: dev_tools_service_check url: api/v1/service-checks/ weight: 603 - - identifier: ide_integrations + - identifier: ide_plugins name: IDE 통합 parent: dev_tools - url: 
developers/ide_integrations/ + url: developers/ide_plugins/ weight: 7 - - identifier: ide_integrations_idea + - identifier: ide_plugins_idea name: JetBrains IDE - parent: ide_integrations - url: developers/ide_integrations/idea/ + parent: ide_plugins + url: developers/ide_plugins/idea/ weight: 701 - - identifier: ide_integrations_vscode + - identifier: ide_plugins_vscode name: VS 코드 - parent: ide_integrations - url: developers/ide_integrations/vscode/ + parent: ide_plugins + url: developers/ide_plugins/vscode/ weight: 702 - - identifier: ide_integrations_visual_studio + - identifier: ide_plugins_visual_studio name: Visual Studio - parent: ide_integrations - url: developers/ide_integrations/visual_studio/ + parent: ide_plugins + url: developers/ide_plugins/visual_studio/ weight: 703 - identifier: dev_community name: 커뮤니티 diff --git a/content/en/account_management/org_settings/service_accounts.md b/content/en/account_management/org_settings/service_accounts.md index d55eafae66825..1a9edf2bdcbfe 100644 --- a/content/en/account_management/org_settings/service_accounts.md +++ b/content/en/account_management/org_settings/service_accounts.md @@ -57,7 +57,9 @@ To modify a service account, click on one in the service accounts list. 2. Update any fields you would like to change. You can edit the name, email address, status, and roles. 3. Click **Save**. -To disable a service account, follow the procedure above to edit the service account and set the status to **Disabled**. +Disabling a service account requires the User Manage Access permission in addition to Service Account Write. + +To disable a service account, follow the previous procedure to edit the service account and set the status to **Disabled**. ### Create or revoke application keys diff --git a/content/en/account_management/scim/_index.md b/content/en/account_management/scim/_index.md index 68e84e26a6e80..8ad5ee461dd25 100644 --- a/content/en/account_management/scim/_index.md +++ b/content/en/account_management/scim/_index.md @@ -47,7 +47,7 @@ To avoid losing access to your data, Datadog strongly recommends that you create ## Email verification -Creating a new user with SCIM triggers an email to the user. For first time access, you are required to log in through the the invite link shared by email. The link is active for 30 days. If it expires, go to the [user settings page][7] and select a user to resend an invite link. +Creating a new user with SCIM triggers an email to the user. For first-time access, you are required to log in through the invite link shared by email. The link is active for 2 days. If it expires, go to the [user settings page][7] and select a user to resend an invite link. ## Further Reading diff --git a/content/en/agent/logs/_index.md b/content/en/agent/logs/_index.md index 32b68e3221dc5..22fcb4bf5474c 100644 --- a/content/en/agent/logs/_index.md +++ b/content/en/agent/logs/_index.md @@ -22,15 +22,17 @@ further_reading: Log collection requires the Datadog Agent v6.0+. Older versions of the Agent do not include the `log collection` interface. If you are not using the Agent already, follow the [Agent installation instructions][1]. +See [Observability Pipelines][2] if you want to send logs using another vendor's collector or forwarder, or if you want to preprocess your log data within your environment before shipping. + ## Activate log collection -Collecting logs is **not enabled** by default in the Datadog Agent.
If you are running the Agent in a Kubernetes or Docker environment, see the dedicated [Kubernetes Log Collection][2] or [Docker Log Collection][3] documentation. +Collecting logs is **not enabled** by default in the Datadog Agent. If you are running the Agent in a Kubernetes or Docker environment, see the dedicated [Kubernetes Log Collection][3] or [Docker Log Collection][4] documentation. -To enable log collection with an Agent running on your host, change `logs_enabled: false` to `logs_enabled: true` in the Agent's [main configuration file][4] (`datadog.yaml`). +To enable log collection with an Agent running on your host, change `logs_enabled: false` to `logs_enabled: true` in the Agent's [main configuration file][5] (`datadog.yaml`). {{< agent-config type="log collection configuration" filename="datadog.yaml" collapsible="true">}} -Starting with Agent v6.19+/v7.19+, HTTPS transport is the default transport used. For more details on how to enforce HTTPS/TCP transport, refer to the [Agent transport documentation][5]. +Starting with Agent v6.19+/v7.19+, HTTPS transport is the default transport used. For more details on how to enforce HTTPS/TCP transport, refer to the [Agent transport documentation][6]. To send logs with environment variables, configure the following: @@ -42,13 +44,13 @@ After activating log collection, the Agent is ready to forward logs to Datadog. Datadog Agent v6 can collect logs and forward them to Datadog from files, the network (TCP or UDP), journald, and Windows channels: -1. In the `conf.d/` directory at the root of your [Agent's configuration directory][4], create a new `.d/` folder that is accessible by the Datadog user. +1. In the `conf.d/` directory at the root of your [Agent's configuration directory][5], create a new `.d/` folder that is accessible by the Datadog user. 2. Create a new `conf.yaml` file in this new folder. 3. Add a custom log collection configuration group with the parameters below. -4. [Restart your Agent][6] to take into account this new configuration. -5. Run the [Agent's status subcommand][7] and look for `` under the Checks section. +4. [Restart your Agent][7] to take into account this new configuration. +5. Run the [Agent's status subcommand][8] and look for `` under the Checks section. -If there are permission errors, see [Permission issues tailing log files][12] to troubleshoot. +If there are permission errors, see [Permission issues tailing log files][9] to troubleshoot. Below are examples of custom log collection setup: @@ -156,29 +158,30 @@ List of all available parameters for log collection: | `port` | Yes | If `type` is **tcp** or **udp**, set the port for listening to logs. | | `path` | Yes | If `type` is **file** or **journald**, set the file path for gathering logs. | | `channel_path` | Yes | If `type` is **windows_event**, list the Windows event channels for collecting logs. | -| `service` | Yes | The name of the service owning the log. If you instrumented your service with [Datadog APM][8], this must be the same service name. Check the [unified service tagging][9] instructions when configuring `service` across multiple data types. | -| `source` | Yes | The attribute that defines which integration is sending the logs. If the logs do not come from an existing integration, then this field may include a custom source name. However, it is recommended that you match this value to the namespace of any related [custom metrics][10] you are collecting, for example: `myapp` from `myapp.request.count`. 
| +| `service` | Yes | The name of the service owning the log. If you instrumented your service with [Datadog APM][10], this must be the same service name. Check the [unified service tagging][11] instructions when configuring `service` across multiple data types. | +| `source` | Yes | The attribute that defines which integration is sending the logs. If the logs do not come from an existing integration, then this field may include a custom source name. However, it is recommended that you match this value to the namespace of any related [custom metrics][12] you are collecting, for example: `myapp` from `myapp.request.count`. | | `include_units` | No | If `type` is **journald**, list of the specific journald units to include. | | `exclude_paths` | No | If `type` is **file**, and `path` contains a wildcard character, list the matching file or files to exclude from log collection. This is available for Agent version >= 6.18. | | `exclude_units` | No | If `type` is **journald**, list of the specific journald units to exclude. | | `sourcecategory` | No | The attribute used to define the category a source attribute belongs to, for example: `source:postgres, sourcecategory:database` or `source: apache, sourcecategory: http_web_access`. | | `start_position` | No | If `type` is **file**, set the position for the Agent to start reading the file. Valid values are `beginning` and `end` (default: `end`). If `path` contains a wildcard character, `beginning` is not supported. _Added in Agent v6.19/v7.19_<br><br>If `type` is **journald**, set the position for the Agent to start reading the journal. Valid values are `beginning`, `end`, `forceBeginning`, and `forceEnd` (default: `end`). With `force` options, the Agent ignores the cursor stored on disk and always reads from the beginning or the end of the journal when it starts. _Added in Agent v7.38_ | | `encoding` | No | If `type` is **file**, set the encoding for the Agent to read the file. Set it to `utf-16-le` for UTF-16 little-endian, `utf-16-be` for UTF-16 big-endian, or `shift-jis` for Shift JIS. If set to any other value, the Agent reads the file as UTF-8. _Added `utf-16-le` and `utf-16be` in Agent v6.23/v7.23, `shift-jis` in Agent v6.34/v7.34_ | -| `tags` | No | A list of tags added to each log collected ([learn more about tagging][11]). | +| `tags` | No | A list of tags added to each log collected ([learn more about tagging][13]). | ## Further Reading {{< partial name="whats-next/whats-next.html" >}} [1]: https://app.datadoghq.com/account/settings/agent/latest -[2]: /agent/kubernetes/log/ -[3]: /agent/docker/log/ -[4]: /agent/configuration/agent-configuration-files/ -[5]: /agent/logs/log_transport/ -[6]: /agent/configuration/agent-commands/#restart-the-agent -[7]: /agent/configuration/agent-commands/#agent-status-and-information -[8]: /tracing/ -[9]: /getting_started/tagging/unified_service_tagging -[10]: /metrics/custom_metrics/#overview -[11]: /getting_started/tagging/ -[12]: /logs/guide/log-collection-troubleshooting-guide/#permission-issues-tailing-log-files +[2]: https://docs.datadoghq.com/observability_pipelines/ +[3]: /agent/kubernetes/log/ +[4]: /agent/docker/log/ +[5]: /agent/configuration/agent-configuration-files/ +[6]: /agent/logs/log_transport/ +[7]: /agent/configuration/agent-commands/#restart-the-agent +[8]: /agent/configuration/agent-commands/#agent-status-and-information +[9]: /logs/guide/log-collection-troubleshooting-guide/#permission-issues-tailing-log-files +[10]: /tracing/ +[11]: /getting_started/tagging/unified_service_tagging +[12]: /metrics/custom_metrics/#overview +[13]: /getting_started/tagging/ diff --git a/content/en/cloudcraft/api/teams/_index.md b/content/en/cloudcraft/api/teams/_index.md new file mode 100644 index 0000000000000..a4a7131764434 --- /dev/null +++ b/content/en/cloudcraft/api/teams/_index.md @@ -0,0 +1,5 @@ +--- +title: Teams +--- + +{{< openapi-ref-docs url="cloudcraft.json" tag="Teams">}} diff --git a/content/en/code_analysis/static_analysis/_index.md b/content/en/code_analysis/static_analysis/_index.md index 24b82c4cf7a81..2dd260547d362 100644 --- a/content/en/code_analysis/static_analysis/_index.md +++ b/content/en/code_analysis/static_analysis/_index.md @@ -65,8 +65,9 @@ Static Analysis currently supports scanning the following languages and technolo ### IDEs {{< whatsnext desc="With Static Analysis, you can identify code vulnerabilities as you edit a file in your Integrated Development Environment (IDE).
See the documentation for information about the following integrations:">}} - {{< nextlink href="developers/ide_integrations/idea/" >}}Datadog Plugin for IntelliJ IDEA{{< /nextlink >}} - {{< nextlink href="developers/ide_integrations/vscode/" >}}Datadog Extension for Visual Studio Code{{< /nextlink >}} + {{< nextlink href="developers/ide_plugins/idea/" >}}Datadog Plugin for JetBrains IDEs{{< /nextlink >}} + {{< nextlink href="developers/ide_plugins/vscode/" >}}Datadog Extension for Visual Studio Code{{< /nextlink >}} + {{< nextlink href="developers/ide_plugins/visual_studio/" >}}Datadog Extension for Visual Studio{{< /nextlink >}} {{< /whatsnext >}} ## Search and filter results diff --git a/content/en/code_analysis/static_analysis/setup.md b/content/en/code_analysis/static_analysis/setup.md index 76451046496e9..f87e977de04b2 100644 --- a/content/en/code_analysis/static_analysis/setup.md +++ b/content/en/code_analysis/static_analysis/setup.md @@ -207,7 +207,7 @@ To upload a SARIF report: [1]: https://app.datadoghq.com/ci/setup/code-analysis [2]: https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=sarif -[3]: /developers/ide_integrations/idea/#static-analysis +[3]: /developers/ide_plugins/idea/#static-analysis [4]: /account_management/api-app-keys/ [6]: /code_analysis/static_analysis_rules [7]: /getting_started/site/ diff --git a/content/en/code_analysis/static_analysis_rules/_index.md b/content/en/code_analysis/static_analysis_rules/_index.md index ae05d4735097c..dd193832f8625 100644 --- a/content/en/code_analysis/static_analysis_rules/_index.md +++ b/content/en/code_analysis/static_analysis_rules/_index.md @@ -193,7 +193,7 @@ cascade: title: How to use this rule steps: - Create a static-analysis.datadog.yml with the content above at the root of your repository - - Use our free IDE integrations or add Code Analysis scans to your CI pipelines + - Use our free IDE Plugins or add Code Analysis scans to your CI pipelines - Get feedback on your code footer: For more information, please read the Code Analysis documentation bottom_boxes: diff --git a/content/en/continuous_integration/pipelines/_index.md b/content/en/continuous_integration/pipelines/_index.md index a222ddeadd745..6cb8f78996f45 100644 --- a/content/en/continuous_integration/pipelines/_index.md +++ b/content/en/continuous_integration/pipelines/_index.md @@ -145,8 +145,8 @@ If your CI provider is not supported, you can try setting up Pipeline Visibility | {{< ci-details title="Running pipelines" >}}Identification of pipelines executions that are running with associated tracing.{{< /ci-details >}} | | {{< X >}} | | | {{< X >}} | | | | {{< X >}} | | {{< ci-details title="Partial retries" >}}Identification of partial retries (for example, when only a subset of jobs were retried).{{< /ci-details >}} | | {{< X >}} | | {{< X >}} | {{< X >}} | {{< X >}} | {{< X >}} | {{< X >}} | {{< X >}} | {{< X >}} | | {{< ci-details title="Step spans" >}}Step level spans are available for more granular visibility.{{< /ci-details >}} | {{< X >}} (_But are presented as job spans_) | | | | {{< X >}} | | {{< X >}} | | | {{< X >}} | -| {{< ci-details title="Manual steps" >}}Identification of when there is a job with a manual approval phase in the overall pipeline.{{< /ci-details >}} | {{< X >}} | {{< X >}} | | {{< X >}} | {{< X >}} | {{< X >}} | {{< X >}} | | | {{< X >}} | -| {{< ci-details title="Approval wait time">}}Identification of the amount of time for which a pipeline or job has been waiting for a manual approval.{{< /ci-details >}} | | | | | 
{{< X >}} | {{< X >}} | | | | | +| {{< ci-details title="Manual steps" >}}Identification of when there is a job with a manual approval phase in the overall pipeline.{{< /ci-details >}} | {{< X >}} | {{< X >}} | | {{< X >}} | {{< X >}} | {{< X >}} | {{< X >}} | | {{< X >}} | {{< X >}} | +| {{< ci-details title="Approval wait time">}}Identification of the amount of time for which a pipeline or job has been waiting for a manual approval.{{< /ci-details >}} | | | | | {{< X >}} | {{< X >}} | | | {{< X >}} | | | {{< ci-details title="Queue time" >}}Identification of the amount of time for which a pipeline or job was in the queue before execution.{{< /ci-details >}} | {{< X >}} | {{< X >}} | | {{< X >}} | {{< X >}} | | {{< X >}} | {{< X >}} | | {{< X >}} | | {{< ci-details title="Logs correlation" >}}Retrieval of pipeline or job logs from the CI provider. Logs are displayed on the Logs tab in the Pipeline Execution view.{{< /ci-details >}} | {{< X >}} | {{< X >}} | {{< X >}} | | {{< X >}} | | | | {{< X >}} | | | {{< ci-details title="Infrastructure metric correlation" >}}Correlation of host-level information for the Datadog Agent, CI pipelines, or job runners to CI pipeline execution data.{{< /ci-details >}} | {{< X >}} | {{< X >}} | | {{< X >}} | {{< X >}} | | | | | | diff --git a/content/en/continuous_integration/pipelines/awscodepipeline.md b/content/en/continuous_integration/pipelines/awscodepipeline.md index 688ec0068a30a..231336d7ff08e 100644 --- a/content/en/continuous_integration/pipelines/awscodepipeline.md +++ b/content/en/continuous_integration/pipelines/awscodepipeline.md @@ -32,6 +32,7 @@ Set up tracing on AWS CodePipeline to collect data about pipeline executions, an | [Partial retries][14] | Partial pipelines | View partially retried pipeline executions. | | *[Running pipelines][15] | Running pipelines | View pipeline executions that are running. | | **Logs correlation | Logs correlation | Correlate pipeline and job spans to logs and enable [job log correlation](#enable-log-correlation). | +| [Approval wait time][17] | Approval wait time | View the amount of time jobs and pipelines wait for manual approvals. | *AWS CodePipeline running pipelines don't have Git information until they have finished.\ **AWS CodePipeline logs correlation is only available for AWS CodeBuild actions. @@ -145,3 +146,4 @@ The **CI Pipeline List** page shows data for only the [default branch][13] of ea [14]: /glossary/#partial-retry [15]: /glossary/#running-pipeline [16]: /logs/guide/send-aws-services-logs-with-the-datadog-lambda-function +[17]: /glossary/#approval-wait-time diff --git a/content/en/dashboards/scheduled_reports.md b/content/en/dashboards/scheduled_reports.md index 5e929657eb59b..7c7b345b0d562 100644 --- a/content/en/dashboards/scheduled_reports.md +++ b/content/en/dashboards/scheduled_reports.md @@ -67,21 +67,21 @@ From the configuration modal that opens, you can pause an existing report or cre ## Permissions Users need the **Dashboards Report Write** [permission][2] to create and edit report schedules. -This permission can be granted by another user with the **User Access Manage** permission, and is available by default to users with the **Datadog Admin** [out-of-the-box role][3]. +This permission can be granted by another user with the **User Access Manage** permission. {{< img src="dashboards/scheduled_reports/dashboard_permissions.png" alt="A screenshot of an individual user's permissions from within the organization settings page. 
The dashboards report write permission is highlighted under the dashboards section" style="width:90%;" >}} -Users with the **Org Management** permission can enable or disable the scheduled reports feature for their organization from the **Settings** tab under [Public Sharing][4] in **Organization Settings**. +Users with the **Org Management** permission can enable or disable the scheduled reports feature for their organization from the **Settings** tab under [Public Sharing][3] in **Organization Settings**. {{< img src="dashboards/scheduled_reports/report_management.png" alt="The Report Management setting under the Settings tab in Public Sharing within Organization Settings in Datadog with the setting Enabled" style="width:90%;" >}} ## Unsupported widget types The following widget types are **not** supported and will be shown as empty in the report: -- [Iframe][5] -- [Image][6] -- [Hostmap][7] -- [Run Workflow][8] +- [Iframe][4] +- [Image][5] +- [Hostmap][6] +- [Run Workflow][7] ## Further Reading @@ -89,10 +89,9 @@ The following widget types are **not** supported and will be shown as empty in t [1]: /dashboards/#get-started [2]: /account_management/rbac/permissions/ -[3]: /account_management/rbac/permissions/#out-of-the-box-roles -[4]: /account_management/org_settings/#public-sharing -[5]: /dashboards/widgets/iframe/ -[6]: /dashboards/widgets/image/ -[7]: /dashboards/widgets/hostmap/ -[8]: /dashboards/widgets/run_workflow/ +[3]: /account_management/org_settings/#public-sharing +[4]: /dashboards/widgets/iframe/ +[5]: /dashboards/widgets/image/ +[6]: /dashboards/widgets/hostmap/ +[7]: /dashboards/widgets/run_workflow/ diff --git a/content/en/dashboards/widgets/change.md b/content/en/dashboards/widgets/change.md index 91559525edaa1..2e9501d912202 100644 --- a/content/en/dashboards/widgets/change.md +++ b/content/en/dashboards/widgets/change.md @@ -66,4 +66,4 @@ This widget can be used with the **[Dashboards API][2]**. See the following tabl [2]: /api/latest/dashboards/ [3]: /dashboards/graphing_json/widget_json/ [6]: /monitors/types/metric/?tab=change -[7]: /monitors/guide/change-alert/ \ No newline at end of file +[7]: /monitors/types/change-alert/ \ No newline at end of file diff --git a/content/en/data_jobs/kubernetes.md b/content/en/data_jobs/kubernetes.md index 589745c42650d..b552a2ca07cef 100644 --- a/content/en/data_jobs/kubernetes.md +++ b/content/en/data_jobs/kubernetes.md @@ -53,15 +53,15 @@ You can install the Datadog Agent using the [Datadog Operator][3] or [Helm][4]. metadata: name: datadog spec: - features: - apm: - enabled: true - hostPortConfig: - enabled: true - hostPort: 8126 - admissionController: - enabled: true - mutateUnlabelled: false + features: + apm: + enabled: true + hostPortConfig: + enabled: true + hostPort: 8126 + admissionController: + enabled: true + mutateUnlabelled: false global: tags: - 'data_workload_monitoring_trial:true' diff --git a/content/en/database_monitoring/setup_mysql/aurora.md b/content/en/database_monitoring/setup_mysql/aurora.md index 8c206677675e4..478272d5d1289 100644 --- a/content/en/database_monitoring/setup_mysql/aurora.md +++ b/content/en/database_monitoring/setup_mysql/aurora.md @@ -350,7 +350,7 @@ To avoid exposing the `datadog` user's password in plain text, use the Agent's [ ## Install the RDS Integration -To collect more comprehensive database metrics from AWS, install the [RDS integration][8] (optional). 
+To see infrastructure metrics from AWS, such as CPU, alongside the database telemetry in DBM, install the [RDS integration][8] (optional). ## Troubleshooting diff --git a/content/en/database_monitoring/setup_mysql/rds.md b/content/en/database_monitoring/setup_mysql/rds.md index 04f06f728c623..6d4667b9e0572 100644 --- a/content/en/database_monitoring/setup_mysql/rds.md +++ b/content/en/database_monitoring/setup_mysql/rds.md @@ -342,7 +342,7 @@ To avoid exposing the `datadog` user's password in plain text, use the Agent's [ ## Install the RDS Integration -To collect more comprehensive database metrics from AWS, install the [RDS integration][8] (optional). +To see infrastructure metrics from AWS, such as CPU, alongside the database telemetry in DBM, install the [RDS integration][8] (optional). ## Troubleshooting diff --git a/content/en/developers/ide_integrations/_index.md b/content/en/developers/ide_integrations/_index.md deleted file mode 100644 index 1064a242edc66..0000000000000 --- a/content/en/developers/ide_integrations/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: IDE Integrations -kind: Documentation -description: Learn how to set up Datadog IDE integrations. -disable_toc: false ---- - -## Overview - -Use Datadog integrations in your preferred integrated development environment (IDE) to interact with Datadog services as you code. - -{{< whatsnext desc="See the documentation for information about the following integrations:">}} - {{< nextlink href="developers/ide_integrations/idea/" >}}JetBrains IDEs: The Datadog plugin for IntelliJ IDEA, GoLand, PhpStorm, and PyCharm.{{< /nextlink >}} - {{< nextlink href="developers/ide_integrations/vscode/" >}}Visual Studio Code: The Datadog extension for VS Code.{{< /nextlink >}} - {{< nextlink href="developers/ide_integrations/visual_studio/" >}}Visual Studio: The Datadog extension for .NET developers.{{< /nextlink >}} -{{< /whatsnext >}} \ No newline at end of file diff --git a/content/en/developers/ide_plugins/_index.md b/content/en/developers/ide_plugins/_index.md new file mode 100644 index 0000000000000..79fbb81d1bcc9 --- /dev/null +++ b/content/en/developers/ide_plugins/_index.md @@ -0,0 +1,18 @@ +--- +title: Datadog IDE Plugins +kind: Documentation +description: Learn how to set up Datadog IDE plugins. +disable_toc: false +aliases: +- '/developers/ide_integrations/' +--- + +## Overview + +Use Datadog plugins in your preferred integrated development environment (IDE) to interact with Datadog services as you code. 
+ +{{< whatsnext desc="See the documentation for information about the following integrations:">}} + {{< nextlink href="developers/ide_plugins/idea/" >}}JetBrains IDEs: The Datadog plugin for IntelliJ IDEA, GoLand, PhpStorm, and PyCharm.{{< /nextlink >}} + {{< nextlink href="developers/ide_plugins/vscode/" >}}Visual Studio Code: The Datadog extension for VS Code.{{< /nextlink >}} + {{< nextlink href="developers/ide_plugins/visual_studio/" >}}Visual Studio: The Datadog extension for .NET developers.{{< /nextlink >}} +{{< /whatsnext >}} \ No newline at end of file diff --git a/content/en/developers/ide_integrations/idea/_index.md b/content/en/developers/ide_plugins/idea/_index.md similarity index 84% rename from content/en/developers/ide_integrations/idea/_index.md rename to content/en/developers/ide_plugins/idea/_index.md index 9ff67360ff38c..3ade31735047d 100644 --- a/content/en/developers/ide_integrations/idea/_index.md +++ b/content/en/developers/ide_plugins/idea/_index.md @@ -3,6 +3,8 @@ title: Datadog Plugin for JetBrains IDEs kind: documentation disable_toc: false is_beta: true +aliases: +- '/developers/ide_integrations/idea/' further_reading: - link: "/getting_started/profiler/" tag: "Documentation" @@ -25,7 +27,7 @@ further_reading: The Datadog plugin for JetBrains IDEs is available for IDEA, GoLand, PhpStorm, and PyCharm. It helps you improve software performance by providing meaningful code-level insights directly in the IDE based on real-time observability data. -{{< img src="/developers/ide_integrations/idea/overview1.png" alt="The Datadog tool window open in IDEA" style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/overview1.png" alt="The Datadog tool window open in IDEA" style="width:100%;" >}} The **Code Insights** view keeps you informed about: - Issues from [Error Tracking][6] @@ -59,7 +61,7 @@ The **Static Analysis** integration analyzes your code (locally) against predefi 1. If you receive a prompt notifying you that Datadog is a third-party plugin, click **Accept**. 1. Click **Restart IDE**. -{{< img src="/developers/ide_integrations/idea/install-plugin.png" alt="The Datadog plugin" style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/install-plugin.png" alt="The Datadog plugin" style="width:100%;" >}} Alternatively, you can install the plugin from the [JetBrains Marketplace][4]. @@ -88,7 +90,7 @@ To remove a service, select it in the **Services** table and click the minus ico ## Code Insights The **Code Insights** tab displays insights generated by the Datadog platform that are relevant to your current project. The insights are grouped into three categories: performance, reliability, and security. -{{< img src="/developers/ide_integrations/idea/code-insights.png" alt="The Code Insights tab." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/code-insights.png" alt="The Code Insights tab." style="width:100%;" >}} Code Insights include a detailed description for each issue, and links to: - The related source code location @@ -113,7 +115,7 @@ The available profiling types usually include options like **CPU Time** and **Al The **Top List** sub-tab shows the methods that consume the most resources based on the aggregated profile data loaded from the Datadog servers. These are the methods that are most likely candidates for optimization. 
-{{< img src="/developers/ide_integrations/idea/top-list1.png" alt="The Top-list view" style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/top-list1.png" alt="The Top-list view" style="width:100%;" >}} - Double-clicking an item in the list (or selecting **Jump to Source** from the context menu) opens a source code editor showing where the method is defined. - To see a flame graph visualization of a method, select **Search in Flame Graph** from the context menu. @@ -130,7 +132,7 @@ Right-click on a method in the call tree to see options to navigate to the sourc A flame graph is a visualization of profiling samples that shows stack traces and their relative frequency during the sample period. The Datadog plugin collects multiple individual profiles from the requested time frame, and aggregates them. Each individual profile covers a 60 second interval within the requested time frame. -{{< img src="/developers/ide_integrations/idea/flamegraph1.png" alt="A flame graph showing CPU Time over the past hour" style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/flamegraph1.png" alt="A flame graph showing CPU Time over the past hour" style="width:100%;" >}} Each time you change the profile type, the time frame, or the environment, the Datadog plugin generates a new flame graph. @@ -146,33 +148,33 @@ Hovering over a method displays a tooltip with the following information: Profiling samples include stack trace and line number information. Use the **Separate Flame Graph by** button to switch between separating frames by method or line number. -{{< img src="/developers/ide_integrations/idea/separate-flamegraph-by.png" alt="Use the tooltip button to separate frames by method or line number" style="width:40%;" >}} +{{< img src="/developers/ide_plugins/idea/separate-flamegraph-by.png" alt="Use the tooltip button to separate frames by method or line number" style="width:40%;" >}} ### Source highlighting When the Continuous Profiler tab is active, the plugin adds code highlights to the source code editor margin. For Top Methods, an icon appears in the editor margin, and line-level highlights appear in the code based on the active Profiling data. - Hover over the icon to see more information. - Click the icon to open the top list Profiling tab or open Profiling in Datadog. 
- {{< img src="/developers/ide_integrations/idea/interest-options.png" alt="Click the Datadog icon to open the Profiling data in a tab or in Datadog" style="width:100%;" >}} + {{< img src="/developers/ide_plugins/idea/interest-options.png" alt="Click the Datadog icon to open the Profiling data in a tab or in Datadog" style="width:100%;" >}} The active Profiling tab also affects the project tree view, which is annotated with the selected profile's metrics: -{{< img src="/developers/ide_integrations/idea/project-tree-view.png" alt="The project tree annotated with profile metrics from a profile tab" style="width:60%;" >}} +{{< img src="/developers/ide_plugins/idea/project-tree-view.png" alt="The project tree annotated with profile metrics from a profile tab" style="width:60%;" >}} ## Logs insights Log patterns from Datadog are matched directly to lines of code in your editor for your Java, Go, and Python source files: -{{< img src="/developers/ide_integrations/idea/log-patterns.png" alt="A log line showing log events from Datadog" style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/log-patterns.png" alt="A log line showing log events from Datadog" style="width:100%;" >}} A popup shows runtime values from the log entries: -{{< img src="/developers/ide_integrations/idea/log-patterns-popup.png" alt="A popup showing log patterns from Datadog" style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/log-patterns-popup.png" alt="A popup showing log patterns from Datadog" style="width:100%;" >}} Click the log icon to open the [Log Explorer][5] on the Datadog platform with a pre-filled query that matches the logger name, log level, and log message as closely as possible: -{{< img src="/developers/ide_integrations/idea/log-explorer-link.png" alt="A source file showing a View Logs icon and link." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/log-explorer-link.png" alt="A source file showing a View Logs icon and link." style="width:100%;" >}} ## CI Test Runs You can view recent test runs in the [Continuous Integration Visibility Explorer][12] by navigating directly from your source files. Look for the **View Test Runs** links following test method declarations in your source code: -{{< img src="/developers/ide_integrations/idea/ci-navigation.png" alt="A source file showing a View Test Runs link." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/ci-navigation.png" alt="A source file showing a View Test Runs link." style="width:100%;" >}} Clicking the link opens the **Test Runs** tab showing the recent history for one test case. @@ -180,7 +182,7 @@ Clicking the link opens the **Test Runs** tab showing the recent history for one The **View in IntelliJ/GoLand/PyCharm** feature provides a link from the Datadog platform directly to your Java, Go, and Python source files. Look for the button next to frames in stack traces displayed on the platform (for example, in [Error Tracking][6]): -{{< img src="/developers/ide_integrations/idea/view-in-idea.png" alt="A stack trace on the Datadog platform showing the View in IntelliJ button." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/view-in-idea.png" alt="A stack trace on the Datadog platform showing the View in IntelliJ button." style="width:100%;" >}}
This feature has two prerequisites: (1) Source Code Integration is configured for your service and (2) the JetBrains Toolbox is installed on your development machine.
@@ -189,14 +191,14 @@ The Datadog plugin runs [Static Analysis][13] rules on your source files as you Static Analysis supports scanning for many programming languages. For a complete list, see [Static Analysis Rules][14]. For file types belonging to supported languages, issues are shown in the source code editor with the JetBrains inspection system, and suggested fixes can be applied directly: -{{< img src="/developers/ide_integrations/idea/static-analysis-issue.png" alt="A static analysis rule violation and recommended fix." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/static-analysis-issue.png" alt="A static analysis rule violation and recommended fix." style="width:100%;" >}} Additionally, all issues detected by this feature are listed in the standard **Problems** view. ### Getting started When you start editing a source file supported by Static Analysis, the plugin checks for `static-analysis.datadog.yml` at your source repository's root. It prompts you to create the file if necessary: -{{< img src="/developers/ide_integrations/idea/static-analysis-onboard.png" alt="A banner for onboarding." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/idea/static-analysis-onboard.png" alt="A banner for onboarding." style="width:100%;" >}} Once the configuration file is created, the static analyzer runs automatically in the background. diff --git a/content/en/developers/ide_integrations/visual_studio/_index.md b/content/en/developers/ide_plugins/visual_studio/_index.md similarity index 82% rename from content/en/developers/ide_integrations/visual_studio/_index.md rename to content/en/developers/ide_plugins/visual_studio/_index.md index 478e390df6006..cc334f68d3051 100644 --- a/content/en/developers/ide_integrations/visual_studio/_index.md +++ b/content/en/developers/ide_plugins/visual_studio/_index.md @@ -2,6 +2,8 @@ title: Datadog Extension for Visual Studio kind: documentation is_beta: true +aliases: +- '/developers/ide_integrations/visual_studio/' further_reading: - link: "/getting_started/profiler/" tag: "Documentation" @@ -21,25 +23,25 @@ further_reading: The Datadog extension for Visual Studio helps you find and fix bugs, security issues, and performance bottlenecks based on real-time observability data from your services and runtime environments. -{{< img src="/developers/ide_integrations/visual_studio/datadog-for-visual-studio.png" alt="Datadog extension for Visual Studio">}} +{{< img src="/developers/ide_plugins/visual_studio/datadog-for-visual-studio.png" alt="Datadog extension for Visual Studio">}} ### Code insights Stay informed about [Error Tracking][5] issues, [Security Vulnerabilities][6], [Flaky Tests][10], and [Watchdog][7] profiling insights without leaving Visual Studio. -{{< img src="/developers/ide_integrations/visual_studio/code-insights.png" alt="The Code Insights view" >}} +{{< img src="/developers/ide_plugins/visual_studio/code-insights.png" alt="The Code Insights view" >}} ### Continuous Profiler Analyze and improve the performance of your applications with real-time profiling metrics for CPU, Memory, I/O, and others. -{{< img src="/developers/ide_integrations/visual_studio/top-list.png" alt="The Code Insights view">}} +{{< img src="/developers/ide_plugins/visual_studio/top-list.png" alt="The Code Insights view">}} ### Logs navigation You can navigate to the [Log Explorer][18] on the Datadog platform directly from your C# source files. 
Look for the clickable icon preceding message strings from log statements within your source code: -{{< img src="/developers/ide_integrations/visual_studio/logs-navigation.png" alt="A source file showing log lines with clickable icons." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/visual_studio/logs-navigation.png" alt="A source file showing log lines with clickable icons." style="width:100%;" >}} Clicking the icon opens the **Log Explorer** with a query that matches the logger name, log level, and log message as closely as possible. @@ -47,7 +49,7 @@ Clicking the icon opens the **Log Explorer** with a query that matches the logge Navigate from Datadog to your source code with one click. -{{< img src="/developers/ide_integrations/visual_studio/view-in-visual-studio.png" alt="A stack trace on the Datadog platform showing the View in Visual Studio button.">}} +{{< img src="/developers/ide_plugins/visual_studio/view-in-visual-studio.png" alt="A stack trace on the Datadog platform showing the View in Visual Studio button.">}} ## Getting started diff --git a/content/en/developers/ide_integrations/vscode/_index.md b/content/en/developers/ide_plugins/vscode/_index.md similarity index 85% rename from content/en/developers/ide_integrations/vscode/_index.md rename to content/en/developers/ide_plugins/vscode/_index.md index c64199a59843f..fbabe94ee033a 100644 --- a/content/en/developers/ide_integrations/vscode/_index.md +++ b/content/en/developers/ide_plugins/vscode/_index.md @@ -3,6 +3,8 @@ title: Datadog Extension for Visual Studio Code kind: documentation description: Learn how to run Synthetic tests on local environments directly in VS Code. is_beta: true +aliases: +- '/developers/ide_integrations/vscode/' further_reading: - link: "/getting_started/synthetics/" tag: "Documentation" @@ -22,7 +24,7 @@ further_reading: The Datadog extension for Visual Studio Code (VS Code) integrates with Datadog to accelerate your development. -{{< img src="/developers/ide_integrations/vscode/datadog-vscode.png" alt="The Datadog for VS Code extension" style="width:100%;" >}} +{{< img src="/developers/ide_plugins/vscode/datadog-vscode.png" alt="The Datadog for VS Code extension" style="width:100%;" >}} The **Code Insights** view keeps you informed about: - Issues from [Error Tracking][10] @@ -52,7 +54,7 @@ Install the [Datadog Extension][6] from the Visual Studio Marketplace. ## Code Insights The **Code Insights** tree displays insights generated by the Datadog platform that are relevant to your code-base. The insights are grouped into three categories: performance, reliability, and security. -{{< img src="/developers/ide_integrations/vscode/code-insights.png" alt="The Code Insights view." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/vscode/code-insights.png" alt="The Code Insights view." style="width:100%;" >}} Code Insights include a detailed description for each issue, and links to: - The related source code location @@ -63,7 +65,7 @@ You can dismiss individual insights and set filters to view the categories of in ## Synthetic Tests The Datadog extension enables you to [run Synthetic HTTP tests and browser tests on local environments][1] directly in the IDE. You can identify and address potential issues resulting from code changes before they are deployed into production and impact your end users. 
-{{< img src="developers/ide_integrations/vscode/vscode-extension-demo.png" alt="The Datadog Extension in VS Code" style="width:100%;" >}} +{{< img src="developers/ide_plugins/vscode/vscode-extension-demo.png" alt="The Datadog Extension in VS Code" style="width:100%;" >}} ### Run Synthetic tests locally @@ -71,7 +73,7 @@ The Datadog extension enables you to [run Synthetic HTTP tests and browser tests 2. Change the test's configuration to convert the start URL and specify a `localhost` URL on the **Settings** page. 3. Run the test. -{{< img src="developers/ide_integrations/vscode/test_configuration_modified_starturl.png" alt="The Test Configuration panel and Settings page where you can specify the start URL of a Synthetics test to a localhost URL" style="width:100%;" >}} +{{< img src="developers/ide_plugins/vscode/test_configuration_modified_starturl.png" alt="The Test Configuration panel and Settings page where you can specify the start URL of a Synthetics test to a localhost URL" style="width:100%;" >}} If you haven't set up Synthetic tests already, [create a test in Datadog][3]. For more information about running tests on a local environment, see [Getting Started with API Tests][4], [Getting Started with Browser Tests][5], and the [Continuous Testing documentation][1]. @@ -85,7 +87,7 @@ If you are using the [custom role feature][8], add your user to any custom role The **View in VS Code** feature provides a link from Datadog directly to your source files. Look for the button next to frames in stack traces displayed in the UI (for example, in [Error Tracking][10]): -{{< img src="/developers/ide_integrations/vscode/view-in-vscode.png" alt="A stack trace on the Datadog platform showing the View in VS Code button." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/vscode/view-in-vscode.png" alt="A stack trace on the Datadog platform showing the View in VS Code button." style="width:100%;" >}}
To use this feature, first configure source code integration for your service.
@@ -94,14 +96,14 @@ The Datadog extension runs [Static Analysis][14] rules on the source files you h Static Analysis supports scanning for many programming languages. For a complete list, see [Static Analysis Rules][15]. For file types belonging to supported languages, issues are shown in the source code editor with the VS Code inspection system, and suggested fixes can be applied directly: -{{< img src="/developers/ide_integrations/vscode/static-analysis-issue.png" alt="A static analysis rule violation and recommended fix." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/vscode/static-analysis-issue.png" alt="A static analysis rule violation and recommended fix." style="width:100%;" >}} Additionally, all issues detected by this feature are listed in the standard **Problems** view. ### Getting started When you start editing a source file supported by Static Analysis, the extension checks for `static-analysis.datadog.yml` at your source repository's root. It prompts you to create the file if necessary: -{{< img src="/developers/ide_integrations/vscode/static-analysis-onboard.png" alt="A banner for onboarding." style="width:100%;" >}} +{{< img src="/developers/ide_plugins/vscode/static-analysis-onboard.png" alt="A banner for onboarding." style="width:100%;" >}} Once the configuration file is created, the static analyzer runs automatically in the background. diff --git a/content/en/developers/integrations/log_pipeline.md b/content/en/developers/integrations/log_pipeline.md index f492bdbc7d70f..e66fd4ad802b3 100644 --- a/content/en/developers/integrations/log_pipeline.md +++ b/content/en/developers/integrations/log_pipeline.md @@ -21,7 +21,7 @@ description: Learn how to create a Datadog Log integration. --- ## Overview -This page walks Technology Partners through creating a log pipeline. +This page walks Technology Partners through creating a log pipeline. A log pipeline is required if your integration is sending in logs. ## Log integrations @@ -99,12 +99,12 @@ To add a facet or measure: 4. For a measure, to define the unit, click **Advanced options**. Select the unit based on what the attribute represents. 5. Click **Add**. -To easily navigate the facet list, facets are grouped together. For fields specific to the integration logs, create a single group with the same name as the `source` tag. +To help navigate the facet list, facets are grouped together. For fields specific to the integration logs, create a **single group with the same name** as the `source` tag. 1. In the log panel, click the Cog icon next to the attribute that you want in the new group. 2. Select **Edit facet/measure for @attribute**. If there isn't a facet for the attribute yet, select **Create facet/measure for @attribute**. 3. Click **Advanced options**. -4. In the **Group** field, enter the name and a description of the new group, and select **New group**. +4. In the **Group** field, enter the name of the group matching the source tag and a description of the new group, and select **New group**. 5. Click **Update**. **Guidelines** diff --git a/content/en/error_tracking/troubleshooting.md b/content/en/error_tracking/troubleshooting.md new file mode 100644 index 0000000000000..e14d37df1df76 --- /dev/null +++ b/content/en/error_tracking/troubleshooting.md @@ -0,0 +1,89 @@ +--- +title: Error Tracking Troubleshooting +kind: documentation +--- + +If you experience unexpected behavior with Error Tracking, the troubleshooting steps below can help you resolve the issue quickly. 
If you continue to have trouble, reach out to [Datadog support][1]. + +Datadog recommends regularly updating to the latest version of the Datadog tracing libraries, mobile SDKs, and web SDKs, as each release contains improvements and fixes. + +## Errors are not found in Error Tracking + +### Logs + +Make sure the error message has the [required attributes][2] and that Error Tracking for Logs is [activated][7]. + +This [example query][3] searches for logs meeting the criteria for inclusion in Error Tracking. + +### APM + +To be processed by Error Tracking, a span must have these attributes: +- `error.type` +- `error.message` +- `error.stack` + +**Note**: The stack must have at least two lines and one *meaningful* frame (a frame with a function name and a filename in most languages). + +Only errors from service entry spans (the uppermost service spans) are processed by Error Tracking. Error Tracking primarily captures unhandled exceptions; this behavior avoids capturing errors that are handled internally by the service. + +This [example query][5] searches for spans meeting the criteria for inclusion in Error Tracking. + +#### Workarounds for bubbling up child span errors to the service entry span + +Some tracers provide a way to access the root span and bubble up the error from child to root. + +{{< tabs >}} +{{% tab "Java" %}} + +```java +final Span span = GlobalTracer.get().activeSpan(); +if (span != null && (span instanceof MutableSpan)) { + MutableSpan localRootSpan = ((MutableSpan) span).getLocalRootSpan(); + // set error tags on the root span + localRootSpan.setTag("", ""); +} +``` + +{{% /tab %}} +{{% tab "Python" %}} + +```python +context = tracer.get_call_context() +root_span = context.get_current_root_span() +# set error tags on the root span +root_span.set_tag('', '') +``` + +{{% /tab %}} +{{% tab "Ruby" %}} + +```ruby +current_root_span = Datadog.tracer.active_root_span +# set error tags on the root span, if one is active +current_root_span.set_tag('', '') unless current_root_span.nil? +``` + +{{% /tab %}} + +{{< /tabs >}} + +### RUM + +Error Tracking only processes errors that are sent with the source set to `custom`, `source`, or `report`, and that contain a stack trace. Errors sent with any other source (such as `console`) or sent from browser extensions are not processed by Error Tracking. + +This [example query][6] shows RUM errors that meet the criteria for inclusion in Error Tracking. + +## No error samples found for an issue + +All errors are processed, but only retained errors are available in the issue panel as an error sample. + +### APM + +Spans associated with the error must be retained by a custom retention filter for samples of that error to appear in the issue panel.
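+ +For example, a retention filter that keeps one service's error spans could use a query like the following, where `shopping-cart` is a placeholder service name and the `@error.message:*` clause mirrors the example span query linked above. Adjust the query to your own service and error attributes: + +``` +service:shopping-cart @error.message:* +```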
+ +[1]: /help/ +[2]: /logs/error_tracking/backend/?tab=serilog#attributes-for-error-tracking +[3]: https://app.datadoghq.com/logs?query=status%3A%28emergency%20OR%20alert%20OR%20critical%20OR%20error%29%20AND%20%28%40error.stack%3A%2A%20OR%20%40error.kind%3A%2A%29%20 +[4]: /tracing/error_tracking/#use-span-tags-to-track-error-spans +[5]: https://app.datadoghq.com/apm/traces?query=%40_top_level%3A1%20%40error.stack%3A%2A%20AND%20%40error.message%3A%2A%20AND%20error.type%3A%2A%20AND%20%40_top_level%3A1%20 +[6]: https://app.datadoghq.com/rum/sessions?query=%40type%3Aerror%20%40error.stack%3A%2A +[7]: https://app.datadoghq.com/error-tracking/settings +[8]: /tracing/trace_collection/custom_instrumentation/java/dd-api/#set-tags--errors-on-a-root-span-from-a-child-span \ No newline at end of file diff --git a/content/en/getting_started/continuous_testing/_index.md b/content/en/getting_started/continuous_testing/_index.md index 8e1f2d70fcf88..d0a3220096f73 100644 --- a/content/en/getting_started/continuous_testing/_index.md +++ b/content/en/getting_started/continuous_testing/_index.md @@ -104,7 +104,7 @@ Separately, you can use the [Datadog Synthetics VS Code Integration][12] to help * Run HTTP API tests and browser tests and see their results within VS Code. * Test only what matters by executing relevant tests at the same time. -{{< img src="developers/ide_integrations/vscode/vscode-extension-demo.png" alt="vscode-extension-demo" style="width:100%;" >}} +{{< img src="developers/ide_plugins/vscode/vscode-extension-demo.png" alt="vscode-extension-demo" style="width:100%;" >}} ### Running tests in VS Code @@ -175,12 +175,12 @@ For more information, see the [Parallelization documentation][18]. [4]: /mobile_app_testing/ [5]: /synthetics/explorer?track=synbatch [6]: /continuous_testing/cicd_integrations/configuration/?tab=npm -[7]: /developers/ide_integrations/ +[7]: /developers/ide_plugins/ [8]: https://datadoghq.com [9]: /getting_started/synthetics/browser_test/#create-a-browser-test [10]: /continuous_testing/cicd_integrations/configuration/?tab=npm#install-the-package [11]: /continuous_testing/cicd_integrations/configuration/?tab=npm#reporters -[12]: /developers/ide_integrations/vscode/ +[12]: /developers/ide_plugins/vscode/ [13]: /getting_started/synthetics/private_location/ [14]: /continuous_testing/environments/ [15]: /continuous_testing/explorer/?tab=testruns#create-a-search-query diff --git a/content/en/glossary/_index.md b/content/en/glossary/_index.md index 556e1c7f524e8..89d518b85dfa3 100644 --- a/content/en/glossary/_index.md +++ b/content/en/glossary/_index.md @@ -1,5 +1,7 @@ --- title: Glossary +aliases: + - /glossary/terms/wall_time/ cascade: disable_toc: true scrollspy: diff --git a/content/en/glossary/terms/approval_wait_time.md b/content/en/glossary/terms/approval_wait_time.md index a62cca41c5852..aa4333f98e633 100644 --- a/content/en/glossary/terms/approval_wait_time.md +++ b/content/en/glossary/terms/approval_wait_time.md @@ -5,4 +5,4 @@ core_product: related_terms: - queue time --- -Approval wait time is the duration that workflow runs and jobs within a workflow are paused, pending manual approval. For more information, see the documentation. \ No newline at end of file +Approval wait time is the duration that pipelines and jobs or stages within a pipeline are blocked, pending manual approval. For more information, see the documentation. 
diff --git a/content/en/glossary/terms/flame_graph.md b/content/en/glossary/terms/flame_graph.md index 0b192577c4e14..92e408957cd53 100644 --- a/content/en/glossary/terms/flame_graph.md +++ b/content/en/glossary/terms/flame_graph.md @@ -2,5 +2,8 @@ title: flame graph core_product: - apm + - code profiling --- -A flame graph is a visualization of a trace, where bars represent spans and show the span's execution time as well as what called it and what calls it made. Flame graphs are also used to represent profiles. \ No newline at end of file +A flame graph is a visualization of a trace, where bars represent spans and show the span's execution time as well as what called it and what calls it made. + +A flame graph is also the default visualization for Continuous Profiler. It shows resource consumption (such as CPU usage) per method, and how each method was called. diff --git a/content/en/glossary/terms/timeline_view.md b/content/en/glossary/terms/timeline_view.md new file mode 100644 index 0000000000000..a204b19b3795d --- /dev/null +++ b/content/en/glossary/terms/timeline_view.md @@ -0,0 +1,12 @@ +--- +title: timeline view +core_product: + - code profiling +--- + +A timeline view is the equivalent of a flame graph, with a distribution over time. + +Each lane represents a **thread** (or a **goroutine** for Go applications). In contrast to the flame graph, you can use the timeline view to: +- Isolate spiky methods +- Investigate complex interactions between threads +- Surface runtime activity impacting the process diff --git a/content/en/glossary/terms/wall_time.md b/content/en/glossary/terms/wall_time.md deleted file mode 100644 index dadb545a6a6ff..0000000000000 --- a/content/en/glossary/terms/wall_time.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -# Glossary Term -title: wall time - -core_product: - - ci-cd - ---- -Wall time is the real time elapsed while the test suite runs, which is less than the sum of all test times when tests are run concurrently. For more information, see the documentation. \ No newline at end of file diff --git a/content/en/integrations/guide/aws-manual-setup.md b/content/en/integrations/guide/aws-manual-setup.md index 9a525e40b9046..8c8f6f693a6dd 100644 --- a/content/en/integrations/guide/aws-manual-setup.md +++ b/content/en/integrations/guide/aws-manual-setup.md @@ -124,9 +124,10 @@ Ensure to leave `Require MFA` disabled. For more details, see the [How to use an 3. In the [AWS integration tile][1], click **Add AWS Account**, and then select **Manually**. 4. Select the **Access Keys (GovCloud or China\* Only)** tab. -5. Enter your `Account ID`, `AWS Access Key` and `AWS Secret Key`. Only access and secret keys for GovCloud and China are accepted. -6. Click **Save**. -7. Wait up to 10 minutes for data to start being collected, and then view the out-of-the-box AWS Overview Dashboard to see metrics sent by your AWS services and infrastructure. +5. Click the **I confirm that the IAM User for the Datadog Integration has been added to the AWS Account** checkbox. +6. Enter your `Account ID`, `AWS Access Key` and `AWS Secret Key`. Only access and secret keys for GovCloud and China are accepted. +7. Click **Save**. +8. Wait up to 10 minutes for data to start being collected, and then view the out-of-the-box AWS Overview Dashboard to see metrics sent by your AWS services and infrastructure. 
\* _All use of Datadog Services in (or in connection with environments within) mainland China is subject to the disclaimer published in the [Restricted Service Locations][2] section on our website._ diff --git a/content/en/logs/guide/best-practices-for-log-management.md b/content/en/logs/guide/best-practices-for-log-management.md index 63db263cef40c..506c2300cf978 100644 --- a/content/en/logs/guide/best-practices-for-log-management.md +++ b/content/en/logs/guide/best-practices-for-log-management.md @@ -33,6 +33,8 @@ This guide also goes through how to monitor your log usage by: - [Alerting on indexed logs when the volume passes a specified threshold](#alert-when-an-indexed-log-volume-passes-a-specified-threshold) - [Setting up exclusion filters on high-volume logs](#set-up-exclusion-filters-on-high-volume-logs) +If you want to transform your logs or redact sensitive data in your logs before they leave your environment, see how to [aggregate, process, and transform your log data with Observability Pipelines][29]. + ## Log account configuration ### Set up multiple indexes for log segmentation @@ -223,3 +225,4 @@ If you want to see user activities, such as who changed the retention of an inde [26]: /account_management/audit_trail/ [27]: https://www.datadoghq.com/pricing/?product=audit-trail#audit-trail [28]: /monitors/configuration/?tab=thresholdalert#evaluation-window +[29]: /observability_pipelines/ diff --git a/content/en/logs/log_configuration/archives.md b/content/en/logs/log_configuration/archives.md index 365f2fb1e9af3..e4600506af484 100644 --- a/content/en/logs/log_configuration/archives.md +++ b/content/en/logs/log_configuration/archives.md @@ -27,7 +27,7 @@ Configure your Datadog account to forward all the logs ingested—whether [index {{< img src="logs/archives/log_forwarding_archives_tab.png" alt="Archives tab on the Log Forwarding page" style="width:100%;">}} -Navigate to the [**Log Forwarding** page][14] to set up an archive for forwarding ingested logs to your own cloud-hosted storage bucket. +Navigate to the [**Log Forwarding** page][3] to set up an archive for forwarding ingested logs to your own cloud-hosted storage bucket. 1. If you haven't already, set up a Datadog [integration](#set-up-an-integration) for your cloud provider. 2. Create a [storage bucket](#create-a-storage-bucket). @@ -36,6 +36,8 @@ Navigate to the [**Log Forwarding** page][14] to set up an archive for forwardin 5. Configure [advanced settings](#advanced-settings) such as encryption, storage class, and tags. 6. [Validate](#validation) your setup and check for possible misconfigurations that Datadog would be able to detect for you. +See how to [archive your logs with Observability Pipelines][4] if you want to route your logs to a storage-optimized archive directly from your environment. + ## Configure an archive ### Set up an integration @@ -87,13 +89,12 @@ Go into your [AWS console][1] and [create an S3 bucket][2] to send your archives **Notes:** - Do not make your bucket publicly readable. -- For [US1, US3, and US5 sites][4], see [AWS Pricing][5] for inter-region data transfer fees and how cloud storage costs may be impacted. Consider creating your storage bucket in `us-east-1` to manage your inter-region data transfer fees. +- For [US1, US3, and US5 sites][3], see [AWS Pricing][4] for inter-region data transfer fees and how cloud storage costs may be impacted. Consider creating your storage bucket in `us-east-1` to manage your inter-region data transfer fees. 
[1]: https://s3.console.aws.amazon.com/s3 [2]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/create-bucket.html -[3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-overview.html -[4]: /getting_started/site/ -[5]: https://aws.amazon.com/s3/pricing/ +[3]: /getting_started/site/ +[4]: https://aws.amazon.com/s3/pricing/ {{% /tab %}} {{% tab "Azure Storage" %}} @@ -122,7 +123,7 @@ Go to your [Google Cloud account][1] and [create a GCS bucket][2] to send your a ### Set permissions -Only Datadog users with the [`logs_write_archive` permission][3] can create, modify, or delete log archive configurations. +Only Datadog users with the [`logs_write_archive` permission][5] can create, modify, or delete log archive configurations. {{< tabs >}} {{% tab "AWS S3" %}} @@ -195,12 +196,12 @@ Only Datadog users with the [`logs_write_archive` permission][3] can create, mod ### Route your logs to a bucket -Navigate to the [Log Forwarding page][5] and select **Add a new archive** on the **Archives** tab. +Navigate to the [Log Forwarding page][6] and select **Add a new archive** on the **Archives** tab. **Notes:** -* Only Datadog users with the [`logs_write_archive` permission][3] can complete this and the following step. -* Archiving logs to Azure Blob Storage requires an App Registration. See instructions [on the Azure integration page][6], and set the "site" on the right-hand side of the documentation page to "US." App Registration(s) created for archiving purposes only need the "Storage Blob Data Contributor" role. If your storage bucket is in a subscription being monitored through a Datadog Resource, a warning is displayed about the App Registration being redundant. You can ignore this warning. -* If your bucket restricts network access to specified IPs, add the webhook IPs from the [IP ranges list][4] to the allowlist. +* Only Datadog users with the [`logs_write_archive` permission][5] can complete this and the following step. +* Archiving logs to Azure Blob Storage requires an App Registration. See instructions [on the Azure integration page][7], and set the "site" on the right-hand side of the documentation page to "US." App Registration(s) created for archiving purposes only need the "Storage Blob Data Contributor" role. If your storage bucket is in a subscription being monitored through a Datadog Resource, a warning is displayed about the App Registration being redundant. You can ignore this warning. +* If your bucket restricts network access to specified IPs, add the webhook IPs from the [IP ranges list][8] to the allowlist. {{< tabs >}} {{% tab "AWS S3" %}} @@ -245,9 +246,9 @@ By default: Use this optional configuration step to assign roles on that archive and restrict who can: -* Edit that archive configuration. See the [`logs_write_archive`][7] permission. -* Rehydrate from that archive. See the [`logs_read_archives`][8] and [`logs_write_historical_view`][9] permissions. -* Access rehydrated logs in case you use the legacy [`read_index_data` permission][10]. +* Edit that archive configuration. See the [`logs_write_archive`][9] permission. +* Rehydrate from that archive. See the [`logs_read_archives`][10] and [`logs_write_historical_view`][11] permissions. +* Access rehydrated logs in case you use the legacy [`read_index_data` permission][12]. 
{{< img src="logs/archives/archive_restriction.png" alt="Restrict access to Archives and Rehydrated logs" style="width:75%;">}} @@ -256,7 +257,7 @@ Use this optional configuration step to assign roles on that archive and restric Use this optional configuration step to: * Include all log tags in your archives (activated by default on all new archives). **Note**: This increases the size of resulting archives. -* Add tags on rehydrated logs according to your Restriction Queries policy. See the [`logs_read_data`][11] permission. +* Add tags on rehydrated logs according to your Restriction Queries policy. See the [`logs_read_data`][13] permission. {{< img src="logs/archives/tags_in_out.png" alt="Configure Archive Tags" style="width:75%;">}} @@ -402,11 +403,11 @@ Once your archive settings are successfully configured in your Datadog account, However, after creating or updating your archive configurations, it can take several minutes before the next archive upload is attempted. The frequency at which archives are uploaded can vary. **Check back on your storage bucket in 15 minutes** to make sure the archives are successfully being uploaded from your Datadog account. -After that, if the archive is still in a pending state, check your inclusion filters to make sure the query is valid and matches log events in [Live Tail][12]. When Datadog fails to upload logs to an external archive, due to unintentional changes in settings or permissions, the corresponding Log Archive is highlighted in the configuration page. +After that, if the archive is still in a pending state, check your inclusion filters to make sure the query is valid and matches log events in [Live Tail][14]. When Datadog fails to upload logs to an external archive, due to unintentional changes in settings or permissions, the corresponding Log Archive is highlighted in the configuration page. {{< img src="logs/archives/archive_errors_details.png" alt="Check that your archives are properly set up" style="width:100%;">}} -Hover over the archive to view the error details and the actions to take to resolve the issue. An event is also generated in the [Events Explorer][13]. You can create a monitor for these events to detect and remediate failures quickly. +Hover over the archive to view the error details and the actions to take to resolve the issue. An event is also generated in the [Events Explorer][15]. You can create a monitor for these events to detect and remediate failures quickly. 
## Multiple archives @@ -452,15 +453,16 @@ Within the zipped JSON file, each event's content is formatted as follows: [1]: /logs/indexes/#exclusion-filters [2]: /logs/archives/rehydrating/ -[3]: /account_management/rbac/permissions/?tab=ui#logs_write_archives -[4]: https://ip-ranges.datadoghq.com/ -[5]: https://app.datadoghq.com/logs/pipelines/archives -[6]: /integrations/azure/ -[7]: /account_management/rbac/permissions#logs_write_archives -[8]: /account_management/rbac/permissions#logs_read_archives -[9]: /account_management/rbac/permissions#logs_write_historical_view -[10]: /account_management/rbac/permissions#logs_read_index_data -[11]: /account_management/rbac/permissions#logs_read_data -[12]: /logs/explorer/live_tail/ -[13]: /service_management/events/explorer/ -[14]: https://app.datadoghq.com/logs/pipelines/log-forwarding +[3]: https://app.datadoghq.com/logs/pipelines/log-forwarding +[4]: /observability_pipelines/archive_logs/ +[5]: /account_management/rbac/permissions/?tab=ui#logs_write_archives +[6]: https://app.datadoghq.com/logs/pipelines/archives +[7]: /integrations/azure/ +[8]: https://ip-ranges.datadoghq.com/ +[9]: /account_management/rbac/permissions#logs_write_archives +[10]: /account_management/rbac/permissions#logs_read_archives +[11]: /account_management/rbac/permissions#logs_write_historical_view +[12]: /account_management/rbac/permissions#logs_read_index_data +[13]: /account_management/rbac/permissions#logs_read_data +[14]: /logs/explorer/live_tail/ +[15]: /service_management/events/explorer/ diff --git a/content/en/logs/log_configuration/forwarding_custom_destinations.md b/content/en/logs/log_configuration/forwarding_custom_destinations.md index 9ff12df091026..31d9581d9d5d5 100644 --- a/content/en/logs/log_configuration/forwarding_custom_destinations.md +++ b/content/en/logs/log_configuration/forwarding_custom_destinations.md @@ -11,6 +11,9 @@ further_reading: - link: "/logs/log_configuration/pipelines" tag: "Documentation" text: "Learn about log pipelines" +- link: "/observability_pipelines/" + tag: "Documentation" + text: "Forward logs directly from your environment with Observability Pipelines" --- {{% site-region region="gov" %}} diff --git a/content/en/monitors/guide/_index.md b/content/en/monitors/guide/_index.md index e6cfbcecd75ed..cdd68c8a6daa3 100644 --- a/content/en/monitors/guide/_index.md +++ b/content/en/monitors/guide/_index.md @@ -37,7 +37,6 @@ cascade: {{< nextlink href="synthetics/guide/synthetic-test-monitors" >}}How to create monitors in synthetic tests{{< /nextlink >}} {{< nextlink href="monitors/guide/non_static_thresholds" >}}How to monitor non-static thresholds{{< /nextlink >}} {{< nextlink href="monitors/guide/anomaly-monitor" >}}Anomaly monitors{{< /nextlink >}} - {{< nextlink href="monitors/guide/change-alert" >}}Change Alert monitors{{< /nextlink >}} {{< nextlink href="monitors/guide/monitor-ephemeral-servers-for-reboots" >}}Monitor ephemeral servers for reboots{{< /nextlink >}} {{< nextlink href="monitors/guide/how-to-update-anomaly-monitor-timezone" >}}How to update an anomaly detection monitor to account for local timezone{{< /nextlink >}} {{< nextlink href="monitors/guide/history_and_evaluation_graphs" >}}Monitor History and Evaluation Graph{{< /nextlink >}} diff --git a/content/en/monitors/types/_index.md b/content/en/monitors/types/_index.md index 7b973c5f7b335..64a1a2a206940 100644 --- a/content/en/monitors/types/_index.md +++ b/content/en/monitors/types/_index.md @@ -25,6 +25,7 @@ further_reading: {{< nextlink 
href="/monitors/types/anomaly" >}}Anomaly: Detect anomalous behavior for a metric based on historical data.{{< /nextlink >}} {{< nextlink href="/monitors/types/apm" >}}APM: Monitor APM metrics or trace queries.{{< /nextlink >}} {{< nextlink href="/monitors/types/audit_trail" >}}Audit Trail: Alert when a specified type of audit log exceeds a user-defined threshold over a given period of time.{{< /nextlink >}} +{{< nextlink href="/monitors/types/change-alert" >}}Change Alert: Alert when the absolute or relative value changes against a user-defined threshold over a given period of time.{{< /nextlink >}} {{< nextlink href="/monitors/types/ci" >}}CI: Monitor CI pipelines and tests data gathered by Datadog.{{< /nextlink >}} {{< nextlink href="/monitors/types/cloud_cost" >}}Cloud Cost: Monitor cost changes associated with cloud platforms.{{< /nextlink >}} {{< nextlink href="/monitors/types/composite" >}}Composite: Alert on an expression combining multiple monitors.{{< /nextlink >}} diff --git a/content/en/monitors/guide/change-alert.md b/content/en/monitors/types/change-alert.md similarity index 77% rename from content/en/monitors/guide/change-alert.md rename to content/en/monitors/types/change-alert.md index 6c180940f6091..07985fcebb063 100644 --- a/content/en/monitors/guide/change-alert.md +++ b/content/en/monitors/types/change-alert.md @@ -1,7 +1,9 @@ --- -title: Change alert monitors -kind: Guide +title: Change Alert Monitor +kind: Documentation disable_toc: false +aliases: +- monitors/guide/change-alert further_reading: - link: "/monitors/types/metric/?tab=change#choose-the-detection-method" tag: "Documentation" @@ -23,11 +25,15 @@ Here is a breakdown of how monitors with the change detection method work: 1. Aggregation is applied over the query in (3) which returns a single value. 1. The threshold defined in **Set alert conditions** is compared to the single value returned in (4). +## Monitor creation + +To create a [Change Alert monitor][9] in Datadog, use the main navigation: *Monitors --> New Monitor --> Change*. + ## Evaluation conditions Here are the different options that you need to configure in a change alert monitor. -{{< img src="/monitors/guide/change-alert/configure_define_the_metrics.png" alt="Configuration options for change alert detection method" style="width:100%;" >}} +{{< img src="/monitors/monitor_types/change-alert/configure_define_the_metrics.png" alt="Configuration options for change alert detection method" style="width:100%;" >}} The example shows the following alert condition: The **average** of the **change** over **1 hour** compared to **5 minutes** @@ -51,12 +57,16 @@ This determines the way the monitor evaluates as expressed in the formula sectio In both cases, `Change`, and `% Change` can be either positive or negative. +## Notifications + +For instructions on the **Notify your team** section, see the [Notifications][7] and [Monitor configuration][8] pages. + ## Troubleshooting a change alert evaluation To verify the results of your change alert evaluation, reconstruct the metric queries with a Notebook. Take this change alert monitor with the following settings. 
-{{< img src="monitors/guide/change-alert/example_monitor_config.png" alt="The create monitor page with a change alert selected, evaluating the percent change of the average of the metric system.load.1 over the last 5 minutes compared to the last 30 minutes" style="width:100%;" >}} +{{< img src="monitors/monitor_types/change-alert/example_monitor_config.png" alt="The create monitor page with a change alert selected, evaluating the percent change of the average of the metric system.load.1 over the last 5 minutes compared to the last 30 minutes" style="width:100%;" >}} Monitor Query: ```pct_change(avg(last_5m),last_30m): > -50``` @@ -75,7 +85,7 @@ This is a break down of the query with the following conditions: - Query of data points N minutes ago (this is the normal query + timeshift(-1800)). - The timeshift function uses a **negative** duration because you're shifting the data back. Combine these queries along with the % change formula from the table. - **Note**: Since this example only has one metric, it's also possible to use a single query (a) and add the formula `((a - timeshift(a, -1800)) / timeshift(a, -1800)) * 100` - {{< img src="monitors/guide/change-alert/notebook_query_reconstruct_timeshift.png" alt="The edit screen of a cell in a notebook, titled Reconstruct Change Alert query, configured as a timeseries using the average of the metric system.load.1, from everywhere, with the formula ((a - timeshift(a, -1800)) / timeshift(a, -1800)) * 100 being applied" style="width:100%;" >}} + {{< img src="monitors/monitor_types/change-alert/notebook_query_reconstruct_timeshift.png" alt="The edit screen of a cell in a notebook, titled Reconstruct Change Alert query, configured as a timeseries using the average of the metric system.load.1, from everywhere, with the formula ((a - timeshift(a, -1800)) / timeshift(a, -1800)) * 100 being applied" style="width:100%;" >}} 2. Compare the monitor's history graph with the notebook graph. Are the values comparable? 3. Apply the aggregation. - To compare your notebook graph to the change alert monitor evaluation, scope your timeframe to match the change alert. @@ -88,3 +98,6 @@ This is a break down of the query with the following conditions: [1]: /monitors/configuration/#evaluation-window [2]: /monitors/manage/status/#investigate-a-monitor-in-a-notebook [3]: /dashboards/functions/timeshift/ +[7]: /monitors/notify/ +[8]: /monitors/configuration/?tab=thresholdalert#notify-your-team +[9]: https://app.datadoghq.com/monitors/create/metric/change diff --git a/content/en/monitors/types/metric.md b/content/en/monitors/types/metric.md index befb25a515088..c51f30919219d 100644 --- a/content/en/monitors/types/metric.md +++ b/content/en/monitors/types/metric.md @@ -16,7 +16,7 @@ further_reading: - link: "/monitors/manage/status/" tag: "Documentation" text: "Consult your monitor status" -- link: "/monitors/guide/change-alert" +- link: "/monitors/types/change-alert" tag: "Documentation" text: "Troubleshoot change alert monitors" --- @@ -50,7 +50,7 @@ This type of alert is useful to track spikes, drops, or slow changes in a metric For more information, see the [Change alert monitors][1] guide. 
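+
+As a worked example, consider `pct_change(avg(last_5m),last_30m)`: if the metric averaged 4 over the evaluation window 30 minutes ago and averages 2 now, the monitor evaluates ((2 - 4) / 4) * 100 = -50%, matching the `((a - timeshift(a, -1800)) / timeshift(a, -1800)) * 100` reconstruction shown in the guide.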
-[1]: /monitors/guide/change-alert/ +[1]: /monitors/types/change-alert/ {{% /tab %}} {{% tab "Anomaly" %}} diff --git a/content/en/observability_pipelines/dual_ship_logs/_index.md b/content/en/observability_pipelines/dual_ship_logs/_index.md index 7a1c31ebfb410..94c061c947d9a 100644 --- a/content/en/observability_pipelines/dual_ship_logs/_index.md +++ b/content/en/observability_pipelines/dual_ship_logs/_index.md @@ -2,6 +2,8 @@ title: Dual Ship Logs kind: Documentation disable_toc: false +aliases: + - /observability_pipelines/dual_ship_logs/datadog_agent --- ## Overview @@ -12,12 +14,10 @@ As your infrastructure and your organization scales, so does your log volume, th Select a source to get started: -- [Datadog Agent][1] -- [Splunk HTTP Event Collector (HEC)][2] -- [Splunk Heavy and Universal Forwarders (TCP)][3] -- [Sumo Logic Hosted Collector][4] +- [Splunk HTTP Event Collector (HEC)][1] +- [Splunk Heavy and Universal Forwarders (TCP)][2] +- [Sumo Logic Hosted Collector][3] -[1]: /observability_pipelines/dual_ship_logs/datadog_agent -[2]: /observability_pipelines/dual_ship_logs/splunk_hec -[3]: /observability_pipelines/dual_ship_logs/splunk_tcp -[4]: /observability_pipelines/dual_ship_logs/sumo_logic_hosted_collector +[1]: /observability_pipelines/dual_ship_logs/splunk_hec +[2]: /observability_pipelines/dual_ship_logs/splunk_tcp +[3]: /observability_pipelines/dual_ship_logs/sumo_logic_hosted_collector diff --git a/content/en/profiler/enabling/go.md b/content/en/profiler/enabling/go.md index e42c603c696c7..b82a6f15eb898 100644 --- a/content/en/profiler/enabling/go.md +++ b/content/en/profiler/enabling/go.md @@ -121,7 +121,7 @@ To add detailed C function call information to CPU profiles, you may opt to use **Note**: This library is considered experimental. It can cause (infrequent) deadlocks in programs that use C++ exceptions, or that use libraries such as `tcmalloc`, which also collect call stacks. -## Save 2 to 14% CPU in production with PGO +## Save up to 14% CPU in production with PGO Starting [Go 1.21][14], the Go compiler supports Profile-Guided Optimization (PGO). PGO enables additional optimizations on code identified as hot by CPU profiles of production workloads. This is compatible with Datadog Go Continuous Profiler and can be used for production builds. 
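+
+For example, with Go 1.21 or later, you can typically download a representative CPU profile of your production service, save it as `default.pgo` in your main package's directory, and rebuild; the toolchain's default `-pgo=auto` mode picks the profile up on the next `go build`. This workflow is general Go toolchain behavior, not specific to the Datadog profiler.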
diff --git a/content/en/profiler/guide/_index.md b/content/en/profiler/guide/_index.md index eb49c86aa73fd..bc64b6b4f1ae9 100644 --- a/content/en/profiler/guide/_index.md +++ b/content/en/profiler/guide/_index.md @@ -18,7 +18,7 @@ further_reading: {{< whatsnext desc="Guides" >}} {{< nextlink href="/profiler/guide/isolate-outliers-in-monolithic-services/" >}}Isolate Outliers in Monolithic Services{{< /nextlink >}} {{< nextlink href="/profiler/guide/solve-memory-leaks/" >}}Solve Memory Leaks with Profiling{{< /nextlink >}} - {{< nextlink href="/profiler/guide/save-cpu-in-production-with-go-pgo/" tag="Go">}}Save up to 15% CPU in Production with Profile-Guided Optimization{{< /nextlink >}} + {{< nextlink href="/profiler/guide/save-cpu-in-production-with-go-pgo/" tag="Go">}}Save up to 14% CPU in Production with Profile-Guided Optimization{{< /nextlink >}} {{< /whatsnext >}} ## Further reading diff --git a/content/en/profiler/guide/save-cpu-in-production-with-go-pgo.md b/content/en/profiler/guide/save-cpu-in-production-with-go-pgo.md index e845d1dfbc1b2..5d8500260668e 100644 --- a/content/en/profiler/guide/save-cpu-in-production-with-go-pgo.md +++ b/content/en/profiler/guide/save-cpu-in-production-with-go-pgo.md @@ -1,5 +1,5 @@ --- -title: Go - Save up to 15% CPU in Production with Profile-Guided Optimization +title: Go - Save up to 14% CPU in Production with Profile-Guided Optimization kind: guide further_reading: - link: "/profiler" diff --git a/content/en/real_user_monitoring/platform/connect_rum_and_traces.md b/content/en/real_user_monitoring/platform/connect_rum_and_traces.md index ba400d0bf6d33..b682746280409 100644 --- a/content/en/real_user_monitoring/platform/connect_rum_and_traces.md +++ b/content/en/real_user_monitoring/platform/connect_rum_and_traces.md @@ -114,9 +114,21 @@ To start sending just your iOS application's traces to Datadog, see [iOS Trace C **Note**: `traceSampleRate` **does not** impact RUM sessions sampling. Only backend traces are sampled out. +4. _(Optional)_ If you set a `traceSampleRate`, to ensure backend services' sampling decisions are still applied, configure the `traceContextInjection` initialization parameter to `sampled` (set to `all` by default). + + For example, if you set the `traceSampleRate` to 20% in the Browser SDK: + - When `traceContextInjection` is set to `all`, **20%** of backend traces are kept and **80%** of backend traces are dropped. + + {{< img src="real_user_monitoring/connect_rum_and_traces/traceContextInjection_all-2.png" alt="traceContextInjection set to all" style="width:90%;">}} + + - When `traceContextInjection` is set to `sampled`, **20%** of backend traces are kept. For the remaining **80%**, the browser SDK **does not inject** a sampling decision. The decision is made on the server side and is based on the tracing library head-based sampling [configuration][2]. In the example below, the backend sample rate is set to 40%, and therefore 32% of the remaining backend traces are kept. + + {{< img src="real_user_monitoring/connect_rum_and_traces/traceContextInjection_sampled-2.png" alt="traceContextInjection set to sampled" style="width:90%;">}} +
End-to-end tracing is available for requests fired after the Browser SDK is initialized. End-to-end tracing of the initial HTML document and early browser requests is not supported.
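+
+For example, a minimal Browser SDK initialization sketch (the `applicationId`, `clientToken`, and URL values are illustrative placeholders):
+
+```javascript
+import { datadogRum } from '@datadog/browser-rum';
+
+datadogRum.init({
+    applicationId: '<DATADOG_APPLICATION_ID>',
+    clientToken: '<DATADOG_CLIENT_TOKEN>',
+    // Inject trace context only into requests sent to these origins.
+    allowedTracingUrls: ['https://api.example.com'],
+    // Keep backend traces for 20% of traced requests.
+    traceSampleRate: 20,
+    // For the remaining 80%, defer to backend head-based sampling.
+    traceContextInjection: 'sampled',
+});
+```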
[1]: /real_user_monitoring/browser/ +[2]: /tracing/trace_pipeline/ingestion_mechanisms/#head-based-sampling {{% /tab %}} {{% tab "Android RUM" %}} diff --git a/content/en/real_user_monitoring/session_replay/browser/_index.md b/content/en/real_user_monitoring/session_replay/browser/_index.md index a3678c8b449e0..19add9f4c86fb 100644 --- a/content/en/real_user_monitoring/session_replay/browser/_index.md +++ b/content/en/real_user_monitoring/session_replay/browser/_index.md @@ -72,12 +72,14 @@ if (user.isAuthenticated) { To stop the Session Replay recording, call `stopSessionReplayRecording()`. -
When using a version of the RUM Browser SDK older than v5.0.0, Session Replay recording does not begin automatically. Call `startSessionReplayRecording()` to begin recording.
+
When using a version of the RUM Browser SDK older than v5.0.0, Session Replay recording does not begin automatically. Call startSessionReplayRecording() to begin recording.
## Disable Session Replay To stop session recordings, set `sessionReplaySampleRate` to `0`. This stops collecting data for the [Browser RUM & Session Replay plan][6]. +
If you're using a version of the RUM Browser SDK older than v5.0.0, set replaySampleRate to 0.
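+
+For example, a minimal initialization sketch that keeps RUM collection on but disables Session Replay (parameter names follow SDK v5; `applicationId` and `clientToken` are placeholders):
+
+```javascript
+import { datadogRum } from '@datadog/browser-rum';
+
+datadogRum.init({
+    applicationId: '<DATADOG_APPLICATION_ID>',
+    clientToken: '<DATADOG_CLIENT_TOKEN>',
+    // Collect RUM data for all sessions...
+    sessionSampleRate: 100,
+    // ...but never record Session Replay.
+    sessionReplaySampleRate: 0,
+});
+```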
+ ## Retention By default, Session Replay data is retained for 30 days. diff --git a/content/en/security/application_security/enabling/compatibility/dotnet.md b/content/en/security/application_security/enabling/compatibility/dotnet.md index 2dc25810ba07f..240aec049f87e 100644 --- a/content/en/security/application_security/enabling/compatibility/dotnet.md +++ b/content/en/security/application_security/enabling/compatibility/dotnet.md @@ -25,14 +25,14 @@ The minimum tracer version to get all supported ASM capabilities for .NET is 2.4 **Note**: Threat Protection requires enabling [Remote Configuration][3], which is included in the listed minimum tracer version. ### Supported deployment types -|Type | Threat Detection support | Vulnerability Management for OSS support | -| --- | --- | ---- | -| Docker | {{< X >}} | {{< X >}} | -| Kubernetes | {{< X >}} | {{< X >}} | -| Amazon ECS | {{< X >}} | {{< X >}} | -| AWS Fargate | {{< X >}} | {{< X >}} | -| AWS Lambda | {{< X >}} | | -| Azure App Service | {{< X >}} | {{< X >}} | +| Type | Threat Detection support | Software Composition Analysis | +|-------------------|--------------------------|------------------------------------------| +| Docker | {{< X >}} | {{< X >}} | +| Kubernetes | {{< X >}} | {{< X >}} | +| Amazon ECS | {{< X >}} | {{< X >}} | +| AWS Fargate | {{< X >}} | {{< X >}} | +| AWS Lambda | {{< X >}} | | +| Azure App Service | {{< X >}} | {{< X >}} | **Note**: Azure App Service is supported for **web applications only**. ASM doesn't support Azure Functions. diff --git a/content/en/security/application_security/enabling/compatibility/go.md b/content/en/security/application_security/enabling/compatibility/go.md index 2d8a713da28c7..5e43b3cbfeb71 100644 --- a/content/en/security/application_security/enabling/compatibility/go.md +++ b/content/en/security/application_security/enabling/compatibility/go.md @@ -25,13 +25,13 @@ The minimum tracer version to get all supported ASM capabilities for Go is 1.59. **Note**: Threat Protection requires enabling [Remote Configuration][1], which is included in the listed minimum tracer version. ### Supported deployment types -|Type | Threat Detection support | Vulnerability Management for OSS support | -| --- | --- | ---- | -| Docker | {{< X >}} | {{< X >}} | -| Kubernetes | {{< X >}} | {{< X >}} | -| Amazon ECS | {{< X >}} | {{< X >}} | -| AWS Fargate | {{< X >}} | {{< X >}} | -| AWS Lambda | {{< X >}} | | +| Type | Threat Detection support | Software Composition Analysis | +|-------------|--------------------------|-------------------------------| +| Docker | {{< X >}} | {{< X >}} | +| Kubernetes | {{< X >}} | {{< X >}} | +| Amazon ECS | {{< X >}} | {{< X >}} | +| AWS Fargate | {{< X >}} | {{< X >}} | +| AWS Lambda | {{< X >}} | | ## Language and framework compatibility diff --git a/content/en/security/application_security/enabling/compatibility/java.md b/content/en/security/application_security/enabling/compatibility/java.md index f927b75ed3284..c30ac3c13c99d 100644 --- a/content/en/security/application_security/enabling/compatibility/java.md +++ b/content/en/security/application_security/enabling/compatibility/java.md @@ -25,14 +25,14 @@ The minimum tracer version to get all supported ASM capabilities for Java is 1.3 **Note**: Threat Protection requires enabling [Remote Configuration][2], which is included in the listed minimum tracer version. 
### Supported deployment types -|Type | Threat Detection support | Vulnerability Management for OSS support | -| --- | --- | ---- | -| Docker | {{< X >}} | {{< X >}} | -| Kubernetes | {{< X >}} | {{< X >}} | -| Amazon ECS | {{< X >}} | {{< X >}} | -| AWS Fargate | {{< X >}} | {{< X >}} | -| AWS Lambda | {{< X >}} | | -| Azure App Service | {{< X >}} | {{< X >}} | +| Type | Threat Detection support | Software Composition Analysis | +|-------------------|--------------------------|-------------------------------| +| Docker | {{< X >}} | {{< X >}} | +| Kubernetes | {{< X >}} | {{< X >}} | +| Amazon ECS | {{< X >}} | {{< X >}} | +| AWS Fargate | {{< X >}} | {{< X >}} | +| AWS Lambda | {{< X >}} | | +| Azure App Service | {{< X >}} | {{< X >}} | **Note**: Azure App Service is supported for **web applications only**. ASM doesn't support Azure Functions. diff --git a/content/en/security/application_security/enabling/compatibility/nodejs.md b/content/en/security/application_security/enabling/compatibility/nodejs.md index 548ea8d12954d..916fff6f80f07 100644 --- a/content/en/security/application_security/enabling/compatibility/nodejs.md +++ b/content/en/security/application_security/enabling/compatibility/nodejs.md @@ -27,13 +27,13 @@ The minimum tracer version to get all supported ASM capabilities for Node.js is - Threat Protection requires enabling [Remote Configuration][2], which is included in the listed minimum tracer version. ### Supported deployment types -| Type | Threat Detection support | Vulnerability Management for OSS support | -|-------------|--------------------------|------------------------------------------| -| Docker | {{< X >}} | {{< X >}} | -| Kubernetes | {{< X >}} | {{< X >}} | -| Amazon ECS | {{< X >}} | {{< X >}} | -| AWS Fargate | {{< X >}} | {{< X >}} | -| AWS Lambda | {{< X >}} | beta | +| Type | Threat Detection support | Software Composition Analysis | +|-------------|--------------------------|-------------------------------| +| Docker | {{< X >}} | {{< X >}} | +| Kubernetes | {{< X >}} | {{< X >}} | +| Amazon ECS | {{< X >}} | {{< X >}} | +| AWS Fargate | {{< X >}} | {{< X >}} | +| AWS Lambda | {{< X >}} | beta | ## Language and framework compatibility diff --git a/content/en/security/application_security/enabling/compatibility/php.md b/content/en/security/application_security/enabling/compatibility/php.md index 6f3b27b5e41df..884089a9594b0 100644 --- a/content/en/security/application_security/enabling/compatibility/php.md +++ b/content/en/security/application_security/enabling/compatibility/php.md @@ -26,13 +26,13 @@ The minimum tracer version to get all supported ASM capabilities for PHP is 0.98
If you would like to see support added for any of the unsupported capabilities, let us know! Fill out this short form to send details.
### Supported deployment types -|Type | Threat Detection support | Vulnerability Management for OSS support | -| --- | --- | ---- | -| Docker | {{< X >}} | {{< X >}} | -| Kubernetes | {{< X >}} | {{< X >}} | -| Amazon ECS | {{< X >}} | {{< X >}} | -| AWS Fargate | | | -| AWS Lambda | | | +| Type | Threat Detection support | Software Composition Analysis | +|-------------|--------------------------|-------------------------------| +| Docker | {{< X >}} | {{< X >}} | +| Kubernetes | {{< X >}} | {{< X >}} | +| Amazon ECS | {{< X >}} | {{< X >}} | +| AWS Fargate | | | +| AWS Lambda | | | ## Language and framework compatibility diff --git a/content/en/security/application_security/enabling/compatibility/python.md b/content/en/security/application_security/enabling/compatibility/python.md index 1741f7cf8bdc2..6f09a6f1c50c4 100644 --- a/content/en/security/application_security/enabling/compatibility/python.md +++ b/content/en/security/application_security/enabling/compatibility/python.md @@ -21,13 +21,13 @@ The following ASM capabilities are supported in the Python library, for the spec **Note**: Threat Protection requires enabling [Remote Configuration][2], which is included in the listed minimum tracer version. ### Supported deployment types -|Type | Threat Detection support | Vulnerability Management for OSS support | -| --- | --- | ---- | -| Docker | {{< X >}} | {{< X >}} | -| Kubernetes | {{< X >}} | {{< X >}} | -| Amazon ECS | {{< X >}} | {{< X >}} | -| AWS Fargate | {{< X >}} | {{< X >}} | -| AWS Lambda | {{< X >}} | | +| Type | Threat Detection support | Software Composition Analysis | +|-------------|--------------------------|-------------------------------| +| Docker | {{< X >}} | {{< X >}} | +| Kubernetes | {{< X >}} | {{< X >}} | +| Amazon ECS | {{< X >}} | {{< X >}} | +| AWS Fargate | {{< X >}} | {{< X >}} | +| AWS Lambda | {{< X >}} | | ## Language and framework compatibility diff --git a/content/en/security/application_security/enabling/compatibility/ruby.md b/content/en/security/application_security/enabling/compatibility/ruby.md index 32e048c52b950..581a1964df145 100644 --- a/content/en/security/application_security/enabling/compatibility/ruby.md +++ b/content/en/security/application_security/enabling/compatibility/ruby.md @@ -25,13 +25,13 @@ The minimum tracer version to get all supported ASM capabilities for Ruby is 1.1
If you would like to see support added for any of the unsupported capabilities, or for your Ruby framework, let us know! Fill out this short form to send details.
### Supported deployment types -|Type | Threat Detection support | Vulnerability Management for OSS support | -| --- | --- | ---- | -| Docker | {{< X >}} | | -| Kubernetes | {{< X >}} | | -| Amazon ECS | {{< X >}} | | -| AWS Fargate | {{< X >}} | | -| AWS Lambda | | | +| Type | Threat Detection support | Software Composition Analysis | +|-------------|--------------------------|-------------------------------| +| Docker | {{< X >}} | | +| Kubernetes | {{< X >}} | | +| Amazon ECS | {{< X >}} | | +| AWS Fargate | {{< X >}} | | +| AWS Lambda | | | ## Language and framework compatibility diff --git a/content/en/security/notifications/variables.md b/content/en/security/notifications/variables.md index 664cb4f1820fb..ee7487c26e7e9 100644 --- a/content/en/security/notifications/variables.md +++ b/content/en/security/notifications/variables.md @@ -102,6 +102,8 @@ Use attribute variables to customize signal notifications with specific informat To see a signal's list of event attributes, click **JSON** at the bottom of the **Overview** tab in the signal's side panel. Use the following syntax to add these event attributes in your rule notifications: `{{@attribute}}`. To access inner keys of the event attributes, use JSON dot notation, for example, `{{@attribute.inner_key}})`. +If the signal's JSON does not contain an attribute that is present in the related log's JSON, use the previously outlined syntax with the attribute name from the log's JSON. This attribute is then included in both the signal's JSON and the signal notifications. + The following is an example JSON object with event attributes that may be associated with a security signal: {{< tabs >}} diff --git a/content/en/sensitive_data_scanner/_index.md b/content/en/sensitive_data_scanner/_index.md index 841bdbb34c036..f1b6a70761a86 100644 --- a/content/en/sensitive_data_scanner/_index.md +++ b/content/en/sensitive_data_scanner/_index.md @@ -28,7 +28,9 @@ Sensitive data, such as credit card numbers, bank routing numbers, and API keys Sensitive Data Scanner is a stream-based, pattern matching service used to identify, tag, and optionally redact or hash sensitive data. Security and compliance teams can implement Sensitive Data Scanner as a new line of defense, helping prevent against sensitive data leaks and limiting non-compliance risks. -To use Sensitive Data Scanner, set up a scanning group to define what data to scan and then set up scanning rules to determine what sensitive information to match within the data. +To use Sensitive Data Scanner, set up a scanning group to define what data to scan and then set up scanning rules to determine what sensitive information to match within the data. + +If you want to redact your sensitive data in your environment before shipping to your downstream destinations, see how to [redact sensitive data with Observability Pipelines][14]. 
This document walks you through the following: @@ -182,3 +184,4 @@ When Sensitive Data Scanner is enabled, an [out-of-the-box dashboard][13] summar [11]: /logs/log_configuration/processors/?tab=ui#remapper [12]: https://app.datadoghq.com/logs/pipelines [13]: https://app.datadoghq.com/dash/integration/sensitive_data_scanner +[14]: /observability_pipelines/sensitive_data_redaction/ diff --git a/content/en/service_catalog/create_entries.md b/content/en/service_catalog/create_entries.md index 6a4f2d8c01056..71012b2aa6f58 100644 --- a/content/en/service_catalog/create_entries.md +++ b/content/en/service_catalog/create_entries.md @@ -22,24 +22,25 @@ further_reading: text: "Import Backstage YAML files into Datadog" --- -## Create user-defined services +## Create user-defined entries -To add your own services to Service Catalog, you can either manually add them by creating Service Definitions through the API or GitHub integration or [import](#import-data-from-other-sources) them from existing sources like ServiceNow or Backstage. These services are by default not associated with any Datadog telemetry, but you can link telemetries from Datadog or external sources manually using `service.datadog.yaml` files. +To manage components in Datadog Service Catalog that are not currently emitting performance metrics through APM, USM, or RUM, you can either add them manually by creating Service Definitions through the API or the GitHub integration, or [import](#import-data-from-other-sources) them from existing sources like ServiceNow or Backstage. These services are by default not associated with any Datadog telemetry, but you can link telemetries from Datadog or external sources manually using `service.datadog.yaml` files. To create a user-defined service, name your service in the `dd-service` field in a `service.datadog.yaml` file at the root of the repository, using one of the supported metadata schema versions. For example: #### Example {{< code-block lang="yaml" filename="service.datadog.yaml" collapsible="true" >}} -schema-version: v2.1 +schema-version: v2.2 dd-service: my-unmonitored-cron-job -team: shopist +team: e-commerce +lifecycle: production +application: shopping-app +description: important cron job for shopist backend +tier: "2" +type: web contacts: - type: slack contact: https://datadogincidents.slack.com/archives/XXXXX -application: shopist -description: important cron job for shopist backend -tier: tier1 -lifecycle: production links: - name: Common Operations type: runbook
diff --git a/content/en/service_catalog/manage_entries.md b/content/en/service_catalog/manage_entries.md index 1d104239014c5..eacb6f7290ab5 100644 --- a/content/en/service_catalog/manage_entries.md +++ b/content/en/service_catalog/manage_entries.md @@ -97,20 +97,36 @@ extensions: customField2: customValue2 {{< /code-block >}} -## Changing the service color +## Manage service-related workflows +[Workflow Automation][14] allows you to automate end-to-end processes across your teams. It integrates with Datadog's Service Catalog to enable dynamic and self-service workflows. + +### Find Service Catalog actions +To explore the complete set of actions specifically related to Service Catalog, navigate to the [Datadog Action Catalog][23]. Filter for the actions you need: + +1. **Access the Action Catalog**: Look for the Action Catalog within your Datadog Workflow Automation environment. +2. **Search Functionality**: Use the search bar to search for keywords like "Service Catalog" or more specific terms related to desired actions (for example, "get service dependencies"). + +### Available Service Catalog Actions + +Below is a comprehensive list of actions available for Service Catalog in Datadog Workflow Automation. Note that this list may evolve as new actions are added. +- **Retrieve Service Information** + - "Get service definition" for a single service + - "List service definitions" to get all definitions from Datadog Service Catalog + - "Get service dependencies" to get a service's immediate upstream and downstream services +- **Incident Triage** + - "Get service PagerDuty on call" + - When integrated with other actions, you can trigger workflows based on critical events (for example, execute runbooks). + +## Changing the service color The service color is used in trace visualizations. Click the service type icon to change it. {{< img src="tracing/service_catalog/change_service_color.png" alt="Click the service icon to select a different icon color." style="width:80%;" >}} -## Manage service-related workflows -[Workflow Automation][14] allows you to automate end-to-end processes across your teams. It integrates with Datadog's Service Catalog to enable dynamic and self-service workflows. - -### Updating the service type and language +## Updating the service type and language With [Service Catalog metadata schema 2.2][19], you can specify the type and language for user-defined services or overwrite the auto-detected type and language for instrumented services. Correctly label the service type and language to help other teams further understand what your services do and how to interact with them. - ## Further reading {{< partial name="whats-next/whats-next.html" >}} @@ -130,3 +146,4 @@ With [Service Catalog metadata schema 2.2][19], you can specify the type and lan [20]: /service_catalog/service_definitions [21]: https://github.com/DataDog/schema/tree/main/service-catalog/v3 [22]: https://forms.gle/zbLfnJYhD5Ab4Wr18 +[23]: https://docs.datadoghq.com/service_management/workflows/actions_catalog/ diff --git a/content/en/service_catalog/navigating.md b/content/en/service_catalog/navigating.md index 0e6cf511d48a5..7c4e01509cba1 100644 --- a/content/en/service_catalog/navigating.md +++ b/content/en/service_catalog/navigating.md @@ -114,26 +114,6 @@ The **Software Delivery tab** provides several ways to assess and improve the pr To access additional details describing your CI status and static analysis violations, click on a service and see the status of each pipeline and rule violation. 
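+
+For example, if your editor uses the YAML language server, you can typically associate the schema with your definition file through an inline modeline; the schema location below is a placeholder for wherever you reference the published schema:
+
+```yaml
+# yaml-language-server: $schema=<PATH_OR_URL_TO_SERVICE_DEFINITION_SCHEMA>
+schema-version: v2.2
+dd-service: my-service
+team: my-team
+```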
-### Find Service Catalog actions - -To explore the complete set of actions specifically related to Service Catalog, navigate to the [Datadog Action Catalog][15]. Filter for the actions you need: - -1. **Access the Action Catalog**: Look for the Action Catalog within your Datadog Workflow Automation environment. -2. **Search Functionality**: Use the search bar to search for keywords like "Service Catalog" or more specific terms related to desired actions (for example, "get service dependencies"). - -### Available Service Catalog Actions - -Below is a comprehensive list of actions available for Service Catalog in Datadog Workflow Automation. Note that this list may evolve as new actions are added. - -- **Retrieve Service Information** - - "Get service definition" for a single service - - "List service definitions" to get all definitions from Datadog Service Catalog - - "Get service dependencies" to get a service's immediate upstream and downstream services -- **Incident Triage** - - "Get service PagerDuty on call" - - When integrated with other actions, you can trigger workflows based on critical events (for example, execute runbooks). - - ## Further reading {{< partial name="whats-next/whats-next.html" >}} diff --git a/content/en/service_management/incident_management/incident_settings.md b/content/en/service_management/incident_management/incident_settings.md index 7d48040778543..d7f10d44e1691 100644 --- a/content/en/service_management/incident_management/incident_settings.md +++ b/content/en/service_management/incident_management/incident_settings.md @@ -163,7 +163,7 @@ To configure a new notification rule: 2. Under **For incidents matching...**, select the incident property field `key:value` pairs you want notifications to be sent for. By default, these filters are empty, and a notification rule triggers for any incident. 3. **Notify**: Select your notification recipients. Notifications can be sent to any of Datadog's existing [notification integrations][18]. If you want to notify a recipient's mobile device, select the option for their name that includes **(Mobile Push Notification)**. The recipient must have enabled notifications in the [Datadog mobile app][19] for this option to appear. 4. **With Template**: Select the desired message template you want the notification rule to use. -5. **Renotify on updates to**: Choose which incident properties trigger renotifications. Whenever one or more of the selected properties changes, a new notification is sent. Note that you cannot renotify on properties that are already in your filters (see step 2, above). +5. **Renotify on updates to**: Select the incident properties that trigger notifications. A new notification is sent whenever one or more of the selected properties change. **Note**: properties already in your filters (see step 2) are automatically included in these rules. 6. Click **Save** You can perform the following operations to manage your notification rules. diff --git a/content/en/service_management/service_level_objectives/_index.md b/content/en/service_management/service_level_objectives/_index.md index 7ca31ad9a4bfe..cac031e18d576 100644 --- a/content/en/service_management/service_level_objectives/_index.md +++ b/content/en/service_management/service_level_objectives/_index.md @@ -301,13 +301,12 @@ The SLO Calendar View is available on the [SLO status page][2]. On the top right The CSV Export feature is in Private Beta. Complete the form to request access. 
{{< /callout >}} -The SLO CSV Export feature is available on the [SLO status page][2] once you switch to the "Weekly" or "Monthly" Calendar View. In these views, you can access the new "Export to CSV" option to download a CSV of your historical SLO data. +The SLO CSV Export feature is available on the [SLO status page][2] once you switch to the "Weekly" or "Monthly" Calendar View. In these views, you can access the new "Export to CSV" option to download a CSV of your historical SLO data with the following information: -**Notes:** - -- The SLOs that are exported are based on your search query. -- The Calendar View is supported for Metric-based and Time Slice SLOs. If you export any Monitor-based SLOs, only the SLO ID and name will be in the CSV (not the SLO’s status history data). -- There is a limit of 1000 SLOs per export. +- SLO id, name, and type +- SLO tags +- SLO target +- Historical SLO status values {{< img src="service_management/service_level_objectives/slo-csv-export.png" alt="SLO calendar view" >}} @@ -322,6 +321,12 @@ The SLO statuses are calculated based on the SLO type: - **Metric-based SLOs:** Percent of good events out of total events for the time window - **Time Slice SLOs:** Percent of good minutes out of total minutes for the time window +**Notes:** + +- The SLOs that are exported are based on your search query. +- The Calendar View is supported for Metric-based and Time Slice SLOs. If you export any Monitor-based SLOs, only the SLO ID and name will be in the CSV (not the SLO’s status history data). +- There is a limit of 1000 SLOs per export. + ## Further Reading {{< partial name="whats-next/whats-next.html" >}} diff --git a/content/en/tests/guides/flaky_test_management.md b/content/en/tests/guides/flaky_test_management.md index d63c8ba175ad5..042a15ec10b6a 100644 --- a/content/en/tests/guides/flaky_test_management.md +++ b/content/en/tests/guides/flaky_test_management.md @@ -39,6 +39,16 @@ Once you identify a flaky test you want to fix, click on the test to see links t If a flaky test has not failed in the past 30 days, it is automatically removed from the table. You can also manually remove a flaky test by clicking on the trash icon that appears when you hover over the test row. It is added again if it re-exhibits flaky behavior. +### Flaky tests in the default branch + +The flaky test table for the default branch includes tests that have flaked in the default branch as well as any tests that have exhibited flakiness in a feature branch that was merged into the default branch. + +Flaky tests from merged feature branches are found by checking which tests have exhibited flakiness in the most recent 5,000 commits using the Git commit history. The Git commit history is collected by the [Test Visibility libraries][4] and uploaded along with the test results every time the testing phase of a particular commit is executed in your CI build. + +Limitations: +* If you squash or reset and force push commits in your feature branch, flaky tests that have been detected in that branch are not shown in the default branch because the commit history has been altered. +* If a flaky test is detected and subsequently fixed in the same feature branch, it still appears as a flaky test in the default branch, because the fix for the flaky test cannot be detected. However, [you can manually remove that flaky test from the flaky tests table][5]. 
+ ### New flaky tests New flaky tests are tests that exhibit flaky behavior and didn't previously exist in the Flaky Tests table for the current branch or default branch of the repository. @@ -49,7 +59,7 @@ New flaky tests are tests that exhibit flaky behavior and didn't previously exis 1. Navigate to the [Test Runs][2] page. 2. In the facets list on the left sidebar, expand the **New Flaky** facet in the **Test** section, and check `true`. -All test runs that exhibited flakey behavior for the first time as per the definition above are displayed. +All test runs that exhibited flaky behavior for the first time as per the definition above are displayed. #### Branches page @@ -90,3 +100,5 @@ Failed test runs that were known to be flaky as per the definition above are dis [1]: /glossary/#flaky-test [2]: https://app.datadoghq.com/ci/test-runs [3]: https://app.datadoghq.com/ci/test-services?view=branches +[4]: /tests/#use-ci-tests-data +[5]: /tests/guides/flaky_test_management/#ignore-new-flaky-tests-detected-by-mistake \ No newline at end of file diff --git a/content/en/tests/search/_index.md b/content/en/tests/search/_index.md index 0714e8fba887e..62e4c3f0375e2 100644 --- a/content/en/tests/search/_index.md +++ b/content/en/tests/search/_index.md @@ -1,5 +1,5 @@ --- -title: Search and Manage CI Tests +title: Search and Manage CI Tests description: Learn how to search for your CI tests. algolia: rank: 70 @@ -16,10 +16,10 @@ further_reading: {{< site-region region="gov" >}}
CI Visibility is not available in the selected site ({{< region-param key="dd_site_name" >}}) at this time.
{{< /site-region >}} - + ## Overview -The [Tests page][1] is useful for developers who want to keep an eye on their test results. +The [Tests page][1] is useful for developers who want to keep an eye on their test results. {{< img src="/continuous_integration/tests.png" text="CI Tests page" style="width:100%" >}} @@ -27,7 +27,7 @@ You can access low-level and immediate insights: - See what tests are failing and why. - See your last commit's test results. -- View the wall time of your tests in your feature branch and compare it to the default branch, to identify if you're about to introduce a performance regression. +- View the total duration of your tests in your feature branch. - Find out if your commit introduces a new [flaky test][5] that wasn't flaky before, indicating that your code change is what's making it flaky. This gives you the opportunity to fix the problem before proceeding rather than contributing to the number of flaky tests in your CI. You can also access high-level accumulation and trends: @@ -48,18 +48,12 @@ In this page, you can filter the list by name, test service, or commit SHA, or t #### Test results -For each branch, you can see the test service, the number of failed, passed, and skipped tests, test regressions, wall time, the percentage of change compared to the default branch, when the commit was last updated, and the avatar of the author of the commit. +For each branch, you can see the test service, the number of failed, passed, and skipped tests, test regressions, total test time, when the commit was last updated, and the avatar of the author of the commit. Click on a branch to explore the test details page, which includes information about the branch's latest commits, flaky tests, test performance, common error types, and all test runs. {{< img src="continuous_integration/test_details.png" alt="Test Details page for a single branch" style="width:100%;">}} -#### Test suite performance - -There is also information about the [wall time][4] of the most recent test suite run, and a comparison to the average wall time of the default branch. The comparison of your branch's wall time to the default branch's wall time can help you determine if your commit is introducing performance [regressions][6] to your test suite. - -Hovering over the commit author avatar shows detailed information about the latest commit. - #### Test regressions [Test regressions][6] are evaluated per commit in an effort to tie performance regressions to specific code changes. @@ -78,7 +72,7 @@ Click the CI provider link to examine the Resource, Service, or Analytics page f The [Default Branches][7] view of the Tests page shows aggregated health metrics for the _default_ branch of each test service. This view is useful for teams to understand the overall health of the service over time. -The Default Branches view shows similar information to the Branches view, but applied to the default branch. It compares the current wall time with the average default branch wall time to give you an indication of how your test suite performance is trending over time. +The Default Branches view shows similar information to the Branches view, but applied to the default branch. 
## Further reading @@ -87,7 +81,6 @@ The Default Branches view shows similar information to the Branches view, but ap [1]: https://app.datadoghq.com/ci/test-services [2]: https://app.datadoghq.com/ci/test-services?view=branches [3]: /glossary/#test-service -[4]: /glossary/#wall-time -[5]: /glossary/#flaky-test -[6]: /glossary/#test-regression -[7]: https://app.datadoghq.com/ci/test-services?view=default-branches \ No newline at end of file +[4]: /glossary/#flaky-test +[5]: /glossary/#test-regression +[6]: https://app.datadoghq.com/ci/test-services?view=default-branches diff --git a/content/en/tests/troubleshooting/_index.md b/content/en/tests/troubleshooting/_index.md index cf131f8016eb8..44ecd33c7f261 100644 --- a/content/en/tests/troubleshooting/_index.md +++ b/content/en/tests/troubleshooting/_index.md @@ -85,28 +85,18 @@ If you can see test results data in the **Test Runs** tab, but not the **Tests** 4. If no CI provider environment variables are found, tests results are sent with no Git metadata. -## The tests wall time is empty +### The total test time is empty +If you cannot see the total test time, it is likely that test suite level visibility is not enabled. To confirm, check if your language supports test suite level visibility in [Supported features][15]. If test suite level visibility is supported, update your tracer to the latest version. -If you cannot see the tests wall time it is likely that the CI provider metadata is missing. To confirm this is the case, open a test execution in the [**Test Runs**][4] section, and check if the `ci.pipeline.id`, `ci.pipeline.name`, `ci.pipeline.number`, or `ci.job.url` tags are missing. If these tags are not populated, then nothing shows in the wall time column. +If you still don't see the total time after updating the tracer version, contact [Datadog support][2] for help. -1. Tracers use the environment variables set by the CI provider to collect this information. See [Running tests inside a container][7] for a list of environment variables that the tracer attempts to read for each supported CI provider. Make sure that the environment variables have the expected values set. -2. Check that you are running your tests in a supported CI provider. For a list of supported CI providers, see [Running tests inside a container][7]. Only these CI providers can extract the information to enrich the test metadata with CI information. -3. If you still don't see the wall time, contact [Datadog support][2] for help. +### The total test time is different than expected -## The tests wall time is not what is expected +#### How total time is calculated +The total time is defined as the sum of the maximum test session durations. -### How wall time is calculated -The wall time is defined as the time difference between the start time of the first test and the end time of the last test for the given pipeline. - -This is done using the following algorithm: - -1. Compute a hash based on CI information to group the tests. - 1. If the tests include `ci.job.url`, use this tag to calculate the hash. - 2. If the tests don't include `ci.job.url`, use `ci.pipeline.id` + `ci.pipeline.name` + `ci.pipeline.number` to calculate the hash. -2. The calculated wall time is associated to a given hash. **Note**: If there are multiple jobs that execute tests, the wall time is calculated for each job, and the maximum from all calculated wall times is shown. 
- -### Possible issues with wall time calculation -If you're using a library for testing time-dependent code, like [timecop][8] for Ruby or [FreezeGun][9] for Python, it is possible that test timestamps are wrong, and therefore calculated wall times. If this is the case, make sure that modifications to time are rolled back before finishing your tests. +1. The maximum duration of a test session grouped by the test session fingerprint is calculated. +2. The maximum test session durations are summed. ## The test status numbers are not what is expected @@ -145,8 +135,6 @@ The default branch is used to power some features of the products, namely: - Default branches list on the Tests page: This list only displays default branches. Setting the wrong default branch can result in missing or incorrect data in the default branches list. -- Wall time comparison for non-default branches: On the Tests page, in the Branches view, the **VS Default** column is calculated by comparing wall time for the current branch against wall time for the default branch. - - New flaky tests: Tests that are not currently classified as flaky in the default branch. If the default branch is not properly set, this could lead to a wrong number of detected new flaky tests. - Pipelines list: The pipelines list only displays default branches. Setting the wrong default branch can result in missing or incorrect data in the pipelines list. diff --git a/content/en/tracing/metrics/metrics_namespace.md b/content/en/tracing/metrics/metrics_namespace.md index f2d4fc15d83d0..561c582be3940 100644 --- a/content/en/tracing/metrics/metrics_namespace.md +++ b/content/en/tracing/metrics/metrics_namespace.md @@ -58,7 +58,7 @@ With the following definitions: `trace..hits` : **Prerequisite:** This metric exists for any APM service.
-**Description:** Represent the count of hits for a given span.
+**Description:** Represents the count of spans created with a specific name (for example, `redis.command`, `pylons.request`, `rails.request`, or `mysql.query`).&#13;
**Metric type:** [COUNT][5].
**Tags:** `env`, `service`, `version`, `resource`, `resource_name`, `http.status_code`, all host tags from the Datadog Host Agent, and [the second primary tag][4]. diff --git a/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/dotnet-core.md b/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/dotnet-core.md index 5ce648a8f3c21..7421cf74f228f 100644 --- a/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/dotnet-core.md +++ b/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/dotnet-core.md @@ -61,7 +61,7 @@ For a full list of Datadog's .NET Core library and processor architecture suppor
- Note: Datadog's automatic instrumentation relies on the .NET CLR Profiling API. This API allows only one subscriber (for example, Datadog's .NET Tracer with Profiler enabled). To ensure maximum visibility, run only one APM solution in your application environment. + Note: Datadog's automatic instrumentation relies on the .NET CLR Profiling API. This API allows only one subscriber (for example, Datadog APM). To ensure maximum visibility, run only one APM solution in your application environment.
diff --git a/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/dotnet-framework.md b/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/dotnet-framework.md index 9f5812b43764f..697a8f5e548e4 100644 --- a/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/dotnet-framework.md +++ b/content/en/tracing/trace_collection/automatic_instrumentation/dd_libraries/dotnet-framework.md @@ -65,7 +65,7 @@ For a full list of Datadog's .NET Framework library and processor architecture s
- Note: Datadog's automatic instrumentation relies on the .NET CLR Profiling API. This API allows only one subscriber (for example, Datadog's .NET Tracer with Profiler enabled). To ensure maximum visibility, run only one APM solution in your application environment. + Note: Datadog's automatic instrumentation relies on the .NET CLR Profiling API. This API allows only one subscriber (for example, Datadog APM). To ensure maximum visibility, run only one APM solution in your application environment.
### Installation @@ -296,4 +296,4 @@ dotnet.exe example.dll [4]: /tracing/trace_collection/library_config/dotnet-framework/ [5]: /tracing/trace_collection/custom_instrumentation/dotnet/ [11]: /tracing/trace_collection/library_injection_local/ -[12]: /tracing/trace_collection/automatic_instrumentation/?tab=datadoglibraries#install-and-configure-the-agent \ No newline at end of file +[12]: /tracing/trace_collection/automatic_instrumentation/?tab=datadoglibraries#install-and-configure-the-agent diff --git a/content/en/tracing/trace_collection/automatic_instrumentation/single-step-apm.md b/content/en/tracing/trace_collection/automatic_instrumentation/single-step-apm.md index a3d9ced124df5..2a3a4476ea3cf 100644 --- a/content/en/tracing/trace_collection/automatic_instrumentation/single-step-apm.md +++ b/content/en/tracing/trace_collection/automatic_instrumentation/single-step-apm.md @@ -28,6 +28,8 @@ The following examples show how it works for each deployment type. {{< tabs >}} {{% tab "Linux host or VM" %}} +
If you've previously used Single Step Instrumentation with Linux hosts, update to the latest version.
+ For an Ubuntu host: 1. Run the one-line installation command: diff --git a/content/en/tracing/trace_collection/automatic_instrumentation/ssi-0-13-1.md b/content/en/tracing/trace_collection/automatic_instrumentation/ssi-0-13-1.md new file mode 100644 index 0000000000000..9d0aa7096222f --- /dev/null +++ b/content/en/tracing/trace_collection/automatic_instrumentation/ssi-0-13-1.md @@ -0,0 +1,13 @@ +--- +title: Single Step Instrumentation v0.13.1 +kind: documentation +private: true +--- + +The Agent installer script now installs version 0.13.1 of Single Step Instrumentation. Previous versions of Single Step Instrumentation created a temporary file without restricting read permissions. This file contains the environment variables specified when a process is launched. Single Step Instrumentation deletes the file automatically, although in rare circumstances this may not occur. The permissions on this file have been updated. If you find any files in the `/tmp` directory of your Linux hosts whose names start with `dd_process_info_` , we recommend deleting them. + +Customers who use Single Step Instrumentation for Linux Hosts should update to the latest version of Single Step Instrumentation by running the Datadog installer script with the environment variable `DD_APM_INSTRUMENTATION_ENABLED=host` : + +```sh +DD_API_KEY= DD_SITE="" DD_APM_INSTRUMENTATION_ENABLED=host DD_ENV= bash -c "$(curl -L https://s3.amazonaws.com/dd-agent/scripts/install_script_agent7.sh)" +``` diff --git a/content/en/tracing/trace_collection/custom_instrumentation/dotnet/otel.md b/content/en/tracing/trace_collection/custom_instrumentation/dotnet/otel.md index b7ae958a7429e..d1ddc6b89dfe7 100644 --- a/content/en/tracing/trace_collection/custom_instrumentation/dotnet/otel.md +++ b/content/en/tracing/trace_collection/custom_instrumentation/dotnet/otel.md @@ -17,45 +17,116 @@ further_reading: text: 'Interoperability of OpenTelemetry API and Datadog instrumented traces' --- -{{% otel-custom-instrumentation %}} +{{% otel-custom-instrumentation-lang %}} -## Requirements and limitations +## Setup -- Datadog .NET tracing library `dd-trace-dotnet` version 2.21.0 or greater. +To configure OpenTelemetry to use the Datadog trace provider: -The following OpenTelemetry features implemented in the Datadog library as noted: +1. Add your desired manual OpenTelemetry instrumentation to your .NET code following the [OpenTelemetry .NET Manual Instrumentation documentation][5]. **Note**: Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. -| Feature | Support notes | -|---------------------------------------|---------------------------------------------------------| -| OpenTelemetry Context propagation | [W3C Trace Context and Datadog header formats][9] are enabled by default. | -| [Span processors][2] | Unsupported | -| [Span Exporters][3] | Unsupported | -| Trace/span [ID generators][4] | ID generation is performed by the tracing library, with support for [128-bit trace IDs][12]. | +2. Install the Datadog .NET tracing library and enable the tracer for your [.NET Framework service][10] or your [.NET Core (and .NET 5+) service][11]. **Beta**: You can optionally do this with [Single Step APM Instrumentation][13]. +3. Set `DD_TRACE_OTEL_ENABLED` environment variable to `true`. -## Configuring OpenTelemetry to use the Datadog trace provider +4. Run your application. -1. 
Add your desired manual OpenTelemetry instrumentation to your .NET code following the [OpenTelemetry .NET Manual Instrumentation documentation][5]. **Important!** Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead.
+Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. It also supports [OpenTelemetry instrumentation libraries][8].
-2. Install the Datadog .NET tracing library and enable the tracer for your [.NET Framework service][10] or your [.NET Core (and .NET 5+) service][11]. **Beta:** You can optionally do this with [One-Step APM Instrumentation][13].
+## Creating custom spans
-3. Set `DD_TRACE_OTEL_ENABLED` environment variable to `true`.
+To manually create spans that start a new, independent trace:
-4. Run your application.
+```csharp
+using System.Diagnostics;
+using OpenTelemetry.Resources;
+using OpenTelemetry.Trace;
+
+// Start a new span
+using (Activity? activity = Telemetry.ActivitySource.StartActivity(""))
+{
+    activity?.SetTag("operation.name", "custom-operation");
+    // Do something
+}
+```
+
+## Creating spans
+
+To create custom spans within an existing trace context:
+
+```csharp
+using System.Diagnostics;
+using OpenTelemetry.Resources;
+using OpenTelemetry.Trace;
+
+using (Activity? parentScope = Telemetry.ActivitySource.StartActivity(""))
+{
+    parentScope?.SetTag("operation.name", "manual.sortorders");
+    using (Activity? childScope = Telemetry.ActivitySource.StartActivity(""))
+    {
+        // Nest using statements around the code to trace
+        childScope?.SetTag("operation.name", "manual.sortorders.child");
+        SortOrders();
+    }
+}
+```
+
+## Adding span tags
+
+Add custom tags to your spans to provide additional context:
+
+{{< highlight csharp "hl_lines=15" >}}
+using OpenTelemetry.Resources;
+using OpenTelemetry.Trace;
+
+public class ShoppingCartController : Controller
+{
+    private IShoppingCartRepository _shoppingCartRepository;
+
+    [HttpGet]
+    public IActionResult Index(int customerId)
+    {
+        Activity? activity =
+            Telemetry.ActivitySource.StartActivity("");
+
+        // Add a tag to the span for use in the Datadog web UI
+        activity?.SetTag("customer.id", customerId.ToString());
+
+        var cart = _shoppingCartRepository.Get(customerId);
+
+        return View(cart);
+    }
+}
+{{< /highlight >}}
+
+## Setting errors on spans
+
+Set error information on a span when an error occurs during its execution.
+
+```csharp
+try
+{
+    // do work that can throw an exception
+}
+catch (Exception e)
+{
+    activity?.SetTag("error", 1);
+    activity?.SetTag("error.msg", e.Message);
+    activity?.SetTag("error.stack", e.ToString());
+    activity?.SetTag("error.type", e.GetType().ToString());
+}
+```
+
+## Propagating context with headers extraction and injection
-Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. It supports [OpenTelemetry instrumentation libraries][8] also.
+You can configure the propagation of context for distributed traces by injecting and extracting headers. Read [Trace Context Propagation][14] for information.
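+
+As an illustration, propagation styles can also be selected through tracer configuration rather than code. The following is a minimal sketch, assuming the standard `DD_TRACE_PROPAGATION_STYLE` environment variable supported by Datadog tracing libraries; treat the values as examples only:
+
+```shell
+# Illustrative only: choose which header formats the .NET tracer
+# injects into and extracts from requests.
+export DD_TRACE_PROPAGATION_STYLE="datadog,tracecontext"
+dotnet example.dll
+```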
## Further Reading {{< partial name="whats-next/whats-next.html" >}} -[2]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-processor -[3]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-exporter -[4]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#id-generators [5]: https://opentelemetry.io/docs/instrumentation/net/manual/ [8]: https://opentelemetry.io/docs/instrumentation/net/libraries/ -[9]: /tracing/trace_collection/trace_context_propagation/dotnet/ [10]: /tracing/trace_collection/dd_libraries/dotnet-framework/#installation-and-getting-started [11]: /tracing/trace_collection/dd_libraries/dotnet-core/#installation-and-getting-started -[12]: /opentelemetry/guide/otel_api_tracing_interoperability/ -[13]: /tracing/trace_collection/single-step-apm/ \ No newline at end of file +[13]: /tracing/trace_collection/single-step-apm/ +[14]: /tracing/trace_collection/trace_context_propagation/dotnet/ \ No newline at end of file diff --git a/content/en/tracing/trace_collection/custom_instrumentation/go/otel.md b/content/en/tracing/trace_collection/custom_instrumentation/go/otel.md index a130edf96d075..7da08b14e1d3a 100644 --- a/content/en/tracing/trace_collection/custom_instrumentation/go/otel.md +++ b/content/en/tracing/trace_collection/custom_instrumentation/go/otel.md @@ -17,25 +17,30 @@ further_reading: text: 'Interoperability of OpenTelemetry API and Datadog instrumented traces' --- -{{% otel-custom-instrumentation %}} +{{% otel-custom-instrumentation-lang %}} -## Requirements and limitations +## Imports + +Import the following packages to setup the Datadog trace provider and use cases demonstrated below: -- Datadog Go tracing library `dd-trace-go` version 1.5.0 or greater. -- Go version 1.18 or greater. -- The Datadog OpenTelemetry API implementation is dependent on upstream [OpenTelemetry Go][6]. +```go +import ( + "context" + "log" + "os" -The following OpenTelemetry features are implemented in the Datadog library as noted: + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" + ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry" + ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" -| Feature | Support notes | -|---------------------------------------|------------------------------------| -| [OpenTelemetry Context propagation][1] | [W3C Trace Context and Datadog header formats][9] are enabled by default. | -| [Span processors][2] | Unsupported | -| [Span Exporters][3] | Unsupported | -| Trace/span [ID generators][4] | ID generation is performed by the tracing library, with support for [128-bit trace IDs][12]. | + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +) +``` +## Setup -## Configuring OpenTelemetry to use the Datadog trace provider +To configure OpenTelemetry to use the Datadog trace provider: 1. Add your desired manual OpenTelemetry instrumentation to your Go code following the [OpenTelemetry Go Manual Instrumentation documentation][5]. **Important!** Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. @@ -77,6 +82,155 @@ The following OpenTelemetry features are implemented in the Datadog library as n Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. +## Adding span tags + +Add custom tags to your spans to attach additional metadata and context to your traces. + +```go +// Can only be done after the setup steps, such as initialising the tracer. 
+
+// Start a span.
+ctx, span := t.Start(ctx, "read.file")
+// Set an attribute, or a tag in Datadog terminology, on a span.
+span.SetAttributes(attribute.String(ext.ResourceName, "test.json"))
+```
+
+### Adding tags globally to all spans
+
+Add tags to all spans by configuring the tracer with the `WithGlobalTag` option:
+
+```go
+// Here we can leverage the Datadog tracer options by passing them into the
+// NewTracerProvider function.
+provider := ddotel.NewTracerProvider(
+    ddtracer.WithGlobalTag("datacenter", "us-1"),
+    ddtracer.WithGlobalTag("env", "dev"),
+)
+defer provider.Shutdown()
+
+// Use it with the OpenTelemetry API to set the global TracerProvider.
+otel.SetTracerProvider(provider)
+
+// Start the Tracer with the OpenTelemetry API.
+t := otel.Tracer("")
+```
+
+### Setting errors on a span
+
+To set an error on a span, set error attributes with `span.SetAttributes`, or attach the error through end span options:
+
+```go
+// Start a span.
+ctx, span := t.Start(context.Background(), "span_name")
+
+...
+// Set an error on a span with 'span.SetAttributes'.
+span.SetAttributes(attribute.String(ext.ErrorMsg, "error_message"))
+
+// Alternatively, it is possible to set an error on a span via end span options.
+ddotel.EndOptions(span, ddtracer.WithError(errors.New("persisted_option")))
+span.End()
+```
+
+## Adding spans
+
+Unlike other Datadog tracing libraries, when tracing Go applications, Datadog recommends that you explicitly manage and pass the Go context of your spans. This approach ensures accurate span relationships and meaningful tracing. For more information, see the [Go context library documentation][16] or documentation for any third-party libraries integrated with your application.
+
+```go
+// Can only be done after the setup steps.
+
+// Here we can leverage context.Context to pass in Datadog-specific start span options,
+// like 'ddtracer.Measured()'.
+ctx, span := t.Start(
+    ddotel.ContextWithStartOptions(context.Background(), ddtracer.Measured()), "span_name")
+
+span.End()
+```
+
+### Asynchronous traces
+
+When working with asynchronous or concurrent operations, it's important to ensure that traces are properly propagated and connected across different execution contexts.
Here's an example of how to create and manage asynchronous traces:
+
+```go
+func main() {
+    ctx, span := t.Start(context.Background(), "main_op")
+    defer span.End()
+
+    go func() {
+        // Reuse the parent context so the async span joins the same trace.
+        _, asyncSpan := t.Start(ctx, "asyncOp")
+        defer asyncSpan.End()
+        performOp()
+    }()
+}
+```
+
+### Distributed tracing
+
+Create a distributed [trace][15] by manually propagating the tracing context:
+
+```go
+package main
+
+import (
+    "net/http"
+
+    "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    span, ctx := tracer.StartSpanFromContext(r.Context(), "post.process")
+    defer span.Finish()
+
+    req, err := http.NewRequest("GET", "http://example.com", nil)
+    req = req.WithContext(ctx)
+    // Inject the span Context in the Request headers
+    err = tracer.Inject(span.Context(), tracer.HTTPHeadersCarrier(req.Header))
+    if err != nil {
+        // Handle or log injection error
+    }
+    http.DefaultClient.Do(req)
+}
+```
+
+Then, on the server side, to continue the trace, start a new [Span][2] from the extracted `Context`:
+
+```go
+package main
+
+import (
+    "net/http"
+
+    "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    // Extract the span Context and continue the trace in this service
+    sctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(r.Header))
+    if err != nil {
+        // Handle or log extraction error
+    }
+
+    span := tracer.StartSpan("post.filter", tracer.ChildOf(sctx))
+    defer span.Finish()
+}
+```
+
+## Trace client and Agent configuration
+
+There are additional configurations to consider for both the tracing client and Datadog Agent:
+- Context propagation with B3 headers
+- Excluding specific resources from sending traces to Datadog, if you do not want those traces to be included in calculated metrics (for example, health checks)
+
+### Propagating context with headers extraction and injection
+
+You can configure the propagation of context for distributed traces by injecting and extracting headers. Read [Trace Context Propagation][13] for information.
+
+### Resource filtering
+
+Traces can be excluded based on their resource name, which removes synthetic traffic such as health checks from the traces reported to Datadog. This and other security and fine-tuning configurations can be found on the [Security][14] page.
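+
+For instance, a minimal sketch of dropping health-check resources at the Agent level, assuming the standard `DD_APM_IGNORE_RESOURCES` Agent environment variable (the values are illustrative regex patterns):
+
+```shell
+# Illustrative only: the Agent drops traces whose root resource name
+# matches any of these comma-separated regex patterns.
+export DD_APM_IGNORE_RESOURCES="GET /health,GET /ping"
+```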
+ [1]: https://opentelemetry.io/docs/instrumentation/go/manual/#propagators-and-context [2]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-processor @@ -85,4 +239,8 @@ Datadog combines these OpenTelemetry spans with other Datadog APM spans into a s [5]: https://opentelemetry.io/docs/instrumentation/go/manual/ [6]: https://opentelemetry.io/docs/instrumentation/go/ [9]: /tracing/trace_collection/trace_context_propagation/go/ -[12]: /opentelemetry/guide/otel_api_tracing_interoperability/ \ No newline at end of file +[12]: /opentelemetry/guide/otel_api_tracing_interoperability/ +[13]: /tracing/trace_collection/trace_context_propagation/go/ +[14]: /tracing/security +[15]: /tracing/glossary/#trace +[16]: https://pkg.go.dev/context \ No newline at end of file diff --git a/content/en/tracing/trace_collection/custom_instrumentation/java/otel.md b/content/en/tracing/trace_collection/custom_instrumentation/java/otel.md index d8f46a58dd1a4..06e54ee444afb 100644 --- a/content/en/tracing/trace_collection/custom_instrumentation/java/otel.md +++ b/content/en/tracing/trace_collection/custom_instrumentation/java/otel.md @@ -17,134 +17,190 @@ further_reading: text: 'Interoperability of OpenTelemetry API and Datadog instrumented traces' --- -{{% otel-custom-instrumentation %}} +{{% otel-custom-instrumentation-lang %}} -## Requirements and limitations +## Setup -- Datadog Java tracing library `dd-trace-java` version 1.10.0 or greater. +
 OpenTelemetry support is available in the Datadog Java tracing library (`dd-trace-java`) version 1.24.0 and later.
-The following OpenTelemetry features are implemented in the Datadog library as noted: +To configure OpenTelemetry to use the Datadog trace provider: -| OpenTelemetry feature | Datadog support notes | -|---------------------------------------------------|---------------------------------------------------------------------------------------------| -| [Context propagation][1] | [Datadog distributed header format][2] is used instead by default. | -| [Span processors][3] | Unsupported | -| [Span Exporters][4] | Unsupported | -| Trace/span [ID generators][5] | ID generation is performed by the tracing library, with support for [128-bit trace IDs][6]. | -| [Metrics][7], [Baggage][8] and [Context][9] API | Unsupported | -| [Span links ][14] (Beta) | Requires `dd-trace-java` version 1.24.0 or greater. | +1. If you have not yet read the instructions for auto-instrumentation and setup, start with the [Java Setup Instructions][15]. -## Configuring OpenTelemetry to use the Datadog tracing library +1. Make sure you only depend on the OpenTelemetry API (and not the OpenTelemetry SDK). -
-If you have not yet read the instructions for auto-instrumentation and setup, start with the Java Setup Instructions. -
+1. Set the `dd.trace.otel.enabled` system property or the `DD_TRACE_OTEL_ENABLED` environment variable to `true`. -1. Add your desired manual OpenTelemetry instrumentation to your Java code following the [OpenTelemetry Java Manual Instrumentation documentation][10]. +## Adding span tags -1. Add the [Datadog tracing library to the JVM][11]. **Beta:** You can optionally do this with [One-Step APM Instrumentation][12]. +### Add custom span tags +Add custom tags to your spans corresponding to any dynamic value within your application code such as `customer.id`. -1. Make sure you only depend on the OpenTelemetry API (and not the OpenTelemetry SDK). +```java +import io.opentelemetry.api.trace.Span; -{{< tabs >}} -{{% tab "Gradle" %}} +public void doSomething() { + Span span = Span.current(); + span.setAttribute("user-name", "Some User"); +} +``` -{{< code-block lang="groovy" >}} -// OpenTelemetry API -implementation "io.opentelemetry:opentelemetry-api:${opentelemetryVersion}" -{{< /code-block >}} +### Adding tags globally to all spans -{{% /tab %}} -{{% tab "Maven" %}} +The `dd.tags` property allows you to set tags across all generated spans for an application. This is useful for grouping stats for your applications, data centers, or any other tags you would like to see in Datadog. -{{< code-block lang="xml" >}} - - - io.opentelemetry - opentelemetry-api - ${io.opentelemetry.version} - -{{< /code-block >}} +```shell +java -javaagent:.jar \ + -Ddd.tags=datacenter:njc,: \ + -jar .jar -{{% /tab %}} -{{< /tabs >}} +``` -1. Set the `dd.trace.otel.enabled` system property or the `DD_TRACE_OTEL_ENABLED` environment variable to `true`. +### Setting errors on a root span from a child span + +To set an error on a root span from a child span, you can use the `setStatus` method on the current span like this: -Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. 
+```java
+import static io.opentelemetry.api.trace.StatusCode.ERROR;
+import io.opentelemetry.api.trace.Span;
+public void doSomething() {
+    Span span = Span.current();
+    span.setStatus(ERROR, "Some error details...");
+}
+```
-## Common use cases
+### Setting tags and errors on a root span from a child span
-### Add custom attributes to the current or local root span
+This example demonstrates how to set tags and errors on a root span from a child span:
 ```java
-// Add attributes to the current span
-Span currentSpan = Span.current();
-currentSpan.setAttributes("some-key", "some-value");
-
-// Add attributes to the local root span
-ContextKey localRootSpanKey = ContextKey.named("opentelemetry-traces-local-root-span");
-Span rootSpan = Context.current().get(localRootSpanKey);
-rootSpan.setAttributes("some-key", "some-value");
+import io.opentelemetry.api.GlobalOpenTelemetry;
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.StatusCode;
+import io.opentelemetry.api.trace.Tracer;
+import io.opentelemetry.context.Context;
+import io.opentelemetry.context.ContextKey;
+import io.opentelemetry.context.Scope;
+
+public class Example {
+
+    private final static ContextKey<Span> CONTEXT_KEY =
+        ContextKey.named("opentelemetry-traces-local-root-span");
+
+    private Tracer tracer;
+
+    public void begin() {
+        tracer = GlobalOpenTelemetry.getTracer("my-scope", "0.1.0");
+        Span parentSpan = tracer.spanBuilder("begin").startSpan();
+        try (Scope scope = parentSpan.makeCurrent()) {
+            createChildSpan();
+        } finally {
+            parentSpan.end();
+        }
+    }
+
+    private void createChildSpan() {
+        Span childSpan = tracer.spanBuilder("child-span").startSpan();
+        try {
+            Span rootSpan = Context.current().get(CONTEXT_KEY);
+            if (null != rootSpan) {
+                rootSpan.setAttribute("my-attribute", "my-attribute-value");
+                rootSpan.setStatus(StatusCode.ERROR, "Some error details...");
+            }
+        } finally {
+            childSpan.end();
+        }
+    }
+
+}
 ```
-**Note:** If there isn't a current or local root span, the returned span is invalid, not `null`, and attributes are not set.
+## Adding spans
+
+If you aren't using a [supported framework instrumentation][17], or you would like additional depth in your application's [traces][16], you may want to add custom instrumentation to your code for complete flame graphs or to measure execution times for pieces of code.
-### Add custom spans using annotations
+If modifying application code is not possible, use the `dd.trace.methods` system property or the `DD_TRACE_METHODS` environment variable to specify these methods (see the sketch below).
-First add a dependency to the `opentelemetry-instrumentation-annotations` library.
+If you have existing `@Trace` or similar annotations, or prefer to use annotations to complete any incomplete traces within Datadog, use Trace Annotations.
-{{< tabs >}}
-{{% tab "Gradle" %}}
+Traces may also be created using the OpenTelemetry `@WithSpan` annotation as described in [Trace annotations](#trace-annotations).
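+
+For instance, a minimal sketch of the `dd.trace.methods` approach mentioned above, assuming a hypothetical `com.example.SessionManager` class whose `saveSession` method should be traced without code changes:
+
+```shell
+# Illustrative only: class and method names are placeholders.
+java -javaagent:dd-java-agent.jar \
+    -Ddd.trace.methods="com.example.SessionManager[saveSession]" \
+    -jar my-app.jar
+```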
-{{< code-block lang="groovy" >}} - // OpenTelemetry instrumentation annotations - implementation "io.opentelemetry.instrumentation:opentelemetry-instrumentation-annotations:${opentelemetryVersion}" -{{< /code-block >}} +### Trace annotations -{{% /tab %}} -{{% tab "Maven" %}} +Add `@WithSpan` to methods to have them be traced when running OpenTelemetry and the `dd-java-agent.jar`. If the Agent is not attached, this annotation has no effect on your application. +OpenTelemetry's `@WithSpan` annotation is provided by the `opentelemetry-instrumentation-annotations` dependency. + +```java +import io.opentelemetry.instrumentation.annotations.WithSpan; -{{< code-block lang="xml" >}} - - - io.opentelemetry.instrumentation - opentelemetry-instrumentation-annotations - ${io.opentelemetry.version} - -{{< /code-block >}} +public class SessionManager { + + @WithSpan + public static void saveSession() { + // your method implementation here + } +} +``` -{{% /tab %}} -{{< /tabs >}} +### Manually creating a new span -Then annotate your methods with the `@WithSpan` annotation to create a new span each call. The parameters of the call can be annotated with the `@SpanAttribute` annotation to capture the arguments as span attributes: +To manually create new spans within the current trace context: ```java -@WithSpan -public void myMethod(@SpanAttribute("parameter1") String parameter1, - @SpanAttribute("parameter2") long parameter2) { - <...> +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Scope; +import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporter; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.semconv.ResourceAttributes; +import java.util.concurrent.TimeUnit; + +public class Example { + + public void doSomething() { + Tracer tracer = GlobalOpenTelemetry.getTracer("my-scope", "0.1.0"); + Span span = tracer.spanBuilder("my-resource").startSpan(); + try (Scope scope = span.makeCurrent()) { + // do some work + } catch (Throwable t) { + span.recordException(t); + throw t; + } finally { + span.end(); + } + } + } ``` -**Note:** Using the `@AddingSpanAttributes` method annotation instead of `@WithSpan` allows capturing method arguments using the `@SpanAttribute` annotation without creating a new span. The current span, if it exists, is going to be updated with the captured arguments. +## Trace client and Agent configuration + +Both the tracing client and Datadog Agent offer additional configuration options for context propagation. You can also exclude specific resources from sending traces to Datadog if you don't want those traces to be included in calculated metrics, such as traces related to health checks. + +### Propagating context with headers extraction and injection + +You can configure the propagation of context for distributed traces by injecting and extracting headers. Read [Trace Context Propagation][18] for information. + +### Resource filtering + +Traces can be excluded based on their resource name, to remove synthetic traffic such as health checks from reporting traces to Datadog. This and other security and fine-tuning configurations can be found on the [Security][19] page or in [Ignoring Unwanted Resources][20]. 
## Further Reading {{< partial name="whats-next/whats-next.html" >}} -[1]: https://opentelemetry.io/docs/instrumentation/java/manual/#context-propagation -[2]: /tracing/trace_collection/trace_context_propagation/java/ -[3]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-processor -[4]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-exporter -[5]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#id-generators -[6]: /opentelemetry/guide/otel_api_tracing_interoperability/ -[7]: https://opentelemetry.io/docs/specs/otel/metrics/api/ -[8]: https://opentelemetry.io/docs/specs/otel/baggage/api/ -[9]: https://opentelemetry.io/docs/specs/otel/context/ -[10]: https://opentelemetry.io/docs/instrumentation/java/manual/ -[11]: /tracing/trace_collection/dd_libraries/java/?tab=springboot#add-the-java-tracer-to-the-jvm -[12]: /tracing/trace_collection/single-step-apm/ -[13]: /tracing/trace_collection/single-step-apm/ -[14]: https://opentelemetry.io/docs/instrumentation/java/manual/#create-spans-with-links +[15]: /tracing/setup/java/ +[16]: /tracing/glossary/#trace +[17]: /tracing/trace_collection/automatic_instrumentation/dd_libraries/java/?tab=wget#compatibility +[18]: /tracing/trace_collection/trace_context_propagation/java/ +[19]: /tracing/security +[20]: /tracing/guide/ignoring_apm_resources/ diff --git a/content/en/tracing/trace_collection/custom_instrumentation/nodejs/otel.md b/content/en/tracing/trace_collection/custom_instrumentation/nodejs/otel.md index 47cf6b84ee5cd..1cf9989778279 100644 --- a/content/en/tracing/trace_collection/custom_instrumentation/nodejs/otel.md +++ b/content/en/tracing/trace_collection/custom_instrumentation/nodejs/otel.md @@ -17,27 +17,14 @@ further_reading: text: 'Interoperability of OpenTelemetry API and Datadog instrumented traces' --- -{{% otel-custom-instrumentation %}} +{{% otel-custom-instrumentation-lang %}} -## Requirements and limitations +## Setup - -- Datadog Node.js tracing library `dd-trace` version 4.2.0+, 3.23.0+, or v2.36.0+. +To configure OpenTelemetry to use the Datadog trace provider: -The following OpenTelemetry features implemented in the Datadog library as noted: - -| Feature | Support notes | -|---------------------------------------|--------------------------------------| -| [OpenTelemetry Context propagation][1] | [Datadog and W3C Trace Context header formats][9] are enabled by default. | -| [Span processors][2] | Unsupported | -| [Span Exporters][3] | Unsupported | -| Trace/span [ID generators][4] | ID generation is performed by the tracing library, with support for [128-bit trace IDs][12]. | - - -## Configuring OpenTelemetry to use the Datadog tracing library - -1. Add your desired manual OpenTelemetry instrumentation to your Node.js code following the [OpenTelemetry Node.js Manual Instrumentation documentation][5]. **Important!** Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. +1. Add your desired manual OpenTelemetry instrumentation to your Node.js code following the [OpenTelemetry Node.js Manual Instrumentation documentation][1]. **Note**: Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. 2. Add the `dd-trace` module to your package.json: @@ -66,22 +53,87 @@ The following OpenTelemetry features implemented in the Datadog library as noted provider.register() ``` -6. Run your application. +6. 
Import the OpenTelemetry API and create an OpenTelemetry tracer instance: + + ```js + const ot = require('@opentelemetry/api') + const otelTracer = ot.trace.getTracer( + 'my-service' + ) + ``` + +7. Run your application. + +Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. It also supports [integration instrumentation][2] and [OpenTelemetry automatic instrumentation][3]. + +## Adding span tags + +Add custom attributes to your spans to provide additional context: + +{{< highlight js "hl_lines=6" >}} +function processData(i, param1, param2) { + return tracer.startActiveSpan(`processData:${i}`, (span) => { + const result = someOperation(param1, param2); + + // Add an attribute to the span + span.setAttribute('app.processedData', result.toString()); + + span.end(); + return result; + }); +} +{{< /highlight >}} + +## Creating spans + +To create a new span and properly close it, use the `startActiveSpan` method: + +{{< highlight js "hl_lines=3 9" >}} +function performTask(iterations, param1, param2) { + // Create a span. A span must be closed. + return tracer.startActiveSpan('performTask', (span) => { + const results = []; + for (let i = 0; i < iterations; i++) { + results.push(processData(i, param1, param2)); + } + // Be sure to end the span! + span.end(); + return results; + }); +} +{{< /highlight >}} + +## Filtering requests + +In some cases, you may want to exclude certain requests from being instrumented, such as health checks or synthetic traffic. You can use the `blocklist` or `allowlist` option on the `http` plugin to ignore these requests. + +To exclude requests at the application level, add the following after initializing the tracer: + +```javascript +// at the top of the entry point right after tracer.init() +tracer.use('http', { + blocklist: ['/health', '/ping'] +}) +``` + +You can also split the configuration between client and server if needed: + +```javascript +tracer.use('http', { + server: { + blocklist: ['/ping'] + } +}) +``` -Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. It supports [integration instrumentation][7] and [OpenTelemetry Automatic instrumentation][8] also. +Additionally, you can exclude traces based on their resource name to prevent the Agent from sending them to Datadog. For more information on security and fine-tuning Agent configurations, read the [Security][4] or [Ignoring Unwanted Resources][5]. 
## Further Reading {{< partial name="whats-next/whats-next.html" >}} -[1]: https://opentelemetry.io/docs/instrumentation/js/propagation/ -[2]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-processor -[3]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-exporter -[4]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#id-generators -[5]: https://opentelemetry.io/docs/instrumentation/js/instrumentation/ -[6]: /tracing/trace_collection/dd_libraries/nodejs/#additional-configuration -[7]: /tracing/trace_collection/dd_libraries/nodejs#integration-instrumentation -[8]: https://opentelemetry.io/docs/instrumentation/js/automatic/ -[9]: /tracing/trace_collection/trace_context_propagation/nodejs/ -[10]: /tracing/trace_collection/dd_libraries/nodejs/#custom-logging -[12]: /opentelemetry/guide/otel_api_tracing_interoperability/ \ No newline at end of file +[1]: https://opentelemetry.io/docs/instrumentation/js/instrumentation/ +[2]: /tracing/trace_collection/dd_libraries/nodejs#integration-instrumentation +[3]: https://opentelemetry.io/docs/instrumentation/js/automatic/ +[4]: /tracing/security +[5]: /tracing/guide/ignoring_apm_resources/ diff --git a/content/en/tracing/trace_collection/custom_instrumentation/php/otel.md b/content/en/tracing/trace_collection/custom_instrumentation/php/otel.md index 8724aa1d4c36f..51c74854df58d 100644 --- a/content/en/tracing/trace_collection/custom_instrumentation/php/otel.md +++ b/content/en/tracing/trace_collection/custom_instrumentation/php/otel.md @@ -17,41 +17,103 @@ further_reading: text: 'Interoperability of OpenTelemetry API and Datadog instrumented traces' --- -{{% otel-custom-instrumentation %}} +{{% otel-custom-instrumentation-lang %}} -## Requirements and limitations +## Setup -- Datadog PHP tracing library `dd-trace-php` version 0.94.0 or greater. +To configure OpenTelemetry to use the Datadog trace provider: -The following OpenTelemetry features implemented in the Datadog library as noted: +1. Install [OpenTelemetry API packages][13]. + ```php + composer require open-telemetry/sdk + ``` +2. Add your desired manual OpenTelemetry instrumentation to your PHP code following the [OpenTelemetry PHP Manual Instrumentation documentation][5]. -| Feature | Support notes | -|----------------------------------------|---------------------------------------------------------| -| [OpenTelemetry Context propagation][1] | [W3C Trace Context and Datadog header formats][9] are enabled by default. | -| [Span limits][2] | Unsupported | -| [Metrics API][7] | Unsupported | -| Trace/span [ID generators][3] | ID generation is performed by the tracing library, with support for [128-bit trace IDs][12]. | +3. Install the [Datadog PHP tracing library][6]. -## Configuring OpenTelemetry to use the Datadog tracing library +4. Set `DD_TRACE_OTEL_ENABLED` to `true`. -1. Add your desired manual OpenTelemetry instrumentation to your PHP code following the [OpenTelemetry PHP Manual Instrumentation documentation][5]. **Important!** Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. +Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. -2. Install the [Datadog PHP tracing library][6]. +## Adding span tags -3. Set `DD_TRACE_OTEL_ENABLED` to `true`. 
+
+You can add attributes when you start the span:
+
+```php
+$span = $tracer->spanBuilder('mySpan')
+    ->setAttribute('key', 'value')
+    ->startSpan();
+```
+
+Or while the span is active:
+
+```php
+$activeSpan = OpenTelemetry\API\Trace\Span::getCurrent();
+
+$activeSpan->setAttribute('key', 'value');
+```
+
+## Setting errors on a span
+
+Exception information is captured and attached to a span if one is active when the exception is raised.
+
+```php
+// Create a span
+$span = $tracer->spanBuilder('mySpan')->startSpan();
+
+throw new \Exception('Oops!');
+
+// 'mySpan' will be flagged as erroneous and have
+// the stack trace and exception message attached as tags
+```
+
+Flagging a trace as erroneous can also be done manually:
+
+```php
+use OpenTelemetry\API\Trace\Span;
+use OpenTelemetry\Context\Context;
+
+// Can only be done after the setup steps, such as initializing the tracer.
+
+try {
+    throw new \Exception('Oops!');
+} catch (\Exception $e) {
+    $rootSpan = Span::fromContext(Context::getRoot());
+    $rootSpan->recordException($e);
+}
+```
+## Adding spans
+
+To add a span:
+
+```php
+// Get a tracer or use an existing one
+$tracerProvider = \OpenTelemetry\API\Globals::tracerProvider();
+$tracer = $tracerProvider->getTracer('datadog');
+
+// Create a span
+$span = $tracer->spanBuilder('mySpan')->startSpan();
+
+// ... do stuff
+
+// Close the span
+$span->end();
+```
+
+## Accessing active spans
+
+To access the currently active span:
+
+```php
+$span = OpenTelemetry\API\Trace\Span::getCurrent();
+```
 
 ## Further Reading
 
 {{< partial name="whats-next/whats-next.html" >}}
 
-[1]: https://opentelemetry.io/docs/instrumentation/php/propagation/
-[2]: https://opentelemetry.io/docs/specs/otel/trace/sdk/#span-limits
-[3]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#id-generators
-[4]: /tracing/trace_collection/trace_context_propagation/php/
 [5]: https://opentelemetry.io/docs/instrumentation/php/manual/
 [6]: /tracing/trace_collection/dd_libraries/php#getting-started
-[7]: https://opentelemetry.io/docs/specs/otel/metrics/
-[9]: /tracing/trace_collection/trace_context_propagation/php/
-[12]: /opentelemetry/guide/otel_api_tracing_interoperability/
+[13]: https://opentelemetry.io/docs/languages/php/instrumentation/#instrumentation-setup
diff --git a/content/en/tracing/trace_collection/custom_instrumentation/python/otel.md b/content/en/tracing/trace_collection/custom_instrumentation/python/otel.md
index 4f8a7740a5799..8317b9ba6a321 100644
--- a/content/en/tracing/trace_collection/custom_instrumentation/python/otel.md
+++ b/content/en/tracing/trace_collection/custom_instrumentation/python/otel.md
@@ -17,50 +17,60 @@ further_reading:
   text: 'Interoperability of OpenTelemetry API and Datadog instrumented traces'
 ---
 
-{{% otel-custom-instrumentation %}}
+{{% otel-custom-instrumentation-lang %}}
 
-## Requirements and limitations
-- Datadog python tracing library `dd-trace-py` version 1.12.0 or greater.
-- Python version 3.7 or greater.
+## Setup
 
-The following OpenTelemetry features implemented in the Datadog library as noted:
+To configure OpenTelemetry to use the Datadog trace provider:
 
-| Feature | Support notes |
-|---------------------------------------|---------------------------------------|
-| [OpenTelemetry Context propagation][1] | [W3C Trace Context and Datadog header formats][9] are enabled by default.
| -| [Span processors][2] | Unsupported | -| [Span Exporters][3] | Unsupported | -| Trace/span [ID generators][4] | ID generation is performed by the tracing library, with support for [128-bit trace IDs][12]. | +1. If you have not yet read the instructions for auto-instrumentation and setup, start with the [Python Setup Instructions][1]. +1. Set `DD_TRACE_OTEL_ENABLED` environment variable to `true`. -## Configuring OpenTelemetry to use the Datadog Tracer Provider +### Creating custom spans -1. Add your desired manual OpenTelemetry instrumentation to your Python code following the [OpenTelemetry Python Manual Instrumentation documentation][5]. **Important!** Where those instructions indicate that your code should call the OpenTelemetry SDK, call the Datadog tracing library instead. +To create custom spans within an existing trace context: -2. Install the python tracer: +{{< highlight python "hl_lines=6" >}} +from opentelemetry import trace - ``` - pip install "ddtrace>=1.12.0" - ``` +tracer = trace.get_tracer(__name__) -3. Set `DD_TRACE_OTEL_ENABLED` environment variable to `True`. +def do_work(): + with tracer.start_as_current_span("operation_name") as span: + # Perform the work that you want to track with the span + print("Doing work...") + # When the 'with' block ends, the span is automatically closed +{{< /highlight >}} -4. Run your application with `ddtrace-run`. This automatically configures the `Datadog Tracer Provider`. If your application cannot use `ddtrace-run` read [the `dd-trace-py` OpenTelemetry API docs][11] for additional configurations. +## Accessing active spans -Datadog combines these OpenTelemetry spans with other Datadog APM spans into a single trace of your application. It supports [OpenTelemetry Automatic instrumentation][8] also. +To access the currently active span, use the `get_current_span()` function: -## Further Reading +```python +from opentelemetry import trace + +current_span = trace.get_current_span() +# enrich 'current_span' with information +``` + +## Adding span tags + +Add attributes to a span to provide additional context or metadata. 
+ +Here's an example of how to add attributes to the current span: + +```python +from opentelemetry import trace + +current_span = trace.get_current_span() + +current_span.set_attribute("attribute_key1", 1) +``` + +## Further reading {{< partial name="whats-next/whats-next.html" >}} -[1]: https://opentelemetry.io/docs/instrumentation/python/manual/#change-the-default-propagation-format -[2]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-processor -[3]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#span-exporter -[4]: https://opentelemetry.io/docs/reference/specification/trace/sdk/#id-generators -[5]: https://opentelemetry.io/docs/instrumentation/python/manual/ -[8]: https://opentelemetry.io/docs/instrumentation/python/automatic/ -[9]: /tracing/trace_collection/trace_context_propagation/python/ -[10]: /tracing/trace_collection/dd_libraries/python/#custom-logging -[11]: https://ddtrace.readthedocs.io/en/stable/api.html#opentelemetry-api -[12]: /opentelemetry/guide/otel_api_tracing_interoperability/ \ No newline at end of file +[1]: /tracing/setup/python/ diff --git a/content/es/getting_started/continuous_testing/_index.md b/content/es/getting_started/continuous_testing/_index.md index 493a443b75f2a..27d0c653d7da2 100644 --- a/content/es/getting_started/continuous_testing/_index.md +++ b/content/es/getting_started/continuous_testing/_index.md @@ -94,7 +94,7 @@ Por separado, puedes utilizar la [Integración Datadog Synthetics frente a códi * Ejecuta tests de API HTTP y tests de navegador y ve tus resultados dentro de VS Code. * Prueba solo lo importante ejecutando los tests pertinentes al mismo tiempo. -{{< img src="developers/ide_integrations/vscode/vscode-extension-demo.png" alt="vscode-extension-demo" style="width:100%;" >}} +{{< img src="developers/ide_plugins/vscode/vscode-extension-demo.png" alt="vscode-extension-demo" style="width:100%;" >}} ### Ejecución de pruebas en VS Code @@ -158,12 +158,12 @@ Consulta la [Documentación sobre paralelización][18] para obtener más detalle [4]: /es/mobile_app_testing/ [5]: /es/synthetics/explorer?track=synbatch [6]: /es/continuous_testing/cicd_integrations/configuration/?tab=npm -[7]: /es/developers/ide_integrations/ +[7]: /es/developers/ide_plugins/ [8]: https://datadoghq.com [9]: /es/getting_started/synthetics/browser_test/#create-a-browser-test [10]: /es/continuous_testing/cicd_integrations/configuration/?tab=npm#install-the-package [11]: /es/continuous_testing/cicd_integrations/configuration/?tab=npm#reporters -[12]: /es/developers/ide_integrations/vscode/ +[12]: /es/developers/ide_plugins/vscode/ [13]: /es/getting_started/synthetics/private_location/ [14]: /es/continuous_testing/ [15]: /es/continuous_testing/explorer/?tab=testruns#create-a-search-query diff --git a/content/ja/code_analysis/_index.md b/content/ja/code_analysis/_index.md index 7d3acbcbb7f63..324b526600f65 100644 --- a/content/ja/code_analysis/_index.md +++ b/content/ja/code_analysis/_index.md @@ -110,8 +110,8 @@ Datadog UI の Static Analysis 結果の一部として問題のあるコード [1]: /ja/code_analysis/static_analysis [2]: /ja/code_analysis/software_composition_analysis -[3]: /ja/developers/ide_integrations/vscode/#static-analysis -[4]: /ja/developers/ide_integrations/idea/#static-analysis +[3]: /ja/developers/ide_plugins/vscode/#static-analysis +[4]: /ja/developers/ide_plugins/idea/#static-analysis [5]: /ja/code_analysis/github_pull_requests/ [6]: /ja/code_analysis/static_analysis_rules [7]: 
/ja/integrations/github/#link-a-repository-in-your-organization-or-personal-account diff --git a/content/ko/code_analysis/_index.md b/content/ko/code_analysis/_index.md index 0d0f51d00099f..e2f90621c49cc 100644 --- a/content/ko/code_analysis/_index.md +++ b/content/ko/code_analysis/_index.md @@ -111,8 +111,8 @@ title: 코드 분석 [1]: /ko/code_analysis/static_analysis [2]: /ko/code_analysis/software_composition_analysis -[3]: /ko/developers/ide_integrations/vscode/#static-analysis -[4]: /ko/developers/ide_integrations/idea/#static-analysis +[3]: /ko/developers/ide_plugins/vscode/#static-analysis +[4]: /ko/developers/ide_plugins/idea/#static-analysis [5]: /ko/code_analysis/github_pull_requests/ [6]: /ko/code_analysis/static_analysis_rules [7]: /ko/integrations/github/#link-a-repository-in-your-organization-or-personal-account diff --git a/content/ko/code_analysis/static_analysis/_index.md b/content/ko/code_analysis/static_analysis/_index.md index 044495872a2ba..2e84ee96b8fc3 100644 --- a/content/ko/code_analysis/static_analysis/_index.md +++ b/content/ko/code_analysis/static_analysis/_index.md @@ -59,8 +59,8 @@ title: 정적 분석 ### IDE {{< whatsnext desc="정적 분석을 사용하면 IDE(통합 개발 환경)에서 파일 편집 시 코드 취약점을 식별할 수 있습니다. 다음 통합에 대한 자세한 내용을 확인하려면 설명서를 참조하세요:">}} - {{< nextlink href="developers/ide_integrations/idea/" >}}Datadog IntelliJ IDEA 플러그인{{< /nextlink >}} - {{< nextlink href="developers/ide_integrations/vscode/" >}}Datadog Visual Studio 코드 확장{{< /nextlink >}} + {{< nextlink href="developers/ide_plugins/idea/" >}}Datadog IntelliJ IDEA 플러그인{{< /nextlink >}} + {{< nextlink href="developers/ide_plugins/vscode/" >}}Datadog Visual Studio 코드 확장{{< /nextlink >}} {{< /whatsnext >}} ## 검색 및 필터 결과 diff --git a/content/ko/getting_started/continuous_testing/_index.md b/content/ko/getting_started/continuous_testing/_index.md index 4b3eff4abf8c8..2ceb94ac9db45 100644 --- a/content/ko/getting_started/continuous_testing/_index.md +++ b/content/ko/getting_started/continuous_testing/_index.md @@ -91,7 +91,7 @@ title: 지속적 테스팅 시작하기 * HTTP API 테스트와 브라우저 테스트를 실행하고 VS 코드 내에서 결과를 확인할 수 있습니다. * 관련 테스트를 동시에 실행하여 중요한 것만 테스트할 수 있습니다. -{{< img src="developers/ide_integrations/vscode/vscode-extension-demo.png" alt="vscode-extension-demo" style="width:100%;" >}} +{{< img src="developers/ide_plugins/vscode/vscode-extension-demo.png" alt="vscode-extension-demo" style="width:100%;" >}} ### VS 코드에서 테스트 실행 @@ -155,12 +155,12 @@ $$\text"병렬화 예측" = {\text"CI 배치 당 24개 테스트"* \text"소요 [4]: /ko/mobile_app_testing/ [5]: /ko/synthetics/explorer?track=synbatch [6]: /ko/continuous_testing/cicd_integrations/configuration/?tab=npm -[7]: /ko/developers/ide_integrations/ +[7]: /ko/developers/ide_plugins/ [8]: https://datadoghq.com [9]: /ko/getting_started/synthetics/browser_test/#create-a-browser-test [10]: /ko/continuous_testing/cicd_integrations/configuration/?tab=npm#install-the-package [11]: /ko/continuous_testing/cicd_integrations/configuration/?tab=npm#reporters -[12]: /ko/developers/ide_integrations/vscode/ +[12]: /ko/developers/ide_plugins/vscode/ [13]: /ko/getting_started/synthetics/private_location/ [14]: /ko/continuous_testing/testing_tunnel/ [15]: /ko/continuous_testing/explorer/?tab=testruns#create-a-search-query diff --git a/data/api/v1/full_spec.yaml b/data/api/v1/full_spec.yaml index 0ceab739659c9..94f2a2ed53bac 100644 --- a/data/api/v1/full_spec.yaml +++ b/data/api/v1/full_spec.yaml @@ -21651,6 +21651,7 @@ components: data_scanner_read: View Data Scanner configurations. data_scanner_write: Edit Data Scanner configurations. 
events_read: Read Events data. + hosts_read: List hosts and their attributes. incident_notification_settings_write: Configure Incidents Notification settings. incident_read: View incidents in Datadog. @@ -24462,7 +24463,8 @@ paths: security: - apiKeyAuth: [] appKeyAuth: [] - - AuthZ: [] + - AuthZ: + - hosts_read summary: Get all hosts for your organization tags: - Hosts @@ -24510,7 +24512,8 @@ paths: security: - apiKeyAuth: [] appKeyAuth: [] - - AuthZ: [] + - AuthZ: + - hosts_read summary: Get the total number of active hosts tags: - Hosts diff --git a/data/api/v1/full_spec_deref.json b/data/api/v1/full_spec_deref.json index 6b93f8d156f4c..1e49ed49391de 100644 --- a/data/api/v1/full_spec_deref.json +++ b/data/api/v1/full_spec_deref.json @@ -603592,6 +603592,7 @@ "data_scanner_read": "View Data Scanner configurations.", "data_scanner_write": "Edit Data Scanner configurations.", "events_read": "Read Events data.", + "hosts_read": "List hosts and their attributes.", "incident_notification_settings_write": "Configure Incidents Notification settings.", "incident_read": "View incidents in Datadog.", "incident_settings_write": "Configure Incident Settings.", @@ -860193,7 +860194,9 @@ "appKeyAuth": [] }, { - "AuthZ": [] + "AuthZ": [ + "hosts_read" + ] } ], "summary": "Get all hosts for your organization", @@ -860328,7 +860331,9 @@ "appKeyAuth": [] }, { - "AuthZ": [] + "AuthZ": [ + "hosts_read" + ] } ], "summary": "Get the total number of active hosts", diff --git a/data/api/v2/full_spec.yaml b/data/api/v2/full_spec.yaml index e8e1c73411658..214f62ee1d84f 100644 --- a/data/api/v2/full_spec.yaml +++ b/data/api/v2/full_spec.yaml @@ -22951,6 +22951,7 @@ components: data_scanner_read: View Data Scanner configurations. data_scanner_write: Edit Data Scanner configurations. events_read: Read Events data. + hosts_read: List hosts and their attributes. incident_notification_settings_write: Configure Incidents Notification settings. incident_read: View incidents in Datadog. diff --git a/data/api/v2/full_spec_deref.json b/data/api/v2/full_spec_deref.json index 43865522dc520..bde63139f5202 100644 --- a/data/api/v2/full_spec_deref.json +++ b/data/api/v2/full_spec_deref.json @@ -137090,6 +137090,7 @@ "data_scanner_read": "View Data Scanner configurations.", "data_scanner_write": "Edit Data Scanner configurations.", "events_read": "Read Events data.", + "hosts_read": "List hosts and their attributes.", "incident_notification_settings_write": "Configure Incidents Notification settings.", "incident_read": "View incidents in Datadog.", "incident_settings_write": "Configure Incident Settings.", diff --git a/data/cloudcraft.json b/data/cloudcraft.json index 12f82eb42ea66..a33373b0b75ca 100644 --- a/data/cloudcraft.json +++ b/data/cloudcraft.json @@ -1751,6 +1751,167 @@ } } }, + "/team": { + "get": { + "tags": [ + "Teams" + ], + "summary": "List teams", + "description": "List all teams linked to your Cloudcraft account.\n\nThe response is an array of teams. 
Each entry includes information about the team, such as the team ID, name, and members.", + "operationId": "listMyTeams", + "responses": { + "200": { + "description": "OK", + "headers": { + "Cache-Control": { + "schema": { + "type": "string", + "example": "no-cache, no-store" + } + }, + "Connection": { + "schema": { + "type": "string", + "example": "keep-alive" + } + }, + "Content-Length": { + "schema": { + "type": "string", + "example": "271" + } + }, + "Date": { + "schema": { + "type": "string", + "example": "Wed, 14 Nov 2018 21:14:32 GMT" + } + }, + "ETag": { + "schema": { + "type": "string", + "example": "W/\"10f-XQrUSiTExKF+oTaRV4ZXDCtfrGc\"" + } + }, + "Vary": { + "schema": { + "type": "string", + "example": "Accept-Encoding" + } + }, + "X-Content-Type-Options": { + "schema": { + "type": "string", + "example": "nosniff" + } + }, + "X-Frame-Options": { + "schema": { + "type": "string", + "example": "deny" + } + }, + "X-XSS-Protection": { + "schema": { + "type": "string", + "example": "1; mode=block" + } + } + }, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique identifier of the team.", + "example": "us46e9aa-5806-4cd6-8e78-c22d58602d09" + }, + "name": { + "type": "string", + "description": "The name of the team.", + "example": "Example Team Name" + }, + "visible": { + "type": "boolean", + "description": "Whether the team is visible to other users.", + "example": true + }, + "crossOrganizational": { + "type": "boolean", + "description": "Whether the team is cross-organizational.", + "example": false + }, + "customerId": { + "type": "string", + "description": "The unique customer ID of the team.", + "example": "86e9951e-3764-48d7-b318-969f8a6d180b" + }, + "updatedAt": { + "type": "string", + "description": "The date and time the team was last updated.", + "example": "2022-01-01T20:54:53.963Z" + }, + "createdAt": { + "type": "string", + "description": "The date and time the team was created.", + "example": "2022-01-01T20:54:47.282Z" + }, + "externalSharing": { + "type": "boolean", + "description": "Whether external sharing is enabled for the team.", + "example": true + }, + "role": { + "type": "string", + "description": "The role of the user in the team.", + "example": "admin" + }, + "members": { + "type": "object", + "description": "An array of key-value pairs representing the team's members.", + "properties": { + "key": { + "type": "string", + "example": "value" + } + } + } + } + }, + "examples": { + "Get team information": { + "value": { + "id": "us46e9aa-5806-4cd6-8e78-c22d58602d09", + "name": "Example Team Name", + "visible": true, + "crossOrganizational": false, + "customerId": "86e9951e-3764-48d7-b318-969f8a6d180b", + "updatedAt": "2022-01-01T20:54:53.963Z", + "createdAt": "2022-01-01T20:54:47.282Z", + "externalSharing": true, + "role": "admin", + "members": { + "id": "17cf37c9-578c-4587-acb9-299c5431ad10", + "role": "admin", + "userId": "370809cc-abdc-42ad-bb40-8541ca0dfb1a", + "name": "Example User Name", + "email": "user@example.org", + "mfaEnabled": true + } + } + } + } + } + } + }, + "401": { + "description": "Unauthorized" + } + } + } + }, "/user/me": { "get": { "tags": [ @@ -1898,6 +2059,10 @@ { "name": "Budgets" }, + { + "name": "Teams", + "description": "Teams are groups of users in Cloudcraft. Teams can be used to manage access to resources and share diagrams." + }, { "name": "Users", "description": "Users represent you and your team members in Cloudcraft." 
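For reference, the new `GET /team` operation added to `data/cloudcraft.json` above can be exercised with a short script. This is a minimal sketch, not part of the diff: the base URL `https://api.cloudcraft.co`, the bearer-token authentication scheme, and the `CLOUDCRAFT_API_KEY` environment variable are assumptions for illustration.

```python
# Minimal sketch of calling the new "List teams" endpoint documented above.
# Assumptions (not in this diff): base URL https://api.cloudcraft.co, a
# Cloudcraft API key sent as a bearer token, and the CLOUDCRAFT_API_KEY
# environment variable.
import os

import requests

api_key = os.environ["CLOUDCRAFT_API_KEY"]

response = requests.get(
    "https://api.cloudcraft.co/team",
    headers={"Authorization": f"Bearer {api_key}"},
    timeout=10,
)
response.raise_for_status()

# The endpoint description says the response is an array of teams, each
# with fields such as id, name, role, and members.
for team in response.json():
    print(team["id"], team["name"], team["role"])
```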
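Similarly, the v1 spec changes earlier in this diff replace the empty `AuthZ` scope on the two Hosts endpoints with the new `hosts_read` permission. Below is a minimal sketch of a call to `GET /api/v1/hosts`: the `DD-API-KEY` and `DD-APPLICATION-KEY` headers are standard Datadog API authentication, while the environment variable names and the `datadoghq.com` site are assumptions for illustration.

```python
# Minimal sketch of the "Get all hosts" call whose AuthZ scope this diff
# changes from [] to [hosts_read]. The application key must belong to a
# principal granted the hosts_read permission. Env var names and the
# datadoghq.com site are assumptions for illustration.
import os

import requests

response = requests.get(
    "https://api.datadoghq.com/api/v1/hosts",
    headers={
        "DD-API-KEY": os.environ["DD_API_KEY"],
        "DD-APPLICATION-KEY": os.environ["DD_APP_KEY"],
    },
    timeout=10,
)
response.raise_for_status()

data = response.json()
# total_matching is part of the documented response body for this endpoint
print(data.get("total_matching"), "hosts match")
```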
diff --git a/data/partials/home.ja.yaml b/data/partials/home.ja.yaml index 2da5cb7beac81..4e7031e67795d 100644 --- a/data/partials/home.ja.yaml +++ b/data/partials/home.ja.yaml @@ -121,7 +121,7 @@ nav_sections: title: RDS - desc: 脅威、脆弱性、誤構成の検出 icon: security-platform - link: synthetics/api_tests/udp_tests + link: security/ title: セキュリティ - desc: メトリクスの探索、検索、および分布の作成 icon: メトリクス diff --git a/go.mod b/go.mod index 2f8f9b7fe8a5e..79071f67b476c 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.14 require ( github.com/DataDog/websites-modules v1.4.141 // indirect - github.com/DataDog/websites-sources v0.0.0-20240429180210-49917b24a0b7 // indirect + github.com/DataDog/websites-sources v0.0.0-20240509212537-66852c208f36 // indirect ) // replace github.com/DataDog/websites-modules => /Users/colin.cole/webops/websites-modules diff --git a/go.sum b/go.sum index 1478368836039..473f6d99cde2a 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,4 @@ github.com/DataDog/websites-modules v1.4.141 h1:uAGjWfXK3UHU+OTo73z69sFC6IKxkgkDPhw2IB85fEI= github.com/DataDog/websites-modules v1.4.141/go.mod h1:CcQxAmCXoiFr3hNw6Q+1si65C3uOP1gB+7aX4S3h+CQ= -github.com/DataDog/websites-sources v0.0.0-20240429180210-49917b24a0b7 h1:iMvON2TmEfgmSTrXUYIDjHqd3qTz5Hvxs5cbxn1XyHw= -github.com/DataDog/websites-sources v0.0.0-20240429180210-49917b24a0b7/go.mod h1:RvGhXV0uQC6Ocs+n84QyL97kows6vg6VG5ZLQMHw4Fs= +github.com/DataDog/websites-sources v0.0.0-20240509212537-66852c208f36 h1:LqUTc+R9UXNqpNPzwjvziOg9FU+tTZ4kY+kflP0WPv0= +github.com/DataDog/websites-sources v0.0.0-20240509212537-66852c208f36/go.mod h1:RvGhXV0uQC6Ocs+n84QyL97kows6vg6VG5ZLQMHw4Fs= diff --git a/layouts/partials/nav/left-nav.html b/layouts/partials/nav/left-nav.html index 574040ecd34c4..a97f9f1dc797a 100644 --- a/layouts/partials/nav/left-nav.html +++ b/layouts/partials/nav/left-nav.html @@ -66,7 +66,7 @@ {{ .Name }} {{ if .HasChildren }} -