diff --git a/.codecov.yml b/.codecov.yml index 105f39b3902da..248475af4b933 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -498,6 +498,10 @@ coverage: target: 75 flags: - pulsar + Quarkus: + target: 75 + flags: + - quarkus RabbitMQ: target: 75 flags: @@ -1373,6 +1377,11 @@ flags: paths: - pulsar/datadog_checks/pulsar - pulsar/tests + quarkus: + carryforward: true + paths: + - quarkus/datadog_checks/quarkus + - quarkus/tests rabbitmq: carryforward: true paths: diff --git a/.github/workflows/config/labeler.yml b/.github/workflows/config/labeler.yml index fdcc767071606..5fa2b9bc4ffa9 100644 --- a/.github/workflows/config/labeler.yml +++ b/.github/workflows/config/labeler.yml @@ -439,6 +439,8 @@ integration/proxysql: - proxysql/**/* integration/pulsar: - pulsar/**/* +integration/quarkus: +- quarkus/**/* integration/rabbitmq: - rabbitmq/**/* integration/ray: diff --git a/.github/workflows/test-all.yml b/.github/workflows/test-all.yml index 606f6125af01e..d1701ec09df8b 100644 --- a/.github/workflows/test-all.yml +++ b/.github/workflows/test-all.yml @@ -2994,6 +2994,26 @@ jobs: minimum-base-package: ${{ inputs.minimum-base-package }} pytest-args: ${{ inputs.pytest-args }} secrets: inherit + jcc156e5: + uses: ./.github/workflows/test-target.yml + with: + job-name: Quarkus + target: quarkus + platform: linux + runner: '["ubuntu-22.04"]' + repo: "${{ inputs.repo }}" + python-version: "${{ inputs.python-version }}" + standard: ${{ inputs.standard }} + latest: ${{ inputs.latest }} + agent-image: "${{ inputs.agent-image }}" + agent-image-py2: "${{ inputs.agent-image-py2 }}" + agent-image-windows: "${{ inputs.agent-image-windows }}" + agent-image-windows-py2: "${{ inputs.agent-image-windows-py2 }}" + test-py2: ${{ inputs.test-py2 }} + test-py3: ${{ inputs.test-py3 }} + minimum-base-package: ${{ inputs.minimum-base-package }} + pytest-args: ${{ inputs.pytest-args }} + secrets: inherit j694032b: uses: ./.github/workflows/test-target.yml with: diff --git a/quarkus/CHANGELOG.md 
b/quarkus/CHANGELOG.md new file mode 100644 index 0000000000000..d0112b5d3908f --- /dev/null +++ b/quarkus/CHANGELOG.md @@ -0,0 +1,4 @@ +# CHANGELOG - Quarkus + + + diff --git a/quarkus/README.md b/quarkus/README.md new file mode 100644 index 0000000000000..514709024d86d --- /dev/null +++ b/quarkus/README.md @@ -0,0 +1,55 @@ +# Agent Check: Quarkus + +## Overview + +This check monitors [Quarkus][1] through the Datadog Agent. + +## Setup + +Follow the instructions below to install and configure this check for an Agent running on a host. For containerized environments, see the [Autodiscovery Integration Templates][3] for guidance on applying these instructions. + +### Installation + +The Quarkus check is included in the [Datadog Agent][2] package. +No additional installation is needed on your server. + +### Configuration + +1. Edit the `quarkus.d/conf.yaml` file, in the `conf.d/` folder at the root of your Agent's configuration directory to start collecting your Quarkus performance data. See the [sample quarkus.d/conf.yaml][4] for all available configuration options. + +2. [Restart the Agent][5]. + +### Validation + +[Run the Agent's status subcommand][6] and look for `quarkus` under the Checks section. + +## Data Collected + +### Metrics + +See [metadata.csv][7] for a list of metrics provided by this integration. + +### Events + +The Quarkus integration does not include any events. + +### Service Checks + +The Quarkus integration does not include any service checks. + +See [service_checks.json][8] for a list of service checks provided by this integration. + +## Troubleshooting + +Need help? Contact [Datadog support][9]. 
+ + +[1]: https://quarkus.io/ +[2]: https://app.datadoghq.com/account/settings/agent/latest +[3]: https://docs.datadoghq.com/agent/kubernetes/integrations/ +[4]: https://github.com/DataDog/integrations-core/blob/master/quarkus/datadog_checks/quarkus/data/conf.yaml.example +[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent +[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information +[7]: https://github.com/DataDog/integrations-core/blob/master/quarkus/metadata.csv +[8]: https://github.com/DataDog/integrations-core/blob/master/quarkus/assets/service_checks.json +[9]: https://docs.datadoghq.com/help/ diff --git a/quarkus/assets/configuration/spec.yaml b/quarkus/assets/configuration/spec.yaml new file mode 100644 index 0000000000000..69a82c06c7270 --- /dev/null +++ b/quarkus/assets/configuration/spec.yaml @@ -0,0 +1,14 @@ +name: Quarkus +files: +- name: quarkus.yaml + options: + - template: init_config + options: + - template: init_config/default + - template: instances + options: + - template: instances/openmetrics + overrides: + openmetrics_endpoint.value.example: http://localhost:8080/q/metrics + openmetrics_endpoint.description: | + Set this to the endpoint that Quarkus's Micrometer Prometheus MeterRegistry extension exposes. diff --git a/quarkus/assets/dashboards/overview.json b/quarkus/assets/dashboards/overview.json new file mode 100644 index 0000000000000..cd248ecb5d4d4 --- /dev/null +++ b/quarkus/assets/dashboards/overview.json @@ -0,0 +1,431 @@ +{ + "author_name": "Datadog", + "description": "## Quarkus\n\nThis dashboard lets you monitor your applications developed with the Quarkus framework.\n\n**Note: This dashboard only displays out of the box metrics. 
Tweak it as you add more metrics to your application.**\n\n## Useful Links\n- [Quarkus Homepage](https://quarkus.io/)\n- [How to add metrics in Quarkus](https://quarkus.io/guides/telemetry-micrometer-tutorial#inject-the-meterregistry)", + "layout_type": "ordered", + "template_variables": [ + { + "available_values": [], + "default": "*", + "name": "host", + "prefix": "host" + }, + { + "available_values": [], + "default": "*", + "name": "method", + "prefix": "method" + }, + { + "available_values": [], + "default": "*", + "name": "status", + "prefix": "status" + }, + { + "available_values": [], + "default": "*", + "name": "uri", + "prefix": "uri" + } + ], + "title": "Quarkus Overview", + "widgets": [ + { + "definition": { + "banner_img": "/static/images/logos/quarkus_small.svg", + "layout_type": "ordered", + "show_title": true, + "title": "", + "type": "group", + "widgets": [ + { + "definition": { + "background_color": "white", + "content": "## Quarkus\n\nThis dashboard lets you monitor your applications developed with the Quarkus framework.\n\n**Note: This dashboard only displays out of the box metrics. 
Tweak it as you add more metrics to your application.**\n", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "left", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 5685022835071772, + "layout": { + "height": 3, + "width": 3, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "background_color": "white", + "content": "## Useful Links\n- [Quarkus Homepage](https://quarkus.io/)\n- [How to add metrics in Quarkus](https://quarkus.io/guides/telemetry-micrometer-tutorial#inject-the-meterregistry)", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "center", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 8921963557059570, + "layout": { + "height": 3, + "width": 3, + "x": 3, + "y": 0 + } + } + ] + }, + "id": 4717263751542750, + "layout": { + "height": 6, + "width": 6, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "background_color": "vivid_blue", + "layout_type": "ordered", + "show_title": true, + "title": "Overview", + "type": "group", + "widgets": [ + { + "definition": { + "background_color": "blue", + "content": "See the overall status of your application. The health service check reports whether or not your application is up. 
The monitor alerts you if the maximum duration for a request exceeds a certain threshold.", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "center", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 4528647613111842, + "layout": { + "height": 2, + "width": 6, + "x": 0, + "y": 0 + } + }, + { + "definition": { + "check": "quarkus.openmetrics.health", + "group": "$host", + "group_by": [], + "grouping": "check", + "tags": [], + "time": { + "hide_incomplete_cost_data": true + }, + "title": "Quarkus Health Check", + "title_align": "left", + "title_size": "16", + "type": "check_status" + }, + "id": 4975142618182494, + "layout": { + "height": 3, + "width": 2, + "x": 0, + "y": 2 + } + }, + { + "definition": { + "color_preference": "text", + "count": 50, + "display_format": "countsAndList", + "hide_zero_counts": true, + "last_triggered_format": "relative", + "query": "tag:(integration:quarkus)", + "show_last_triggered": false, + "show_priority": false, + "show_status": true, + "sort": "status,asc", + "start": 0, + "summary_type": "monitors", + "title": "Monitor Summary", + "type": "manage_status" + }, + "id": 7873059155305294, + "layout": { + "height": 3, + "width": 4, + "x": 2, + "y": 2 + } + } + ] + }, + "id": 2737008660122334, + "layout": { + "height": 6, + "width": 6, + "x": 6, + "y": 0 + } + }, + { + "definition": { + "background_color": "vivid_pink", + "layout_type": "ordered", + "show_title": true, + "title": "HTTP Server", + "type": "group", + "widgets": [ + { + "definition": { + "background_color": "pink", + "content": "See how many requests your HTTP server is getting and which ones take the longest.", + "font_size": "14", + "has_padding": true, + "show_tick": false, + "text_align": "center", + "tick_edge": "left", + "tick_pos": "50%", + "type": "note", + "vertical_align": "center" + }, + "id": 5193429521650892, + "layout": { + "height": 1, + "width": 12, + "x": 0, + "y": 0 + } 
+ }, + { + "definition": { + "legend_columns": [ + "avg", + "min", + "max", + "value", + "sum" + ], + "legend_layout": "auto", + "requests": [ + { + "display_type": "line", + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "avg:quarkus.http_server.requests.seconds.max{*}" + } + ], + "response_format": "timeseries", + "style": { + "line_type": "solid", + "line_width": "normal", + "order_by": "values", + "palette": "dog_classic" + } + } + ], + "show_legend": true, + "title": "Longest Request", + "title_align": "left", + "title_size": "16", + "type": "timeseries" + }, + "id": 7305731361762322, + "layout": { + "height": 2, + "width": 4, + "x": 0, + "y": 1 + } + }, + { + "definition": { + "requests": [ + { + "formulas": [ + { + "formula": "query1" + } + ], + "queries": [ + { + "aggregator": "avg", + "data_source": "metrics", + "name": "query1", + "query": "avg:quarkus.http_server.requests.seconds.max{*} by {uri}" + } + ], + "response_format": "scalar", + "sort": { + "count": 10, + "order_by": [ + { + "index": 0, + "order": "desc", + "type": "formula" + } + ] + } + } + ], + "style": { + "display": { + "legend": "automatic", + "type": "stacked" + } + }, + "title": "URIs with Long Requests", + "title_align": "left", + "title_size": "16", + "type": "toplist" + }, + "id": 2683629281370146, + "layout": { + "height": 2, + "width": 4, + "x": 4, + "y": 1 + } + }, + { + "definition": { + "autoscale": true, + "precision": 2, + "requests": [ + { + "formulas": [ + { + "formula": "query1", + "number_format": { + "unit": { + "type": "canonical_unit", + "unit_name": "request" + } + } + } + ], + "queries": [ + { + "aggregator": "avg", + "data_source": "metrics", + "name": "query1", + "query": "avg:quarkus.http_server.requests.seconds.count{*}.as_rate()" + } + ], + "response_format": "scalar" + } + ], + "timeseries_background": { + "type": "area" + }, + "title": "Requests per Second", + "title_align": "left", 
+ "title_size": "16", + "type": "query_value" + }, + "id": 6228596123664624, + "layout": { + "height": 2, + "width": 4, + "x": 8, + "y": 1 + } + } + ] + }, + "id": 880646291321010, + "layout": { + "height": 4, + "width": 12, + "x": 0, + "y": 6 + } + }, + { + "definition": { + "background_color": "white", + "layout_type": "ordered", + "show_title": true, + "title": "Logs", + "type": "group", + "widgets": [ + { + "definition": { + "requests": [ + { + "columns": [ + { + "field": "status_line", + "width": "auto" + }, + { + "field": "timestamp", + "width": "auto" + }, + { + "field": "host", + "width": "auto" + }, + { + "field": "service", + "width": "auto" + }, + { + "field": "content", + "width": "compact" + } + ], + "query": { + "data_source": "logs_stream", + "indexes": [], + "query_string": "source:quarkus", + "sort": { + "column": "timestamp", + "order": "desc" + }, + "storage": "hot" + }, + "response_format": "event_list" + } + ], + "title": "", + "title_align": "left", + "title_size": "16", + "type": "list_stream" + }, + "id": 2489993328338580, + "layout": { + "height": 4, + "width": 12, + "x": 0, + "y": 0 + } + } + ] + }, + "id": 7174398085271826, + "layout": { + "height": 5, + "width": 12, + "x": 0, + "y": 10 + } + } + ] +} \ No newline at end of file diff --git a/quarkus/assets/monitors/long_requests.json b/quarkus/assets/monitors/long_requests.json new file mode 100644 index 0000000000000..f0f48a7d92bc1 --- /dev/null +++ b/quarkus/assets/monitors/long_requests.json @@ -0,0 +1,30 @@ +{ + "version": 2, + "created_at": "2024-12-10", + "last_updated_at": "2024-12-10", + "title": "Some Requests Taking Too Long", + "description": "This monitor alerts you if your longest request is taking too long. 
This can indicate overall degraded service and that other requests are also taking longer to complete.", + "tags": [ + "integration:quarkus" + ], + "definition": { + "name": "Some requests are taking too long", + "type": "query alert", + "query": "avg(last_5m):avg:quarkus.http_server.requests.seconds.max{*} > 1", + "message": "Detected some requests taking extra long to complete. This merits an investigation because it can be a symptom that the overall service is degraded.", + "tags": [ + "integration:quarkus" + ], + "options": { + "thresholds": { + "critical": 1, + "warning": 0.5 + }, + "notify_audit": false, + "include_tags": false, + "new_host_delay": 300, + "avalanche_window": 10 + }, + "priority": null + } +} diff --git a/quarkus/assets/service_checks.json b/quarkus/assets/service_checks.json new file mode 100644 index 0000000000000..ceeec6c578b3d --- /dev/null +++ b/quarkus/assets/service_checks.json @@ -0,0 +1,17 @@ +[ + { + "agent_version": "7.62.0", + "integration": "Quarkus", + "check": "quarkus.openmetrics.health", + "statuses": [ + "ok", + "critical" + ], + "groups": [ + "host", + "endpoint" + ], + "name": "Quarkus OpenMetrics endpoint health", + "description": "Returns `CRITICAL` if the Agent is unable to connect to the Quarkus OpenMetrics endpoint, otherwise returns `OK`." + } +] diff --git a/quarkus/changelog.d/19196.added b/quarkus/changelog.d/19196.added new file mode 100644 index 0000000000000..aa949b47b7b41 --- /dev/null +++ b/quarkus/changelog.d/19196.added @@ -0,0 +1 @@ +Initial Release \ No newline at end of file diff --git a/quarkus/datadog_checks/__init__.py b/quarkus/datadog_checks/__init__.py new file mode 100644 index 0000000000000..1517d901c0aae --- /dev/null +++ b/quarkus/datadog_checks/__init__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore diff --git a/quarkus/datadog_checks/quarkus/__about__.py b/quarkus/datadog_checks/quarkus/__about__.py new file mode 100644 index 0000000000000..e9541ce83e9e5 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/__about__.py @@ -0,0 +1,4 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +__version__ = '0.0.1' diff --git a/quarkus/datadog_checks/quarkus/__init__.py b/quarkus/datadog_checks/quarkus/__init__.py new file mode 100644 index 0000000000000..be45b413005d0 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/__init__.py @@ -0,0 +1,7 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from .__about__ import __version__ +from .check import QuarkusCheck + +__all__ = ['__version__', 'QuarkusCheck'] diff --git a/quarkus/datadog_checks/quarkus/check.py b/quarkus/datadog_checks/quarkus/check.py new file mode 100644 index 0000000000000..1d6705a88778e --- /dev/null +++ b/quarkus/datadog_checks/quarkus/check.py @@ -0,0 +1,16 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +from datadog_checks.base import OpenMetricsBaseCheckV2 +from datadog_checks.quarkus.metrics import METRIC_MAP + + +class QuarkusCheck(OpenMetricsBaseCheckV2): + __NAMESPACE__ = 'quarkus' + DEFAULT_METRIC_LIMIT = 0 + + def get_default_config(self): + return { + "metrics": [METRIC_MAP], + } diff --git a/quarkus/datadog_checks/quarkus/config_models/__init__.py b/quarkus/datadog_checks/quarkus/config_models/__init__.py new file mode 100644 index 0000000000000..106fff2032f68 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/config_models/__init__.py @@ -0,0 +1,24 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from .instance import InstanceConfig +from .shared import SharedConfig + + +class ConfigMixin: + _config_model_instance: InstanceConfig + _config_model_shared: SharedConfig + + @property + def config(self) -> InstanceConfig: + return self._config_model_instance + + @property + def shared_config(self) -> SharedConfig: + return self._config_model_shared diff --git a/quarkus/datadog_checks/quarkus/config_models/defaults.py b/quarkus/datadog_checks/quarkus/config_models/defaults.py new file mode 100644 index 0000000000000..0138cd77a5ea8 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/config_models/defaults.py @@ -0,0 +1,124 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. 
+# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + + +def instance_allow_redirects(): + return True + + +def instance_auth_type(): + return 'basic' + + +def instance_cache_metric_wildcards(): + return True + + +def instance_cache_shared_labels(): + return True + + +def instance_collect_counters_with_distributions(): + return False + + +def instance_collect_histogram_buckets(): + return True + + +def instance_disable_generic_tags(): + return False + + +def instance_empty_default_hostname(): + return False + + +def instance_enable_health_service_check(): + return True + + +def instance_histogram_buckets_as_distributions(): + return False + + +def instance_ignore_connection_errors(): + return False + + +def instance_kerberos_auth(): + return 'disabled' + + +def instance_kerberos_delegate(): + return False + + +def instance_kerberos_force_initiate(): + return False + + +def instance_log_requests(): + return False + + +def instance_min_collection_interval(): + return 15 + + +def instance_non_cumulative_histogram_buckets(): + return False + + +def instance_persist_connections(): + return False + + +def instance_request_size(): + return 16 + + +def instance_skip_proxy(): + return False + + +def instance_tag_by_endpoint(): + return True + + +def instance_telemetry(): + return False + + +def instance_timeout(): + return 10 + + +def instance_tls_ignore_warning(): + return False + + +def instance_tls_use_host_header(): + return False + + +def instance_tls_verify(): + return True + + +def instance_use_latest_spec(): + return False + + +def instance_use_legacy_auth_encoding(): + return True + + +def instance_use_process_start_time(): + return False diff --git a/quarkus/datadog_checks/quarkus/config_models/instance.py b/quarkus/datadog_checks/quarkus/config_models/instance.py new file mode 100644 index 0000000000000..8e39a0e921719 --- /dev/null +++ 
b/quarkus/datadog_checks/quarkus/config_models/instance.py @@ -0,0 +1,171 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from types import MappingProxyType +from typing import Any, Optional, Union + +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import defaults, validators + + +class AuthToken(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + reader: Optional[MappingProxyType[str, Any]] = None + writer: Optional[MappingProxyType[str, Any]] = None + + +class ExtraMetrics(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra='allow', + frozen=True, + ) + name: Optional[str] = None + type: Optional[str] = None + + +class MetricPatterns(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + exclude: Optional[tuple[str, ...]] = None + include: Optional[tuple[str, ...]] = None + + +class Metrics(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + extra='allow', + frozen=True, + ) + name: Optional[str] = None + type: Optional[str] = None + + +class Proxy(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + http: Optional[str] = None + https: Optional[str] = None + no_proxy: Optional[tuple[str, ...]] = None + + +class ShareLabels(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + frozen=True, + ) + labels: Optional[tuple[str, ...]] = None + match: Optional[tuple[str, ...]] = None + + +class 
InstanceConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + allow_redirects: Optional[bool] = None + auth_token: Optional[AuthToken] = None + auth_type: Optional[str] = None + aws_host: Optional[str] = None + aws_region: Optional[str] = None + aws_service: Optional[str] = None + cache_metric_wildcards: Optional[bool] = None + cache_shared_labels: Optional[bool] = None + collect_counters_with_distributions: Optional[bool] = None + collect_histogram_buckets: Optional[bool] = None + connect_timeout: Optional[float] = None + disable_generic_tags: Optional[bool] = None + empty_default_hostname: Optional[bool] = None + enable_health_service_check: Optional[bool] = None + exclude_labels: Optional[tuple[str, ...]] = None + exclude_metrics: Optional[tuple[str, ...]] = None + exclude_metrics_by_labels: Optional[MappingProxyType[str, Union[bool, tuple[str, ...]]]] = None + extra_headers: Optional[MappingProxyType[str, Any]] = None + extra_metrics: Optional[tuple[Union[str, MappingProxyType[str, Union[str, ExtraMetrics]]], ...]] = None + headers: Optional[MappingProxyType[str, Any]] = None + histogram_buckets_as_distributions: Optional[bool] = None + hostname_format: Optional[str] = None + hostname_label: Optional[str] = None + ignore_connection_errors: Optional[bool] = None + ignore_tags: Optional[tuple[str, ...]] = None + include_labels: Optional[tuple[str, ...]] = None + kerberos_auth: Optional[str] = None + kerberos_cache: Optional[str] = None + kerberos_delegate: Optional[bool] = None + kerberos_force_initiate: Optional[bool] = None + kerberos_hostname: Optional[str] = None + kerberos_keytab: Optional[str] = None + kerberos_principal: Optional[str] = None + log_requests: Optional[bool] = None + metric_patterns: Optional[MetricPatterns] = None + metrics: Optional[tuple[Union[str, MappingProxyType[str, Union[str, Metrics]]], ...]] = None + min_collection_interval: Optional[float] = None + namespace: 
Optional[str] = Field(None, pattern='\\w*') + non_cumulative_histogram_buckets: Optional[bool] = None + ntlm_domain: Optional[str] = None + openmetrics_endpoint: str + password: Optional[str] = None + persist_connections: Optional[bool] = None + proxy: Optional[Proxy] = None + raw_line_filters: Optional[tuple[str, ...]] = None + raw_metric_prefix: Optional[str] = None + read_timeout: Optional[float] = None + rename_labels: Optional[MappingProxyType[str, Any]] = None + request_size: Optional[float] = None + service: Optional[str] = None + share_labels: Optional[MappingProxyType[str, Union[bool, ShareLabels]]] = None + skip_proxy: Optional[bool] = None + tag_by_endpoint: Optional[bool] = None + tags: Optional[tuple[str, ...]] = None + telemetry: Optional[bool] = None + timeout: Optional[float] = None + tls_ca_cert: Optional[str] = None + tls_cert: Optional[str] = None + tls_ignore_warning: Optional[bool] = None + tls_private_key: Optional[str] = None + tls_protocols_allowed: Optional[tuple[str, ...]] = None + tls_use_host_header: Optional[bool] = None + tls_verify: Optional[bool] = None + use_latest_spec: Optional[bool] = None + use_legacy_auth_encoding: Optional[bool] = None + use_process_start_time: Optional[bool] = None + username: Optional[str] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'instance_{info.field_name}', identity)(value, field=field) + else: + value = getattr(defaults, f'instance_{info.field_name}', lambda: value)() + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return 
validation.core.check_model(getattr(validators, 'check_instance', identity)(model)) diff --git a/quarkus/datadog_checks/quarkus/config_models/shared.py b/quarkus/datadog_checks/quarkus/config_models/shared.py new file mode 100644 index 0000000000000..e39d447dfc4b9 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/config_models/shared.py @@ -0,0 +1,45 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# This file is autogenerated. +# To change this file you should edit assets/configuration/spec.yaml and then run the following commands: +# ddev -x validate config -s +# ddev -x validate models -s + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, ConfigDict, field_validator, model_validator + +from datadog_checks.base.utils.functions import identity +from datadog_checks.base.utils.models import validation + +from . import validators + + +class SharedConfig(BaseModel): + model_config = ConfigDict( + validate_default=True, + arbitrary_types_allowed=True, + frozen=True, + ) + service: Optional[str] = None + + @model_validator(mode='before') + def _initial_validation(cls, values): + return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values)) + + @field_validator('*', mode='before') + def _validate(cls, value, info): + field = cls.model_fields[info.field_name] + field_name = field.alias or info.field_name + if field_name in info.context['configured_fields']: + value = getattr(validators, f'shared_{info.field_name}', identity)(value, field=field) + + return validation.utils.make_immutable(value) + + @model_validator(mode='after') + def _final_validation(cls, model): + return validation.core.check_model(getattr(validators, 'check_shared', identity)(model)) diff --git a/quarkus/datadog_checks/quarkus/config_models/validators.py b/quarkus/datadog_checks/quarkus/config_models/validators.py new file mode 100644 
index 0000000000000..70150e85e6124 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/config_models/validators.py @@ -0,0 +1,13 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +# Here you can include additional config validators or transformers +# +# def initialize_instance(values, **kwargs): +# if 'my_option' not in values and 'my_legacy_option' in values: +# values['my_option'] = values['my_legacy_option'] +# if values.get('my_number') > 10: +# raise ValueError('my_number max value is 10, got %s' % str(values.get('my_number'))) +# +# return values diff --git a/quarkus/datadog_checks/quarkus/data/conf.yaml.example b/quarkus/datadog_checks/quarkus/data/conf.yaml.example new file mode 100644 index 0000000000000..2de1b5ccf2751 --- /dev/null +++ b/quarkus/datadog_checks/quarkus/data/conf.yaml.example @@ -0,0 +1,593 @@ +## All options defined here are available to all instances. +# +init_config: + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Additionally, this sets the default `service` for every log source. + # + # service: + +## Every instance is scheduled independently of the others. +# +instances: + + ## @param openmetrics_endpoint - string - required + ## Set this to the endpoint that Quarkus's Micrometer Prometheus MeterRegistry extension exposes. + # + - openmetrics_endpoint: http://localhost:8080/q/metrics + + ## @param raw_metric_prefix - string - optional + ## A prefix that is removed from all exposed metric names, if present. + ## All configuration options will use the prefix-less name. + # + # raw_metric_prefix: _ + + ## @param extra_metrics - (list of string or mapping) - optional + ## This list defines metrics to collect from the `openmetrics_endpoint`, in addition to + ## what the check collects by default. 
If the check already collects a metric, then + ## metric definitions here take precedence. Metrics may be defined in 3 ways: + ## + ## 1. If the item is a string, then it represents the exposed metric name, and + ## the sent metric name will be identical. For example: + ## + ## extra_metrics: + ## - + ## - + ## 2. If the item is a mapping, then the keys represent the exposed metric names. + ## + ## a. If a value is a string, then it represents the sent metric name. For example: + ## + ## extra_metrics: + ## - : + ## - : + ## b. If a value is a mapping, then it must have a `name` and/or `type` key. + ## The `name` represents the sent metric name, and the `type` represents how + ## the metric should be handled, overriding any type information the endpoint + ## may provide. For example: + ## + ## extra_metrics: + ## - : + ## name: + ## type: + ## - : + ## name: + ## type: + ## + ## The supported native types are `gauge`, `counter`, `histogram`, and `summary`. + ## + ## Note: To collect counter metrics with names ending in `_total`, specify the metric name without the `_total` + ## suffix. For example, to collect the counter metric `promhttp_metric_handler_requests_total`, specify + ## `promhttp_metric_handler_requests`. This submits to Datadog the metric name appended with `.count`. + ## For more information, see: + ## https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#suffixes + ## + ## Regular expressions may be used to match the exposed metric names, for example: + ## + ## extra_metrics: + ## - ^network_(ingress|egress)_.+ + ## - .+: + ## type: gauge + # + # extra_metrics: [] + + ## @param exclude_metrics - list of strings - optional + ## A list of metrics to exclude, with each entry being either + ## the exact metric name or a regular expression. 
+ ## In order to exclude all metrics but the ones matching a specific filter, + ## you can use a negative lookahead regex like: + ## - ^(?!foo).*$ + # + # exclude_metrics: [] + + ## @param exclude_metrics_by_labels - mapping - optional + ## A mapping of labels to exclude metrics with matching label name and their corresponding metric values. To match + ## all values of a label, set it to `true`. + ## + ## Note: Label filtering happens before `rename_labels`. + ## + ## For example, the following configuration instructs the check to exclude all metrics with + ## a label `worker` or a label `pid` with the value of either `23` or `42`. + ## + ## exclude_metrics_by_labels: + ## worker: true + ## pid: + ## - '23' + ## - '42' + # + # exclude_metrics_by_labels: {} + + ## @param exclude_labels - list of strings - optional + ## A list of labels to exclude, useful for high cardinality values like timestamps or UUIDs. + ## May be used in conjunction with `include_labels`. + ## Labels defined in `exclude_labels` will take precedence in case of overlap. + ## + ## Note: Label filtering happens before `rename_labels`. + # + # exclude_labels: [] + + ## @param include_labels - list of strings - optional + ## A list of labels to include. May be used in conjunction with `exclude_labels`. + ## Labels defined in `exclude_labels` will take precedence in case of overlap. + ## + ## Note: Label filtering happens before `rename_labels`. + # + # include_labels: [] + + ## @param rename_labels - mapping - optional + ## A mapping of label names to their new names. + # + # rename_labels: + # : + # : + + ## @param enable_health_service_check - boolean - optional - default: true + ## Whether or not to send a service check named `.openmetrics.health` which reports + ## the health of the `openmetrics_endpoint`. 
+ # + # enable_health_service_check: true + + ## @param ignore_connection_errors - boolean - optional - default: false + ## Whether or not to ignore connection errors when scraping `openmetrics_endpoint`. + # + # ignore_connection_errors: false + + ## @param hostname_label - string - optional + ## Override the hostname for every metric submission with the value of one of its labels. + # + # hostname_label: + + ## @param hostname_format - string - optional + ## When `hostname_label` is set, this instructs the check how to format the values. The string + ## `` is replaced by the value of the label defined by `hostname_label`. + # + # hostname_format: + + ## @param collect_histogram_buckets - boolean - optional - default: true + ## Whether or not to send histogram buckets. + # + # collect_histogram_buckets: true + + ## @param non_cumulative_histogram_buckets - boolean - optional - default: false + ## Whether or not histogram buckets are non-cumulative and to come with a `lower_bound` tag. + # + # non_cumulative_histogram_buckets: false + + ## @param histogram_buckets_as_distributions - boolean - optional - default: false + ## Whether or not to send histogram buckets as Datadog distribution metrics. This implicitly + ## enables the `collect_histogram_buckets` and `non_cumulative_histogram_buckets` options. + ## + ## Learn more about distribution metrics: + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#metric-types + # + # histogram_buckets_as_distributions: false + + ## @param collect_counters_with_distributions - boolean - optional - default: false + ## Whether or not to also collect the observation counter metrics ending in `.sum` and `.count` + ## when sending histogram buckets as Datadog distribution metrics. This implicitly enables the + ## `histogram_buckets_as_distributions` option. 
+ # + # collect_counters_with_distributions: false + + ## @param use_process_start_time - boolean - optional - default: false + ## Whether to enable a heuristic for reporting counter values on the first scrape. When true, + ## the first time an endpoint is scraped, check `process_start_time_seconds` to decide whether zero + ## initial value can be assumed for counters. This requires keeping metrics in memory until the entire + ## response is received. + # + # use_process_start_time: false + + ## @param share_labels - mapping - optional + ## This mapping allows for the sharing of labels across multiple metrics. The keys represent the + ## exposed metrics from which to share labels, and the values are mappings that configure the + ## sharing behavior. Each mapping must have at least one of the following keys: + ## + ## labels - This is a list of labels to share. All labels are shared if this is not set. + ## match - This is a list of labels to match on other metrics as a condition for sharing. + ## values - This is a list of allowed values as a condition for sharing. + ## + ## To unconditionally share all labels of a metric, set it to `true`. + ## + ## For example, the following configuration instructs the check to apply all labels from `metric_a` + ## to all other metrics, the `node` label from `metric_b` to only those metrics that have a `pod` + ## label value that matches the `pod` label value of `metric_b`, and all labels from `metric_c` + ## to all other metrics if their value is equal to `23` or `42`. + ## + ## share_labels: + ## metric_a: true + ## metric_b: + ## labels: + ## - node + ## match: + ## - pod + ## metric_c: + ## values: + ## - 23 + ## - 42 + # + # share_labels: {} + + ## @param cache_shared_labels - boolean - optional - default: true + ## When `share_labels` is set, it instructs the check to cache labels collected from the first payload + ## for improved performance. 
+ ## + ## Set this to `false` to compute label sharing for every payload at the risk of potentially increased memory usage. + # + # cache_shared_labels: true + + ## @param raw_line_filters - list of strings - optional + ## A list of regular expressions used to exclude lines read from the `openmetrics_endpoint` + ## from being parsed. + # + # raw_line_filters: [] + + ## @param cache_metric_wildcards - boolean - optional - default: true + ## Whether or not to cache data from metrics that are defined by regular expressions rather + ## than the full metric name. + # + # cache_metric_wildcards: true + + ## @param telemetry - boolean - optional - default: false + ## Whether or not to submit metrics prefixed by `.telemetry.` for debugging purposes. + # + # telemetry: false + + ## @param ignore_tags - list of strings - optional + ## A list of regular expressions used to ignore tags added by Autodiscovery and entries in the `tags` option. + # + # ignore_tags: + # - + # - + # - + + ## @param proxy - mapping - optional + ## This overrides the `proxy` setting in `init_config`. + ## + ## Set HTTP or HTTPS proxies for this instance. Use the `no_proxy` list + ## to specify hosts that must bypass proxies. + ## + ## The SOCKS protocol is also supported, for example: + ## + ## socks5://user:pass@host:port + ## + ## Using the scheme `socks5` causes the DNS resolution to happen on the + ## client, rather than on the proxy server. This is in line with `curl`, + ## which uses the scheme to decide whether to do the DNS resolution on + ## the client or proxy. If you want to resolve the domains on the proxy + ## server, use `socks5h` as the scheme. + # + # proxy: + # http: http://: + # https: https://: + # no_proxy: + # - + # - + + ## @param skip_proxy - boolean - optional - default: false + ## This overrides the `skip_proxy` setting in `init_config`. + ## + ## If set to `true`, this makes the check bypass any proxy + ## settings enabled and attempt to reach services directly. 
+ # + # skip_proxy: false + + ## @param auth_type - string - optional - default: basic + ## The type of authentication to use. The available types (and related options) are: + ## + ## - basic + ## |__ username + ## |__ password + ## |__ use_legacy_auth_encoding + ## - digest + ## |__ username + ## |__ password + ## - ntlm + ## |__ ntlm_domain + ## |__ password + ## - kerberos + ## |__ kerberos_auth + ## |__ kerberos_cache + ## |__ kerberos_delegate + ## |__ kerberos_force_initiate + ## |__ kerberos_hostname + ## |__ kerberos_keytab + ## |__ kerberos_principal + ## - aws + ## |__ aws_region + ## |__ aws_host + ## |__ aws_service + ## + ## The `aws` auth type relies on boto3 to automatically gather AWS credentials, for example: from `.aws/credentials`. + ## Details: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#configuring-credentials + # + # auth_type: basic + + ## @param use_legacy_auth_encoding - boolean - optional - default: true + ## When `auth_type` is set to `basic`, this determines whether to encode as `latin1` rather than `utf-8`. + # + # use_legacy_auth_encoding: true + + ## @param username - string - optional + ## The username to use if services are behind basic or digest auth. + # + # username: + + ## @param password - string - optional + ## The password to use if services are behind basic or NTLM auth. + # + # password: + + ## @param ntlm_domain - string - optional + ## If your services use NTLM authentication, specify + ## the domain used in the check. For NTLM Auth, append + ## the username to domain, not as the `username` parameter. 
+ # + # ntlm_domain: \ + + ## @param kerberos_auth - string - optional - default: disabled + ## If your services use Kerberos authentication, you can specify the Kerberos + ## strategy to use between: + ## + ## - required + ## - optional + ## - disabled + ## + ## See https://github.com/requests/requests-kerberos#mutual-authentication + # + # kerberos_auth: disabled + + ## @param kerberos_cache - string - optional + ## Sets the KRB5CCNAME environment variable. + ## It should point to a credential cache with a valid TGT. + # + # kerberos_cache: + + ## @param kerberos_delegate - boolean - optional - default: false + ## Set to `true` to enable Kerberos delegation of credentials to a server that requests delegation. + ## + ## See https://github.com/requests/requests-kerberos#delegation + # + # kerberos_delegate: false + + ## @param kerberos_force_initiate - boolean - optional - default: false + ## Set to `true` to preemptively initiate the Kerberos GSS exchange and + ## present a Kerberos ticket on the initial request (and all subsequent). + ## + ## See https://github.com/requests/requests-kerberos#preemptive-authentication + # + # kerberos_force_initiate: false + + ## @param kerberos_hostname - string - optional + ## Override the hostname used for the Kerberos GSS exchange if its DNS name doesn't + ## match its Kerberos hostname, for example: behind a content switch or load balancer. + ## + ## See https://github.com/requests/requests-kerberos#hostname-override + # + # kerberos_hostname: + + ## @param kerberos_principal - string - optional + ## Set an explicit principal, to force Kerberos to look for a + ## matching credential cache for the named user. + ## + ## See https://github.com/requests/requests-kerberos#explicit-principal + # + # kerberos_principal: + + ## @param kerberos_keytab - string - optional + ## Set the path to your Kerberos key tab file. 
+ # + # kerberos_keytab: + + ## @param auth_token - mapping - optional + ## This allows for the use of authentication information from dynamic sources. + ## Both a reader and writer must be configured. + ## + ## The available readers are: + ## + ## - type: file + ## path (required): The absolute path for the file to read from. + ## pattern: A regular expression pattern with a single capture group used to find the + ## token rather than using the entire file, for example: Your secret is (.+) + ## - type: oauth + ## url (required): The token endpoint. + ## client_id (required): The client identifier. + ## client_secret (required): The client secret. + ## basic_auth: Whether the provider expects credentials to be transmitted in + ## an HTTP Basic Auth header. The default is: false + ## options: Mapping of additional options to pass to the provider, such as the audience + ## or the scope. For example: + ## options: + ## audience: https://example.com + ## scope: read:example + ## + ## The available writers are: + ## + ## - type: header + ## name (required): The name of the field, for example: Authorization + ## value: The template value, for example `Bearer `. The default is: + ## placeholder: The substring in `value` to replace with the token, defaults to: + # + # auth_token: + # reader: + # type: + # : + # : + # writer: + # type: + # : + # : + + ## @param aws_region - string - optional + ## If your services require AWS Signature Version 4 signing, set the region. + ## + ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + # + # aws_region: + + ## @param aws_host - string - optional + ## If your services require AWS Signature Version 4 signing, set the host. + ## This only needs the hostname and does not require the protocol (HTTP, HTTPS, and more). + ## For example, if connecting to https://us-east-1.amazonaws.com/, set `aws_host` to `us-east-1.amazonaws.com`. + ## + ## Note: This setting is not necessary for official integrations. 
+ ## + ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + # + # aws_host: + + ## @param aws_service - string - optional + ## If your services require AWS Signature Version 4 signing, set the service code. For a list + ## of available service codes, see https://docs.aws.amazon.com/general/latest/gr/rande.html + ## + ## Note: This setting is not necessary for official integrations. + ## + ## See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html + # + # aws_service: + + ## @param tls_verify - boolean - optional - default: true + ## Instructs the check to validate the TLS certificate of services. + # + # tls_verify: true + + ## @param tls_use_host_header - boolean - optional - default: false + ## If a `Host` header is set, this enables its use for SNI (matching against the TLS certificate CN or SAN). + # + # tls_use_host_header: false + + ## @param tls_ignore_warning - boolean - optional - default: false + ## If `tls_verify` is disabled, security warnings are logged by the check. + ## Disable those by setting `tls_ignore_warning` to true. + # + # tls_ignore_warning: false + + ## @param tls_cert - string - optional + ## The path to a single file in PEM format containing a certificate as well as any + ## number of CA certificates needed to establish the certificate's authenticity for + ## use when connecting to services. It may also contain an unencrypted private key to use. + # + # tls_cert: + + ## @param tls_private_key - string - optional + ## The unencrypted private key to use for `tls_cert` when connecting to services. This is + ## required if `tls_cert` is set and it does not already contain a private key. + # + # tls_private_key: + + ## @param tls_ca_cert - string - optional + ## The path to a file of concatenated CA certificates in PEM format or a directory + ## containing several CA certificates in PEM format. If a directory, the directory + ## must have been processed using the `openssl rehash` command. 
See: + ## https://www.openssl.org/docs/man3.2/man1/c_rehash.html + # + # tls_ca_cert: + + ## @param tls_protocols_allowed - list of strings - optional + ## The expected versions of TLS/SSL when fetching intermediate certificates. + ## Only `SSLv3`, `TLSv1.2`, `TLSv1.3` are allowed by default. The possible values are: + ## SSLv3 + ## TLSv1 + ## TLSv1.1 + ## TLSv1.2 + ## TLSv1.3 + # + # tls_protocols_allowed: + # - SSLv3 + # - TLSv1.2 + # - TLSv1.3 + + ## @param headers - mapping - optional + ## The headers parameter allows you to send specific headers with every request. + ## You can use it for explicitly specifying the host header or adding headers for + ## authorization purposes. + ## + ## This overrides any default headers. + # + # headers: + # Host: + # X-Auth-Token: + + ## @param extra_headers - mapping - optional + ## Additional headers to send with every request. + # + # extra_headers: + # Host: + # X-Auth-Token: + + ## @param timeout - number - optional - default: 10 + ## The timeout for accessing services. + ## + ## This overrides the `timeout` setting in `init_config`. + # + # timeout: 10 + + ## @param connect_timeout - number - optional + ## The connect timeout for accessing services. Defaults to `timeout`. + # + # connect_timeout: + + ## @param read_timeout - number - optional + ## The read timeout for accessing services. Defaults to `timeout`. + # + # read_timeout: + + ## @param request_size - number - optional - default: 16 + ## The number of kibibytes (KiB) to read from streaming HTTP responses at a time. + # + # request_size: 16 + + ## @param log_requests - boolean - optional - default: false + ## Whether or not to debug log the HTTP(S) requests made, including the method and URL. + # + # log_requests: false + + ## @param persist_connections - boolean - optional - default: false + ## Whether or not to persist cookies and use connection pooling for improved performance. 
+ # + # persist_connections: false + + ## @param allow_redirects - boolean - optional - default: true + ## Whether or not to allow URL redirection. + # + # allow_redirects: true + + ## @param tags - list of strings - optional + ## A list of tags to attach to every metric and service check emitted by this instance. + ## + ## Learn more about tagging at https://docs.datadoghq.com/tagging + # + # tags: + # - : + # - : + + ## @param service - string - optional + ## Attach the tag `service:` to every metric, event, and service check emitted by this integration. + ## + ## Overrides any `service` defined in the `init_config` section. + # + # service: + + ## @param min_collection_interval - number - optional - default: 15 + ## This changes the collection interval of the check. For more information, see: + ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval + # + # min_collection_interval: 15 + + ## @param empty_default_hostname - boolean - optional - default: false + ## This forces the check to send metrics with no hostname. + ## + ## This is useful for cluster-level checks. + # + # empty_default_hostname: false + + ## @param metric_patterns - mapping - optional + ## A mapping of metrics to include or exclude, with each entry being a regular expression. + ## + ## Metrics defined in `exclude` will take precedence in case of overlap. + # + # metric_patterns: + # include: + # - + # exclude: + # - diff --git a/quarkus/datadog_checks/quarkus/metrics.py b/quarkus/datadog_checks/quarkus/metrics.py new file mode 100644 index 0000000000000..a753aa86c965c --- /dev/null +++ b/quarkus/datadog_checks/quarkus/metrics.py @@ -0,0 +1,53 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +gauges_counters = { + 'http_server_active_requests': 'http_server.active_requests', + 'http_server_bytes_read_max': 'http_server.bytes_read.max', + 'http_server_bytes_written_max': 'http_server.bytes_written.max', + 'http_server_connections_seconds_max': 'http_server.connections.seconds.max', + 'http_server_requests_seconds_max': 'http_server.requests.seconds.max', + 'jvm_buffer_count_buffers': 'jvm.buffer.count_buffers', + 'jvm_buffer_memory_used_bytes': 'jvm.buffer.memory_used.bytes', + 'jvm_buffer_total_capacity_bytes': 'jvm.buffer.total_capacity.bytes', + 'jvm_classes_loaded_classes': 'jvm.classes.loaded_classes', + 'jvm_gc_live_data_size_bytes': 'jvm.gc.live_data_size.bytes', + 'jvm_gc_max_data_size_bytes': 'jvm.gc.max_data_size.bytes', + 'jvm_gc_overhead': 'jvm.gc.overhead', + 'jvm_memory_committed_bytes': 'jvm.memory.committed.bytes', + 'jvm_memory_max_bytes': 'jvm.memory.max.bytes', + 'jvm_memory_usage_after_gc': 'jvm.memory.usage_after_gc', + 'jvm_memory_used_bytes': 'jvm.memory.used.bytes', + 'jvm_threads_daemon_threads': 'jvm.threads.daemon_threads', + 'jvm_threads_live_threads': 'jvm.threads.live_threads', + 'jvm_threads_peak_threads': 'jvm.threads.peak_threads', + 'jvm_threads_states_threads': 'jvm.threads.states_threads', + 'netty_allocator_memory_pinned': 'netty.allocator.memory.pinned', + 'netty_allocator_memory_used': 'netty.allocator.memory.used', + 'netty_allocator_pooled_arenas': 'netty.allocator.pooled.arenas', + 'netty_allocator_pooled_cache_size': 'netty.allocator.pooled.cache_size', + 'netty_allocator_pooled_chunk_size': 'netty.allocator.pooled.chunk_size', + 'netty_allocator_pooled_threadlocal_caches': 'netty.allocator.pooled.threadlocal_caches', + 'netty_eventexecutor_tasks_pending': 'netty.eventexecutor.tasks_pending', + 'process_cpu_usage': 'process.cpu.usage', + 'process_files_max_files': 'process.files.max_files', + 
'process_files_open_files': 'process.files.open_files', + 'process_uptime_seconds': 'process.uptime.seconds', + 'system_cpu_count': 'system.cpu.count', + 'system_cpu_usage': 'system.cpu.usage', + 'system_load_average_1m': 'system.load_average_1m', + 'worker_pool_active': 'worker_pool.active', + 'worker_pool_idle': 'worker_pool.idle', + 'worker_pool_queue_delay_seconds_max': 'worker_pool.queue.delay.seconds.max', + 'worker_pool_queue_size': 'worker_pool.queue.size', + 'worker_pool_ratio': 'worker_pool.ratio', + 'worker_pool_usage_seconds_max': 'worker_pool.usage.seconds.max', +} +summaries = { + 'http_server_bytes_read': 'http_server.bytes_read', + 'http_server_bytes_written': 'http_server.bytes_written', + 'http_server_requests_seconds': 'http_server.requests.seconds', + 'worker_pool_queue_delay_seconds': 'worker_pool.queue.delay.seconds', + 'worker_pool_usage_seconds': 'worker_pool.usage.seconds', +} +METRIC_MAP = {**gauges_counters, **summaries} diff --git a/quarkus/hatch.toml b/quarkus/hatch.toml new file mode 100644 index 0000000000000..c85c5f07a7df2 --- /dev/null +++ b/quarkus/hatch.toml @@ -0,0 +1,4 @@ +[env.collectors.datadog-checks] + +[[envs.default.matrix]] +python = ["3.12"] diff --git a/quarkus/manifest.json b/quarkus/manifest.json new file mode 100644 index 0000000000000..27a97dfcd0dcf --- /dev/null +++ b/quarkus/manifest.json @@ -0,0 +1,60 @@ +{ + "manifest_version": "2.0.0", + "app_uuid": "78e72ed2-6ea6-4186-9e57-2015a4a52afc", + "app_id": "quarkus", + "display_on_public_website": false, + "tile": { + "overview": "README.md#Overview", + "configuration": "README.md#Setup", + "support": "README.md#Support", + "changelog": "CHANGELOG.md", + "description": "Monitor your application built with Quarkus.", + "title": "Quarkus", + "media": [], + "classifier_tags": [ + "Supported OS::Linux", + "Supported OS::Windows", + "Supported OS::macOS", + "Category::Metrics", + "Offering::Integration", + "Queried Data Type::Metrics", + "Submitted Data Type::Metrics" + ] 
+ }, + "assets": { + "integration": { + "auto_install": true, + "source_type_id": 29763785, + "source_type_name": "Quarkus", + "configuration": { + "spec": "assets/configuration/spec.yaml" + }, + "events": { + "creates_events": false + }, + "metrics": { + "prefix": "quarkus.", + "check": "quarkus.process.cpu.usage", + "metadata_path": "metadata.csv" + }, + "process_signatures": [ + "java quarkus-run.jar" + ], + "service_checks": { + "metadata_path": "assets/service_checks.json" + } + }, + "dashboards": { + "Quarkus Overview": "assets/dashboards/overview.json" + }, + "monitors": { + "Long Requests": "assets/monitors/long_requests.json" + } + }, + "author": { + "support_email": "help@datadoghq.com", + "name": "Datadog", + "homepage": "https://www.datadoghq.com", + "sales_email": "info@datadoghq.com" + } +} diff --git a/quarkus/metadata.csv b/quarkus/metadata.csv new file mode 100644 index 0000000000000..4e4869d8325eb --- /dev/null +++ b/quarkus/metadata.csv @@ -0,0 +1,51 @@ +metric_name,metric_type,interval,unit_name,per_unit_name,description,orientation,integration,short_name,curated_metric,sample_tags +quarkus.http_server.active_requests,gauge,,request,,Requests to the server that are active right now.,0,quarkus,,, +quarkus.http_server.bytes_read.count,count,,,,Number of times some bytes were received by the server.,0,quarkus,,, +quarkus.http_server.bytes_read.max,gauge,,byte,,Maximum number of bytes currently received by the server.,0,quarkus,,, +quarkus.http_server.bytes_read.sum,count,,byte,,Total number of bytes received by the server since it started.,0,quarkus,,, +quarkus.http_server.bytes_written.count,count,,,,Number of times some bytes were sent by the server.,0,quarkus,,, +quarkus.http_server.bytes_written.max,gauge,,byte,,Current maximum number of bytes sent by the server.,0,quarkus,,, +quarkus.http_server.bytes_written.sum,count,,byte,,Total number of bytes sent by the server.,0,quarkus,,, +quarkus.http_server.connections.seconds.max,gauge,,second,,The 
duration of the connections in seconds.,0,quarkus,,, +quarkus.http_server.requests.seconds.count,count,,,,The number of requests observed so far.,0,quarkus,,, +quarkus.http_server.requests.seconds.max,gauge,,second,,The current longest request duration in seconds.,0,quarkus,,, +quarkus.http_server.requests.seconds.sum,count,,second,,Total number of seconds that all requests took so far.,0,quarkus,,, +quarkus.jvm.buffer.count_buffers,gauge,,buffer,,An estimate of the number of buffers in the pool.,0,quarkus,,, +quarkus.jvm.buffer.memory_used.bytes,gauge,,byte,,An estimate of the memory that the Java virtual machine is using for this buffer pool.,0,quarkus,,, +quarkus.jvm.buffer.total_capacity.bytes,gauge,,byte,,An estimate of the total capacity of the buffers in this pool.,0,quarkus,,, +quarkus.jvm.classes.loaded_classes,gauge,,,,The number of classes that are currently loaded in the Java virtual machine.,0,quarkus,,, +quarkus.jvm.gc.live_data_size.bytes,gauge,,byte,,Size of long-lived heap memory pool after reclamation.,0,quarkus,,, +quarkus.jvm.gc.max_data_size.bytes,gauge,,byte,,Max size of long-lived heap memory pool.,0,quarkus,,, +quarkus.jvm.gc.overhead,gauge,,,,"An approximation of the percent of CPU time used by GC activities over the last lookback period or since monitoring began, whichever is shorter, in the range [0..1].",0,quarkus,,, +quarkus.jvm.memory.committed.bytes,gauge,,byte,,The amount of memory in bytes that is committed for the Java virtual machine to use.,0,quarkus,,, +quarkus.jvm.memory.max.bytes,gauge,,byte,,The maximum amount of memory in bytes that can be used for memory management.,0,quarkus,,, +quarkus.jvm.memory.usage_after_gc,gauge,,fraction,,"The percentage of long-lived heap pool used after the last GC event, in the range [0..1].",0,quarkus,,, +quarkus.jvm.memory.used.bytes,gauge,,byte,,The amount of used memory.,0,quarkus,,, +quarkus.jvm.threads.daemon_threads,gauge,,thread,,The current number of live daemon threads.,0,quarkus,,, 
+quarkus.jvm.threads.live_threads,gauge,,thread,,The current number of live threads including both daemon and non-daemon threads.,0,quarkus,,, +quarkus.jvm.threads.peak_threads,gauge,,thread,,The peak live thread count since the Java virtual machine started or peak was reset.,0,quarkus,,, +quarkus.jvm.threads.states_threads,gauge,,thread,,The current number of threads.,0,quarkus,,, +quarkus.netty.allocator.memory.pinned,gauge,,byte,,"Size, in bytes, of the memory that the allocated buffer uses.",0,quarkus,,, +quarkus.netty.allocator.memory.used,gauge,,byte,,"Size, in bytes, of the memory that the allocator uses.",0,quarkus,,, +quarkus.netty.allocator.pooled.arenas,gauge,,byte,,Number of arenas for a pooled allocator.,0,quarkus,,, +quarkus.netty.allocator.pooled.cache_size,gauge,,byte,,"Size, in bytes, of the cache for a pooled allocator.",0,quarkus,,, +quarkus.netty.allocator.pooled.chunk_size,gauge,,byte,,"Size, in bytes, of memory chunks for a pooled allocator.",0,quarkus,,, +quarkus.netty.allocator.pooled.threadlocal_caches,gauge,,,,Number of ThreadLocal caches for a pooled allocator.,0,quarkus,,, +quarkus.netty.eventexecutor.tasks_pending,gauge,,task,,Number of pending tasks in the event executor.,0,quarkus,,, +quarkus.process.cpu.usage,gauge,,,,The recent cpu usage for the Java Virtual Machine process.,0,quarkus,,, +quarkus.process.files.max_files,gauge,,file,,The maximum file descriptor count.,0,quarkus,,, +quarkus.process.files.open_files,gauge,,file,,The open file descriptor count.,0,quarkus,,, +quarkus.process.uptime.seconds,gauge,,second,,The uptime of the Java virtual machine.,0,quarkus,,, +quarkus.system.cpu.count,gauge,,,,The number of processors available to the Java virtual machine.,0,quarkus,,, +quarkus.system.cpu.usage,gauge,,,,The recent cpu usage of the system the application is running in.,0,quarkus,,, +quarkus.system.load_average_1m,gauge,,,,The sum of the number of runnable entities queued to available processors and the number of runnable 
entities running on the available processors averaged over a period of time.,0,quarkus,,, +quarkus.worker_pool.active,gauge,,,,The number of resources from the pool currently used.,0,quarkus,,, +quarkus.worker_pool.idle,gauge,,,,The number of resources from the pool currently idle.,0,quarkus,,, +quarkus.worker_pool.queue.delay.seconds.count,count,,,,Number of items that spent time in the waiting queue before being processed.,0,quarkus,,, +quarkus.worker_pool.queue.delay.seconds.max,gauge,,second,,Current maximum time spent in the waiting queue before being processed.,0,quarkus,,, +quarkus.worker_pool.queue.delay.seconds.sum,count,,second,,Total time spent in the waiting queue before being processed.,0,quarkus,,, +quarkus.worker_pool.queue.size,gauge,,,,Number of pending elements in the waiting queue.,0,quarkus,,, +quarkus.worker_pool.ratio,gauge,,fraction,,Ratio of workers being used at the moment.,0,quarkus,,, +quarkus.worker_pool.usage.seconds.count,count,,second,,Number of times resources from the pool were being used.,0,quarkus,,, +quarkus.worker_pool.usage.seconds.max,gauge,,second,,Maximum time spent using resources from the pool.,0,quarkus,,, +quarkus.worker_pool.usage.seconds.sum,count,,second,,Total time spent using resources from the pool.,0,quarkus,,, diff --git a/quarkus/pyproject.toml b/quarkus/pyproject.toml new file mode 100644 index 0000000000000..fe7cf0e23997a --- /dev/null +++ b/quarkus/pyproject.toml @@ -0,0 +1,60 @@ +[build-system] +requires = [ + "hatchling>=0.13.0", +] +build-backend = "hatchling.build" + +[project] +name = "datadog-quarkus" +description = "The Quarkus check" +readme = "README.md" +license = "BSD-3-Clause" +requires-python = ">=3.12" +keywords = [ + "datadog", + "datadog agent", + "datadog check", + "quarkus", +] +authors = [ + { name = "Datadog", email = "packages@datadoghq.com" }, +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: System 
Administrators", + "License :: OSI Approved :: BSD License", + "Private :: Do Not Upload", + "Programming Language :: Python :: 3.12", + "Topic :: System :: Monitoring", +] +dependencies = [ + "datadog-checks-base>=37.0.0", +] +dynamic = [ + "version", +] + +[project.optional-dependencies] +deps = [] + +[project.urls] +Source = "https://github.com/DataDog/integrations-core" + +[tool.hatch.version] +path = "datadog_checks/quarkus/__about__.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/datadog_checks", + "/tests", + "/manifest.json", +] + +[tool.hatch.build.targets.wheel] +include = [ + "/datadog_checks/quarkus", +] +dev-mode-dirs = [ + ".", +] diff --git a/quarkus/tests/__init__.py b/quarkus/tests/__init__.py new file mode 100644 index 0000000000000..9103122bf028d --- /dev/null +++ b/quarkus/tests/__init__.py @@ -0,0 +1,3 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) diff --git a/quarkus/tests/conftest.py b/quarkus/tests/conftest.py new file mode 100644 index 0000000000000..1e8d20eae623f --- /dev/null +++ b/quarkus/tests/conftest.py @@ -0,0 +1,29 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +import copy +from pathlib import Path + +import pytest + +from datadog_checks.dev import docker_run +from datadog_checks.dev.conditions import CheckEndpoints + +INSTANCE = {'openmetrics_endpoint': 'http://localhost:8080/q/metrics'} + + +@pytest.fixture(scope='session') +def dd_environment(): + compose_file = str(Path(__file__).parent.absolute() / 'docker' / 'docker-compose.yaml') + conditions = [ + CheckEndpoints(INSTANCE["openmetrics_endpoint"]), + ] + with docker_run(compose_file, conditions=conditions): + yield { + 'instances': [INSTANCE], + } + + +@pytest.fixture +def instance(): + return copy.deepcopy(INSTANCE) diff --git a/quarkus/tests/docker/README.md b/quarkus/tests/docker/README.md new file mode 100644 index 0000000000000..51ee36d9d6ef9 --- /dev/null +++ b/quarkus/tests/docker/README.md @@ -0,0 +1,10 @@ +To test an example Quarkus app that exposes metrics, we took the documented example from here: +https://github.com/quarkusio/quarkus-quickstarts/tree/1347e49b4441e43c3faac3b3953dd5e988af379b/micrometer-quickstart + +We then used this StackOverflow post to write a Dockerfile that would build the app: +https://stackoverflow.com/a/75759520 + +We needed the following tweaks: + +- Tweak `.dockerignore` to stop ignoring all files. +- Disable the step `RUN ./mvnw dependency:go-offline -B` in the Dockerfile. 
diff --git a/quarkus/tests/docker/docker-compose.yaml b/quarkus/tests/docker/docker-compose.yaml new file mode 100755 index 0000000000000..1f07754eca0d1 --- /dev/null +++ b/quarkus/tests/docker/docker-compose.yaml @@ -0,0 +1,6 @@ +services: + + quarkus-app: + build: micrometer-quickstart + ports: + - "8080:8080" diff --git a/quarkus/tests/docker/micrometer-quickstart/.dockerignore b/quarkus/tests/docker/micrometer-quickstart/.dockerignore new file mode 100644 index 0000000000000..7b6be1b3d4556 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/.dockerignore @@ -0,0 +1,4 @@ +!target/*-runner +!target/*-runner.jar +!target/lib/* +!target/quarkus-app/ diff --git a/quarkus/tests/docker/micrometer-quickstart/.gitignore b/quarkus/tests/docker/micrometer-quickstart/.gitignore new file mode 100644 index 0000000000000..087a18358fe57 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/.gitignore @@ -0,0 +1,35 @@ +# Eclipse +.project +.classpath +.settings/ +bin/ + +# IntelliJ +.idea +*.ipr +*.iml +*.iws + +# NetBeans +nb-configuration.xml + +# Visual Studio Code +.vscode + +# OSX +.DS_Store + +# Vim +*.swp +*.swo + +# patch +*.orig +*.rej + +# Maven +target/ +pom.xml.tag +pom.xml.releaseBackup +pom.xml.versionsBackup +release.properties \ No newline at end of file diff --git a/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.jar b/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000000..7967f30dd1d25 Binary files /dev/null and b/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.jar differ diff --git a/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.properties b/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000000..9548abd8e8c0e --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,20 @@ +# Licensed to the 
Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +wrapperVersion=3.3.2 +distributionType=bin +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.9/apache-maven-3.9.9-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar diff --git a/quarkus/tests/docker/micrometer-quickstart/Dockerfile b/quarkus/tests/docker/micrometer-quickstart/Dockerfile new file mode 100644 index 0000000000000..09f4137121700 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/Dockerfile @@ -0,0 +1,52 @@ +# Use the official JDK 19 image as the base image for the build stage +FROM openjdk:19-jdk AS build + +# Enable preview features +ENV JAVA_OPTS="--enable-preview" + +# Set the working directory +WORKDIR /app + +# Copy the Maven wrapper +COPY ./mvnw . +COPY ./.mvn .mvn + +# Copy the pom.xml file +COPY ./pom.xml . + +# The StackOverflow post where we got this dockerfile included the steps to download the deps. +# This didn't work for us, so we disabled it. 
+# https://stackoverflow.com/a/75759520 +# ENV HTTP_PROXY="http://host.docker.internal:3128" +# ENV HTTPS_PROXY="http://host.docker.internal:3128" +# ENV http_proxy="http://host.docker.internal:3128" +# ENV https_proxy="http://host.docker.internal:3128" +# ENV MAVEN_OPTS="-Dhttp.proxyHost=host.docker.internal -Dhttp.proxyPort=3128 -Dhttps.proxyHost=host.docker.internal -Dhttps.proxyPort=3128 --enable-preview" +# Download dependencies and cache them +# RUN ./mvnw dependency:go-offline -B + +# Copy the source code +COPY ./src . + +# Compile and package the application +RUN ./mvnw package -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -B -V + + +# Use the official JDK 19 image as the base image for the runtime stage +FROM openjdk:19-jdk AS runtime + +# Enable preview features +ENV JAVA_OPTS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager --enable-preview" + +# Set the working directory +WORKDIR /app + +# Copy the build artifacts from the build stage +#COPY --from=build /app/target/quarkus-app/quarkus-run.jar /app/app.jar +COPY --from=build /app/target/quarkus-app/lib/ /app/lib/ +COPY --from=build /app/target/quarkus-app/*.jar /app/ +COPY --from=build /app/target/quarkus-app/app/ /app/app/ +COPY --from=build /app/target/quarkus-app/quarkus/ /app/quarkus/ + +# Set the entrypoint and command to run the application +ENTRYPOINT ["sh", "-c", "java $JAVA_OPTS -jar /app/quarkus-run.jar"] diff --git a/quarkus/tests/docker/micrometer-quickstart/README.md b/quarkus/tests/docker/micrometer-quickstart/README.md new file mode 100644 index 0000000000000..500cfac084a17 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/README.md @@ -0,0 +1 @@ +Quarkus guide: https://quarkus.io/guides/micrometer diff --git a/quarkus/tests/docker/micrometer-quickstart/mvnw b/quarkus/tests/docker/micrometer-quickstart/mvnw new file mode 100755 index 0000000000000..5e9618cac26d1 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/mvnw @@ 
-0,0 +1,332 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Apache Maven Wrapper startup batch script, version 3.3.2 +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ]; then + + if [ -f /usr/local/etc/mavenrc ]; then + . /usr/local/etc/mavenrc + fi + + if [ -f /etc/mavenrc ]; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ]; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. 
+cygwin=false +darwin=false +mingw=false +case "$(uname)" in +CYGWIN*) cygwin=true ;; +MINGW*) mingw=true ;; +Darwin*) + darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + JAVA_HOME="$(/usr/libexec/java_home)" + export JAVA_HOME + else + JAVA_HOME="/Library/Java/Home" + export JAVA_HOME + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ]; then + if [ -r /etc/gentoo-release ]; then + JAVA_HOME=$(java-config --jre-home) + fi +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin; then + [ -n "$JAVA_HOME" ] \ + && JAVA_HOME=$(cygpath --unix "$JAVA_HOME") + [ -n "$CLASSPATH" ] \ + && CLASSPATH=$(cygpath --path --unix "$CLASSPATH") +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw; then + [ -n "$JAVA_HOME" ] && [ -d "$JAVA_HOME" ] \ + && JAVA_HOME="$( + cd "$JAVA_HOME" || ( + echo "cannot cd into $JAVA_HOME." >&2 + exit 1 + ) + pwd + )" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="$(which javac)" + if [ -n "$javaExecutable" ] && ! [ "$(expr "$javaExecutable" : '\([^ ]*\)')" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=$(which readlink) + if [ ! 
"$(expr "$readLink" : '\([^ ]*\)')" = "no" ]; then + if $darwin; then + javaHome="$(dirname "$javaExecutable")" + javaExecutable="$(cd "$javaHome" && pwd -P)/javac" + else + javaExecutable="$(readlink -f "$javaExecutable")" + fi + javaHome="$(dirname "$javaExecutable")" + javaHome=$(expr "$javaHome" : '\(.*\)/bin') + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ]; then + if [ -n "$JAVA_HOME" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="$( + \unset -f command 2>/dev/null + \command -v java + )" + fi +fi + +if [ ! -x "$JAVACMD" ]; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ]; then + echo "Warning: JAVA_HOME environment variable is not set." >&2 +fi + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + if [ -z "$1" ]; then + echo "Path not specified to find_maven_basedir" >&2 + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ]; do + if [ -d "$wdir"/.mvn ]; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=$( + cd "$wdir/.." || exit 1 + pwd + ) + fi + # end of workaround + done + printf '%s' "$( + cd "$basedir" || exit 1 + pwd + )" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + # Remove \r in case we run on Windows within Git Bash + # and check out the repository with auto CRLF management + # enabled. Otherwise, we may read lines that are delimited with + # \r\n and produce $'-Xarg\r' rather than -Xarg due to word + # splitting rules. 
+ tr -s '\r\n' ' ' <"$1" + fi +} + +log() { + if [ "$MVNW_VERBOSE" = true ]; then + printf '%s\n' "$1" + fi +} + +BASE_DIR=$(find_maven_basedir "$(dirname "$0")") +if [ -z "$BASE_DIR" ]; then + exit 1 +fi + +MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +export MAVEN_PROJECTBASEDIR +log "$MAVEN_PROJECTBASEDIR" + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +wrapperJarPath="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" +if [ -r "$wrapperJarPath" ]; then + log "Found $wrapperJarPath" +else + log "Couldn't find $wrapperJarPath, downloading it ..." + + if [ -n "$MVNW_REPOURL" ]; then + wrapperUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar" + else + wrapperUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar" + fi + while IFS="=" read -r key value; do + # Remove '\r' from value to allow usage on windows as IFS does not consider '\r' as a separator ( considers space, tab, new line ('\n'), and custom '=' ) + safeValue=$(echo "$value" | tr -d '\r') + case "$key" in wrapperUrl) + wrapperUrl="$safeValue" + break + ;; + esac + done <"$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" + log "Downloading from: $wrapperUrl" + + if $cygwin; then + wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") + fi + + if command -v wget >/dev/null; then + log "Found wget ... 
using wget" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--quiet" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget $QUIET "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + else + wget $QUIET --http-user="$MVNW_USERNAME" --http-password="$MVNW_PASSWORD" "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + fi + elif command -v curl >/dev/null; then + log "Found curl ... using curl" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--silent" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl $QUIET -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" + else + curl $QUIET --user "$MVNW_USERNAME:$MVNW_PASSWORD" -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" + fi + else + log "Falling back to using Java to download" + javaSource="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.java" + javaClass="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.class" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaSource=$(cygpath --path --windows "$javaSource") + javaClass=$(cygpath --path --windows "$javaClass") + fi + if [ -e "$javaSource" ]; then + if [ ! -e "$javaClass" ]; then + log " - Compiling MavenWrapperDownloader.java ..." + ("$JAVA_HOME/bin/javac" "$javaSource") + fi + if [ -e "$javaClass" ]; then + log " - Running MavenWrapperDownloader.java ..." 
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$wrapperUrl" "$wrapperJarPath") || rm -f "$wrapperJarPath" + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +# If specified, validate the SHA-256 sum of the Maven wrapper jar file +wrapperSha256Sum="" +while IFS="=" read -r key value; do + case "$key" in wrapperSha256Sum) + wrapperSha256Sum=$value + break + ;; + esac +done <"$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" +if [ -n "$wrapperSha256Sum" ]; then + wrapperSha256Result=false + if command -v sha256sum >/dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | sha256sum -c >/dev/null 2>&1; then + wrapperSha256Result=true + fi + elif command -v shasum >/dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | shasum -a 256 -c >/dev/null 2>&1; then + wrapperSha256Result=true + fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2 + echo "Please install either command, or disable validation by removing 'wrapperSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $wrapperSha256Result = false ]; then + echo "Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised." >&2 + echo "Investigate or delete $wrapperJarPath to attempt a clean download." >&2 + echo "If you updated your Maven version, you need to update the specified wrapperSha256Sum property." 
>&2 + exit 1 + fi +fi + +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$JAVA_HOME" ] \ + && JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") + [ -n "$CLASSPATH" ] \ + && CLASSPATH=$(cygpath --path --windows "$CLASSPATH") + [ -n "$MAVEN_PROJECTBASEDIR" ] \ + && MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $*" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +# shellcheck disable=SC2086 # safe args +exec "$JAVACMD" \ + $MAVEN_OPTS \ + $MAVEN_DEBUG_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/quarkus/tests/docker/micrometer-quickstart/mvnw.cmd b/quarkus/tests/docker/micrometer-quickstart/mvnw.cmd new file mode 100644 index 0000000000000..4136715f081ec --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/mvnw.cmd @@ -0,0 +1,206 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. 
You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. 
to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* +if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. >&2 +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. >&2 +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. >&2 +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. >&2 +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. 
+ +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set WRAPPER_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar" + +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET WRAPPER_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET WRAPPER_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.3.2/maven-wrapper-3.3.2.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... 
+ echo Downloading from: %WRAPPER_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%WRAPPER_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM If specified, validate the SHA-256 sum of the Maven wrapper jar file +SET WRAPPER_SHA_256_SUM="" +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperSha256Sum" SET WRAPPER_SHA_256_SUM=%%B +) +IF NOT %WRAPPER_SHA_256_SUM%=="" ( + powershell -Command "&{"^ + "Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash;"^ + "$hash = (Get-FileHash \"%WRAPPER_JAR%\" -Algorithm SHA256).Hash.ToLower();"^ + "If('%WRAPPER_SHA_256_SUM%' -ne $hash){"^ + " Write-Error 'Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised.';"^ + " Write-Error 'Investigate or delete %WRAPPER_JAR% to attempt a clean download.';"^ + " Write-Error 'If you updated your Maven version, you need to update the specified wrapperSha256Sum property.';"^ + " exit 1;"^ + "}"^ + "}" + if ERRORLEVEL 1 goto error +) + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% ^ + %JVM_CONFIG_MAVEN_PROPS% ^ + %MAVEN_OPTS% ^ + %MAVEN_DEBUG_OPTS% ^ + -classpath %WRAPPER_JAR% ^ + "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ + %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" +if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%"=="on" pause + +if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% + +cmd /C exit /B %ERROR_CODE% diff --git a/quarkus/tests/docker/micrometer-quickstart/pom.xml b/quarkus/tests/docker/micrometer-quickstart/pom.xml new file mode 100644 index 0000000000000..623b16ead9cdb --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/pom.xml @@ -0,0 +1,127 @@ + + + 4.0.0 + + org.acme + micrometer-quickstart + 1.0.0-SNAPSHOT + + + quarkus-bom + io.quarkus.platform + 3.17.4 + 3.11.0 + 3.1.2 + 17 + 17 + true + UTF-8 + + + + + + ${quarkus.platform.group-id} + ${quarkus.platform.artifact-id} + ${quarkus.platform.version} + pom + import + + + + + + + io.quarkus + quarkus-micrometer-registry-prometheus + + + io.quarkus + quarkus-rest + + + io.quarkus + quarkus-junit5 + test + + + io.rest-assured + rest-assured + test + + + + + ${project.artifactId} + + + maven-compiler-plugin + ${compiler-plugin.version} + + + maven-surefire-plugin + ${surefire-plugin.version} + + + org.jboss.logmanager.LogManager + ${maven.home} + + + + + ${quarkus.platform.group-id} + quarkus-maven-plugin + ${quarkus.platform.version} + + + + build + + + + + + + + + native + + + native + + + + true + false + + + + + org.apache.maven.plugins + 
maven-failsafe-plugin + ${surefire-plugin.version} + + + + integration-test + verify + + + + ${project.build.directory}/${project.build.finalName}-runner + org.jboss.logmanager.LogManager + ${maven.home} + + + + + + + + + + + + diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.jvm b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.jvm new file mode 100644 index 0000000000000..e79d3a6e0865c --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.jvm @@ -0,0 +1,97 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode +# +# Before building the container image run: +# +# ./mvnw package +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/micrometer-quickstart-jvm . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart-jvm +# +# If you want to include the debug port into your docker image +# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. +# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 +# when running the container +# +# Then run the container using : +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart-jvm +# +# This image uses the `run-java.sh` script to run the application. +# This scripts computes the command line to execute your Java application, and +# includes memory/GC tuning. +# You can configure the behavior using the following environment properties: +# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") +# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options +# in JAVA_OPTS (example: "-Dsome.property=foo") +# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. 
This is +# used to calculate a default maximal heap memory based on a containers restriction. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio +# of the container available memory as set here. The default is `50` which means 50% +# of the available memory is used as an upper boundary. You can skip this mechanism by +# setting this value to `0` in which case no `-Xmx` option is added. +# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This +# is used to calculate a default initial heap memory based on the maximum heap memory. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio +# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` +# is used as the initial heap size. You can skip this mechanism by setting this value +# to `0` in which case no `-Xms` option is added (example: "25") +# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. +# This is used to calculate the maximum value of the initial heap memory. If used in +# a container without any memory constraints for the container then this option has +# no effect. If there is a memory constraint then `-Xms` is limited to the value set +# here. The default is 4096MB which means the calculated value of `-Xms` never will +# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") +# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output +# when things are happening. This option, if set to true, will set +# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). +# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: +# true"). +# - JAVA_DEBUG_PORT: Port used for remote debugging. 
Defaults to 5005 (example: "8787"). +# - CONTAINER_CORE_LIMIT: A calculated core limit as described in +# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") +# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). +# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. +# (example: "20") +# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. +# (example: "40") +# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. +# (example: "4") +# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus +# previous GC times. (example: "90") +# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") +# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") +# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should +# contain the necessary JRE command-line options to specify the required GC, which +# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). +# - HTTPS_PROXY: The location of the https proxy. (example: "myuser@127.0.0.1:8080") +# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") +# - NO_PROXY: A comma separated lists of hosts, IP addresses or domains that can be +# accessed directly. 
(example: "foo.example.com,bar.example.com") +# +### +FROM registry.access.redhat.com/ubi8/openjdk-17:1.20 + +ENV LANGUAGE='en_US:en' + + +# We make four distinct layers so if there are application changes the library layers can be re-used +COPY --chown=185 target/quarkus-app/lib/ /deployments/lib/ +COPY --chown=185 target/quarkus-app/*.jar /deployments/ +COPY --chown=185 target/quarkus-app/app/ /deployments/app/ +COPY --chown=185 target/quarkus-app/quarkus/ /deployments/quarkus/ + +EXPOSE 8080 +USER 185 +ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" +ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" + +ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] + diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.legacy-jar b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.legacy-jar new file mode 100644 index 0000000000000..53a3108267d77 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.legacy-jar @@ -0,0 +1,93 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode +# +# Before building the container image run: +# +# ./mvnw package -Dquarkus.package.jar.type=legacy-jar +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.legacy-jar -t quarkus/micrometer-quickstart-legacy-jar . +# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart-legacy-jar +# +# If you want to include the debug port into your docker image +# you will have to expose the debug port (default 5005 being the default) like this : EXPOSE 8080 5005. 
+# Additionally you will have to set -e JAVA_DEBUG=true and -e JAVA_DEBUG_PORT=*:5005 +# when running the container +# +# Then run the container using : +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart-legacy-jar +# +# This image uses the `run-java.sh` script to run the application. +# This scripts computes the command line to execute your Java application, and +# includes memory/GC tuning. +# You can configure the behavior using the following environment properties: +# - JAVA_OPTS: JVM options passed to the `java` command (example: "-verbose:class") +# - JAVA_OPTS_APPEND: User specified Java options to be appended to generated options +# in JAVA_OPTS (example: "-Dsome.property=foo") +# - JAVA_MAX_MEM_RATIO: Is used when no `-Xmx` option is given in JAVA_OPTS. This is +# used to calculate a default maximal heap memory based on a containers restriction. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xmx` is set to a ratio +# of the container available memory as set here. The default is `50` which means 50% +# of the available memory is used as an upper boundary. You can skip this mechanism by +# setting this value to `0` in which case no `-Xmx` option is added. +# - JAVA_INITIAL_MEM_RATIO: Is used when no `-Xms` option is given in JAVA_OPTS. This +# is used to calculate a default initial heap memory based on the maximum heap memory. +# If used in a container without any memory constraints for the container then this +# option has no effect. If there is a memory constraint then `-Xms` is set to a ratio +# of the `-Xmx` memory as set here. The default is `25` which means 25% of the `-Xmx` +# is used as the initial heap size. You can skip this mechanism by setting this value +# to `0` in which case no `-Xms` option is added (example: "25") +# - JAVA_MAX_INITIAL_MEM: Is used when no `-Xms` option is given in JAVA_OPTS. 
+# This is used to calculate the maximum value of the initial heap memory. If used in +# a container without any memory constraints for the container then this option has +# no effect. If there is a memory constraint then `-Xms` is limited to the value set +# here. The default is 4096MB which means the calculated value of `-Xms` never will +# be greater than 4096MB. The value of this variable is expressed in MB (example: "4096") +# - JAVA_DIAGNOSTICS: Set this to get some diagnostics information to standard output +# when things are happening. This option, if set to true, will set +# `-XX:+UnlockDiagnosticVMOptions`. Disabled by default (example: "true"). +# - JAVA_DEBUG: If set remote debugging will be switched on. Disabled by default (example: +# "true"). +# - JAVA_DEBUG_PORT: Port used for remote debugging. Defaults to 5005 (example: "8787"). +# - CONTAINER_CORE_LIMIT: A calculated core limit as described in +# https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt. (example: "2") +# - CONTAINER_MAX_MEMORY: Memory limit given to the container (example: "1024"). +# - GC_MIN_HEAP_FREE_RATIO: Minimum percentage of heap free after GC to avoid expansion. +# (example: "20") +# - GC_MAX_HEAP_FREE_RATIO: Maximum percentage of heap free after GC to avoid shrinking. +# (example: "40") +# - GC_TIME_RATIO: Specifies the ratio of the time spent outside the garbage collection. +# (example: "4") +# - GC_ADAPTIVE_SIZE_POLICY_WEIGHT: The weighting given to the current GC time versus +# previous GC times. (example: "90") +# - GC_METASPACE_SIZE: The initial metaspace size. (example: "20") +# - GC_MAX_METASPACE_SIZE: The maximum metaspace size. (example: "100") +# - GC_CONTAINER_OPTIONS: Specify Java GC to use. The value of this variable should +# contain the necessary JRE command-line options to specify the required GC, which +# will override the default of `-XX:+UseParallelGC` (example: -XX:+UseG1GC). +# - HTTPS_PROXY: The location of the https proxy.
(example: "myuser@127.0.0.1:8080") +# - HTTP_PROXY: The location of the http proxy. (example: "myuser@127.0.0.1:8080") +# - NO_PROXY: A comma separated list of hosts, IP addresses or domains that can be +# accessed directly. (example: "foo.example.com,bar.example.com") +# +### +FROM registry.access.redhat.com/ubi8/openjdk-17:1.20 + +ENV LANGUAGE='en_US:en' + + +COPY target/lib/* /deployments/lib/ +COPY target/*-runner.jar /deployments/quarkus-run.jar + +EXPOSE 8080 +USER 185 +ENV JAVA_OPTS_APPEND="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager" +ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" + +ENTRYPOINT [ "/opt/jboss/container/java/run/run-java.sh" ] diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native new file mode 100644 index 0000000000000..e8fd1da6b4245 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native @@ -0,0 +1,27 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. +# +# Before building the container image run: +# +# ./mvnw package -Dnative +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.native -t quarkus/micrometer-quickstart .
+# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart +# +### +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10 +WORKDIR /work/ +RUN chown 1001 /work \ + && chmod "g+rwX" /work \ + && chown 1001:root /work +COPY --chown=1001:root target/*-runner /work/application + +EXPOSE 8080 +USER 1001 + +ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native-micro b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native-micro new file mode 100644 index 0000000000000..4eff6a24a7bf2 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/docker/Dockerfile.native-micro @@ -0,0 +1,30 @@ +#### +# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode. +# It uses a micro base image, tuned for Quarkus native executables. +# It reduces the size of the resulting container image. +# Check https://quarkus.io/guides/quarkus-runtime-base-image for further information about this image. +# +# Before building the container image run: +# +# ./mvnw package -Dnative +# +# Then, build the image with: +# +# docker build -f src/main/docker/Dockerfile.native-micro -t quarkus/micrometer-quickstart . 
+# +# Then run the container using: +# +# docker run -i --rm -p 8080:8080 quarkus/micrometer-quickstart +# +### +FROM quay.io/quarkus/quarkus-micro-image:2.0 +WORKDIR /work/ +RUN chown 1001 /work \ + && chmod "g+rwX" /work \ + && chown 1001:root /work +COPY --chown=1001:root target/*-runner /work/application + +EXPOSE 8080 +USER 1001 + +ENTRYPOINT ["./application", "-Dquarkus.http.host=0.0.0.0"] diff --git a/quarkus/tests/docker/micrometer-quickstart/src/main/java/org/acme/micrometer/ExampleResource.java b/quarkus/tests/docker/micrometer-quickstart/src/main/java/org/acme/micrometer/ExampleResource.java new file mode 100644 index 0000000000000..0629a31574f79 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/main/java/org/acme/micrometer/ExampleResource.java @@ -0,0 +1,81 @@ +package org.acme.micrometer; + +import java.util.LinkedList; +import java.util.NoSuchElementException; + +import jakarta.ws.rs.GET; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.Produces; + +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.Tags; +import io.micrometer.core.instrument.Timer; + +@Path("/example") +@Produces("text/plain") +public class ExampleResource { + + private final MeterRegistry registry; + + LinkedList list = new LinkedList<>(); + + // Update the constructor to create the gauge + ExampleResource(MeterRegistry registry) { + this.registry = registry; + registry.gaugeCollectionSize("example.list.size", Tags.empty(), list); + } + + @GET + @Path("gauge/{number}") + public Long checkListSize(long number) { + if (number == 2 || number % 2 == 0) { + // add even numbers to the list + list.add(number); + } else { + // remove items from the list for odd numbers + try { + number = list.removeFirst(); + } catch (NoSuchElementException nse) { + number = 0; + } + } + return number; + } + + @GET + @Path("prime/{number}") + public String checkIfPrime(long number) { + if (number < 1) { + registry.counter("example.prime.number", 
"type", "not-natural").increment(); + return "Only natural numbers can be prime numbers."; + } + if (number == 1) { + registry.counter("example.prime.number", "type", "one").increment(); + return number + " is not prime."; + } + if (number == 2 || number % 2 == 0) { + registry.counter("example.prime.number", "type", "even").increment(); + return number + " is not prime."; + } + + if (testPrimeNumber(number)) { + registry.counter("example.prime.number", "type", "prime").increment(); + return number + " is prime."; + } else { + registry.counter("example.prime.number", "type", "not-prime").increment(); + return number + " is not prime."; + } + } + + protected boolean testPrimeNumber(long number) { + Timer timer = registry.timer("example.prime.number.test"); + return timer.record(() -> { + for (int i = 3; i < Math.floor(Math.sqrt(number)) + 1; i = i + 2) { + if (number % i == 0) { + return false; + } + } + return true; + }); + } +} diff --git a/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceIT.java b/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceIT.java new file mode 100644 index 0000000000000..6c24500cf3963 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceIT.java @@ -0,0 +1,8 @@ +package org.acme.micrometer; + +import io.quarkus.test.junit.QuarkusIntegrationTest; + +@QuarkusIntegrationTest +public class ExampleResourceIT extends ExampleResourceTest { + +} diff --git a/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceTest.java b/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceTest.java new file mode 100644 index 0000000000000..f0e5b8f1a34a7 --- /dev/null +++ b/quarkus/tests/docker/micrometer-quickstart/src/test/java/org/acme/micrometer/ExampleResourceTest.java @@ -0,0 +1,68 @@ +package org.acme.micrometer; + +import static 
io.restassured.RestAssured.get; +import static io.restassured.RestAssured.when; +import static org.hamcrest.CoreMatchers.containsString; + +import org.junit.jupiter.api.Test; + +import io.quarkus.test.junit.QuarkusTest; +import io.restassured.http.Header; + +@QuarkusTest +public class ExampleResourceTest { + + @Test + void testGaugeExample() { + when().get("/example/gauge/1").then().statusCode(200); + when().get("/example/gauge/2").then().statusCode(200); + when().get("/example/gauge/4").then().statusCode(200); + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_list_size 2.0")); + when().get("/example/gauge/6").then().statusCode(200); + when().get("/example/gauge/5").then().statusCode(200); + when().get("/example/gauge/7").then().statusCode(200); + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_list_size 1.0")); + } + + @Test + void testCounterExample() { + when().get("/example/prime/-1").then().statusCode(200); + when().get("/example/prime/0").then().statusCode(200); + when().get("/example/prime/1").then().statusCode(200); + when().get("/example/prime/2").then().statusCode(200); + when().get("/example/prime/3").then().statusCode(200); + when().get("/example/prime/15").then().statusCode(200); + + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_prime_number_total{type=\"prime\"}")) + .body(containsString( + "example_prime_number_total{type=\"not-prime\"}")) + .body(containsString( + "example_prime_number_total{type=\"one\"}")) + .body(containsString( + "example_prime_number_total{type=\"even\"}")) + .body(containsString( + "example_prime_number_total{type=\"not-natural\"}")); + } + + @Test + void testTimerExample() { + when().get("/example/prime/257").then().statusCode(200); + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_prime_number_test_seconds_sum")) + .body(containsString( + "example_prime_number_test_seconds_max")) + 
.body(containsString( + "example_prime_number_test_seconds_count 1.0")); + when().get("/example/prime/7919").then().statusCode(200); + when().get("/q/metrics").then().statusCode(200) + .body(containsString( + "example_prime_number_test_seconds_count 2.0")); + } +} diff --git a/quarkus/tests/fixtures/quarkus_auto_metrics.txt b/quarkus/tests/fixtures/quarkus_auto_metrics.txt new file mode 100644 index 0000000000000..fb35ed0b0fb1f --- /dev/null +++ b/quarkus/tests/fixtures/quarkus_auto_metrics.txt @@ -0,0 +1,241 @@ +# TYPE worker_pool_rejected counter +# HELP worker_pool_rejected Number of times submissions to the pool have been rejected +worker_pool_rejected_total{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_rejected_total{pool_name="vert.x-worker-thread",pool_type="worker"} 0.0 +# TYPE worker_pool_completed counter +# HELP worker_pool_completed Number of times resources from the pool have been acquired +worker_pool_completed_total{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_completed_total{pool_name="vert.x-worker-thread",pool_type="worker"} 5.0 +# TYPE jvm_gc_memory_promoted_bytes counter +# HELP jvm_gc_memory_promoted_bytes Count of positive increases in the size of the old generation memory pool before GC to after GC +jvm_gc_memory_promoted_bytes_total 0.0 +# TYPE netty_allocator_pooled_cache_size gauge +# HELP netty_allocator_pooled_cache_size +netty_allocator_pooled_cache_size{allocator_type="PooledByteBufAllocator",cache_type="normal",id="298568580"} 64.0 +netty_allocator_pooled_cache_size{allocator_type="PooledByteBufAllocator",cache_type="normal",id="1612048265"} 64.0 +netty_allocator_pooled_cache_size{allocator_type="PooledByteBufAllocator",cache_type="small",id="298568580"} 256.0 +netty_allocator_pooled_cache_size{allocator_type="PooledByteBufAllocator",cache_type="small",id="1612048265"} 256.0 +# TYPE worker_pool_queue_delay_seconds_max gauge +# HELP worker_pool_queue_delay_seconds_max Time spent 
in the waiting queue before being processed +worker_pool_queue_delay_seconds_max{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_queue_delay_seconds_max{pool_name="vert.x-worker-thread",pool_type="worker"} 0.001048665 +# TYPE worker_pool_queue_delay_seconds summary +# HELP worker_pool_queue_delay_seconds Time spent in the waiting queue before being processed +worker_pool_queue_delay_seconds_count{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_queue_delay_seconds_sum{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_queue_delay_seconds_count{pool_name="vert.x-worker-thread",pool_type="worker"} 6.0 +worker_pool_queue_delay_seconds_sum{pool_name="vert.x-worker-thread",pool_type="worker"} 0.002354759 +# TYPE jvm_memory_committed_bytes gauge +# HELP jvm_memory_committed_bytes The amount of memory in bytes that is committed for the Java virtual machine to use +jvm_memory_committed_bytes{area="heap",id="G1 Survivor Space"} 1.2582912E7 +jvm_memory_committed_bytes{area="heap",id="G1 Old Gen"} 5.8720256E7 +jvm_memory_committed_bytes{area="nonheap",id="Metaspace"} 5.1576832E7 +jvm_memory_committed_bytes{area="nonheap",id="CodeCache"} 1.3369344E7 +jvm_memory_committed_bytes{area="heap",id="G1 Eden Space"} 9.0177536E7 +jvm_memory_committed_bytes{area="nonheap",id="Compressed Class Space"} 7602176.0 +# TYPE process_uptime_seconds gauge +# HELP process_uptime_seconds The uptime of the Java virtual machine +process_uptime_seconds 99.172 +# TYPE jvm_threads_daemon_threads gauge +# HELP jvm_threads_daemon_threads The current number of live daemon threads +jvm_threads_daemon_threads 12.0 +# TYPE http_server_connections_seconds_max gauge +# HELP http_server_connections_seconds_max The duration of the connections +http_server_connections_seconds_max 0.003109493 +# TYPE http_server_connections_seconds summary +# HELP http_server_connections_seconds The duration of the connections 
+http_server_connections_seconds_active_count 1.0 +http_server_connections_seconds_duration_sum 0.003101871 +# TYPE process_start_time_seconds gauge +# HELP process_start_time_seconds Start time of the process since unix epoch. +process_start_time_seconds 1.734088355036E9 +# TYPE http_server_bytes_read summary +# HELP http_server_bytes_read Number of bytes received by the server +http_server_bytes_read_count 0.0 +http_server_bytes_read_sum 0.0 +# TYPE http_server_bytes_read_max gauge +# HELP http_server_bytes_read_max Number of bytes received by the server +http_server_bytes_read_max 0.0 +# TYPE jvm_threads_live_threads gauge +# HELP jvm_threads_live_threads The current number of live threads including both daemon and non-daemon threads +jvm_threads_live_threads 21.0 +# TYPE http_server_requests_seconds summary +# HELP http_server_requests_seconds HTTP server request processing time +http_server_requests_seconds_count{method="GET",outcome="SUCCESS",status="200",uri="/example/prime/{number}"} 1.0 +http_server_requests_seconds_sum{method="GET",outcome="SUCCESS",status="200",uri="/example/prime/{number}"} 0.010070499 +http_server_requests_seconds_count{method="GET",outcome="CLIENT_ERROR",status="404",uri="NOT_FOUND"} 1.0 +http_server_requests_seconds_sum{method="GET",outcome="CLIENT_ERROR",status="404",uri="NOT_FOUND"} 0.028919085 +# TYPE http_server_requests_seconds_max gauge +# HELP http_server_requests_seconds_max HTTP server request processing time +http_server_requests_seconds_max{method="GET",outcome="SUCCESS",status="200",uri="/example/prime/{number}"} 0.010070499 +http_server_requests_seconds_max{method="GET",outcome="CLIENT_ERROR",status="404",uri="NOT_FOUND"} 0.028919085 +# TYPE system_cpu_usage gauge +# HELP system_cpu_usage The \"recent cpu usage\" of the system the application is running in +system_cpu_usage 6.443298969072165E-4 +# TYPE jvm_gc_overhead gauge +# HELP jvm_gc_overhead An approximation of the percent of CPU time used by GC activities over the 
last lookback period or since monitoring began, whichever is shorter, in the range [0..1] +jvm_gc_overhead 0.0 +# TYPE worker_pool_active gauge +# HELP worker_pool_active The number of resources from the pool currently used +worker_pool_active{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_active{pool_name="vert.x-worker-thread",pool_type="worker"} 1.0 +# TYPE jvm_threads_states_threads gauge +# HELP jvm_threads_states_threads The current number of threads +jvm_threads_states_threads{state="runnable"} 11.0 +jvm_threads_states_threads{state="blocked"} 0.0 +jvm_threads_states_threads{state="waiting"} 7.0 +jvm_threads_states_threads{state="timed-waiting"} 3.0 +jvm_threads_states_threads{state="new"} 0.0 +jvm_threads_states_threads{state="terminated"} 0.0 +# TYPE netty_allocator_memory_pinned gauge +# HELP netty_allocator_memory_pinned +netty_allocator_memory_pinned{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="heap"} 0.0 +netty_allocator_memory_pinned{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="direct"} 0.0 +netty_allocator_memory_pinned{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="heap"} 0.0 +netty_allocator_memory_pinned{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="direct"} 0.0 +# TYPE system_cpu_count gauge +# HELP system_cpu_count The number of processors available to the Java virtual machine +system_cpu_count 4.0 +# TYPE jvm_info counter +# HELP jvm_info JVM version info +jvm_info_total{runtime="OpenJDK Runtime Environment",vendor="Eclipse Adoptium",version="21.0.5+11-LTS"} 1.0 +# TYPE jvm_buffer_memory_used_bytes gauge +# HELP jvm_buffer_memory_used_bytes An estimate of the memory that the Java virtual machine is using for this buffer pool +jvm_buffer_memory_used_bytes{id="mapped - 'non-volatile memory'"} 0.0 +jvm_buffer_memory_used_bytes{id="mapped"} 0.0 +jvm_buffer_memory_used_bytes{id="direct"} 265988.0 +# TYPE 
netty_eventexecutor_tasks_pending gauge +# HELP netty_eventexecutor_tasks_pending +netty_eventexecutor_tasks_pending{name="vert.x-eventloop-thread-2"} 0.0 +netty_eventexecutor_tasks_pending{name="vert.x-eventloop-thread-1"} 0.0 +netty_eventexecutor_tasks_pending{name="vert.x-eventloop-thread-0"} 0.0 +netty_eventexecutor_tasks_pending{name="vert.x-acceptor-thread-0"} 0.0 +netty_eventexecutor_tasks_pending{name="vert.x-eventloop-thread-3"} 0.0 +# TYPE jvm_buffer_total_capacity_bytes gauge +# HELP jvm_buffer_total_capacity_bytes An estimate of the total capacity of the buffers in this pool +jvm_buffer_total_capacity_bytes{id="mapped - 'non-volatile memory'"} 0.0 +jvm_buffer_total_capacity_bytes{id="mapped"} 0.0 +jvm_buffer_total_capacity_bytes{id="direct"} 265987.0 +# TYPE jvm_gc_max_data_size_bytes gauge +# HELP jvm_gc_max_data_size_bytes Max size of long-lived heap memory pool +jvm_gc_max_data_size_bytes 4.192206848E9 +# TYPE jvm_memory_usage_after_gc gauge +# HELP jvm_memory_usage_after_gc The percentage of long-lived heap pool used after the last GC event, in the range [0..1] +jvm_memory_usage_after_gc{area="heap",pool="long-lived"} 0.0 +# TYPE http_server_bytes_written_max gauge +# HELP http_server_bytes_written_max Number of bytes sent by the server +http_server_bytes_written_max 12288.0 +# TYPE http_server_bytes_written summary +# HELP http_server_bytes_written Number of bytes sent by the server +http_server_bytes_written_count 4.0 +http_server_bytes_written_sum 16571.0 +# TYPE worker_pool_idle gauge +# HELP worker_pool_idle The number of resources from the pool currently used +worker_pool_idle{pool_name="vert.x-internal-blocking",pool_type="worker"} 20.0 +worker_pool_idle{pool_name="vert.x-worker-thread",pool_type="worker"} 199.0 +# TYPE worker_pool_ratio gauge +# HELP worker_pool_ratio Pool usage ratio +worker_pool_ratio{pool_name="vert.x-internal-blocking",pool_type="worker"} NaN +worker_pool_ratio{pool_name="vert.x-worker-thread",pool_type="worker"} 0.005 
+# TYPE jvm_memory_max_bytes gauge +# HELP jvm_memory_max_bytes The maximum amount of memory in bytes that can be used for memory management +jvm_memory_max_bytes{area="heap",id="G1 Survivor Space"} -1.0 +jvm_memory_max_bytes{area="heap",id="G1 Old Gen"} 4.192206848E9 +jvm_memory_max_bytes{area="nonheap",id="Metaspace"} -1.0 +jvm_memory_max_bytes{area="nonheap",id="CodeCache"} 5.0331648E7 +jvm_memory_max_bytes{area="heap",id="G1 Eden Space"} -1.0 +jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space"} 1.073741824E9 +# TYPE jvm_memory_used_bytes gauge +# HELP jvm_memory_used_bytes The amount of used memory +jvm_memory_used_bytes{area="heap",id="G1 Survivor Space"} 1.1491696E7 +jvm_memory_used_bytes{area="heap",id="G1 Old Gen"} 4.188796E7 +jvm_memory_used_bytes{area="nonheap",id="Metaspace"} 5.0020504E7 +jvm_memory_used_bytes{area="nonheap",id="CodeCache"} 1.2352896E7 +jvm_memory_used_bytes{area="heap",id="G1 Eden Space"} 5.6623104E7 +jvm_memory_used_bytes{area="nonheap",id="Compressed Class Space"} 6877464.0 +# TYPE netty_allocator_pooled_arenas gauge +# HELP netty_allocator_pooled_arenas +netty_allocator_pooled_arenas{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="heap"} 8.0 +netty_allocator_pooled_arenas{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="direct"} 8.0 +netty_allocator_pooled_arenas{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="heap"} 8.0 +netty_allocator_pooled_arenas{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="direct"} 8.0 +# TYPE example_list_size gauge +# HELP example_list_size +example_list_size 0.0 +# TYPE process_cpu_time_ns counter +# HELP process_cpu_time_ns The \"cpu time\" used by the Java Virtual Machine process +process_cpu_time_ns_total 6.28E9 +# TYPE jvm_gc_memory_allocated_bytes counter +# HELP jvm_gc_memory_allocated_bytes Incremented for an increase in the size of the (young) heap memory pool after one GC to before the next 
+jvm_gc_memory_allocated_bytes_total 0.0 +# TYPE process_files_max_files gauge +# HELP process_files_max_files The maximum file descriptor count +process_files_max_files 1048576.0 +# TYPE http_server_active_requests gauge +# HELP http_server_active_requests +http_server_active_requests 1.0 +# TYPE jvm_classes_unloaded_classes counter +# HELP jvm_classes_unloaded_classes The total number of classes unloaded since the Java virtual machine has started execution +jvm_classes_unloaded_classes_total 7.0 +# TYPE netty_allocator_memory_used gauge +# HELP netty_allocator_memory_used +netty_allocator_memory_used{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="heap"} 0.0 +netty_allocator_memory_used{allocator_type="PooledByteBufAllocator",id="1612048265",memory_type="direct"} 0.0 +netty_allocator_memory_used{allocator_type="UnpooledByteBufAllocator",id="2051878706",memory_type="direct"} 31.0 +netty_allocator_memory_used{allocator_type="UnpooledByteBufAllocator",id="125603901",memory_type="heap"} 0.0 +netty_allocator_memory_used{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="heap"} 0.0 +netty_allocator_memory_used{allocator_type="PooledByteBufAllocator",id="298568580",memory_type="direct"} 196608.0 +netty_allocator_memory_used{allocator_type="UnpooledByteBufAllocator",id="125603901",memory_type="direct"} 0.0 +netty_allocator_memory_used{allocator_type="UnpooledByteBufAllocator",id="2051878706",memory_type="heap"} 128.0 +# TYPE system_load_average_1m gauge +# HELP system_load_average_1m The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time +system_load_average_1m 0.12939453125 +# TYPE worker_pool_usage_seconds summary +# HELP worker_pool_usage_seconds Time spent using resources from the pool +worker_pool_usage_seconds_count{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 
+worker_pool_usage_seconds_sum{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_usage_seconds_count{pool_name="vert.x-worker-thread",pool_type="worker"} 5.0 +worker_pool_usage_seconds_sum{pool_name="vert.x-worker-thread",pool_type="worker"} 0.020086393 +# TYPE worker_pool_usage_seconds_max gauge +# HELP worker_pool_usage_seconds_max Time spent using resources from the pool +worker_pool_usage_seconds_max{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_usage_seconds_max{pool_name="vert.x-worker-thread",pool_type="worker"} 0.015867397 +# TYPE process_cpu_usage gauge +# HELP process_cpu_usage The \"recent cpu usage\" for the Java Virtual Machine process +process_cpu_usage 5.638340716874748E-4 +# TYPE jvm_classes_loaded_classes gauge +# HELP jvm_classes_loaded_classes The number of classes that are currently loaded in the Java virtual machine +jvm_classes_loaded_classes 11776.0 +# TYPE jvm_gc_live_data_size_bytes gauge +# HELP jvm_gc_live_data_size_bytes Size of long-lived heap memory pool after reclamation +jvm_gc_live_data_size_bytes 0.0 +# TYPE jvm_threads_peak_threads gauge +# HELP jvm_threads_peak_threads The peak live thread count since the Java virtual machine started or peak was reset +jvm_threads_peak_threads 83.0 +# TYPE jvm_threads_started_threads counter +# HELP jvm_threads_started_threads The total number of application threads started in the JVM +jvm_threads_started_threads_total 95.0 +# TYPE jvm_buffer_count_buffers gauge +# HELP jvm_buffer_count_buffers An estimate of the number of buffers in the pool +jvm_buffer_count_buffers{id="mapped - 'non-volatile memory'"} 0.0 +jvm_buffer_count_buffers{id="mapped"} 0.0 +jvm_buffer_count_buffers{id="direct"} 13.0 +# TYPE example_prime_number counter +# HELP example_prime_number +example_prime_number_total{type="even"} 1.0 +# TYPE worker_pool_queue_size gauge +# HELP worker_pool_queue_size Number of pending elements in the waiting queue 
+worker_pool_queue_size{pool_name="vert.x-internal-blocking",pool_type="worker"} 0.0 +worker_pool_queue_size{pool_name="vert.x-worker-thread",pool_type="worker"} 0.0 +# TYPE netty_allocator_pooled_chunk_size gauge +# HELP netty_allocator_pooled_chunk_size +netty_allocator_pooled_chunk_size{allocator_type="PooledByteBufAllocator",id="1612048265"} 65536.0 +netty_allocator_pooled_chunk_size{allocator_type="PooledByteBufAllocator",id="298568580"} 65536.0 +# TYPE process_files_open_files gauge +# HELP process_files_open_files The open file descriptor count +process_files_open_files 417.0 +# TYPE netty_allocator_pooled_threadlocal_caches gauge +# HELP netty_allocator_pooled_threadlocal_caches +netty_allocator_pooled_threadlocal_caches{allocator_type="PooledByteBufAllocator",id="1612048265"} 0.0 +netty_allocator_pooled_threadlocal_caches{allocator_type="PooledByteBufAllocator",id="298568580"} 2.0 +# EOF diff --git a/quarkus/tests/test_e2e.py b/quarkus/tests/test_e2e.py new file mode 100644 index 0000000000000..9897eef7cee99 --- /dev/null +++ b/quarkus/tests/test_e2e.py @@ -0,0 +1,12 @@ +# (C) Datadog, Inc. 2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) +from datadog_checks.base.constants import ServiceCheck +from datadog_checks.dev.utils import assert_service_checks + + +def test_metrics(dd_agent_check, dd_environment): + aggregator = dd_agent_check() + aggregator.assert_metric('quarkus.process.cpu.usage') + aggregator.assert_service_check('quarkus.openmetrics.health', ServiceCheck.OK, count=1) + assert_service_checks(aggregator) diff --git a/quarkus/tests/test_unit.py b/quarkus/tests/test_unit.py new file mode 100644 index 0000000000000..9f96137aa2e8c --- /dev/null +++ b/quarkus/tests/test_unit.py @@ -0,0 +1,89 @@ +# (C) Datadog, Inc. 
2024-present +# All rights reserved +# Licensed under a 3-clause BSD style license (see LICENSE) + +from pathlib import Path + +import pytest + +from datadog_checks.dev.utils import get_metadata_metrics +from datadog_checks.quarkus import QuarkusCheck + +EXPECTED_METRICS = [ + 'http_server.requests.seconds.max', + 'http_server.active_requests', + 'http_server.bytes_read.max', + 'http_server.bytes_written.max', + 'http_server.connections.seconds.max', + 'jvm.buffer.count_buffers', + 'jvm.buffer.memory_used.bytes', + 'jvm.buffer.total_capacity.bytes', + 'jvm.classes.loaded_classes', + 'jvm.gc.live_data_size.bytes', + 'jvm.gc.max_data_size.bytes', + 'jvm.gc.overhead', + 'jvm.memory.committed.bytes', + 'jvm.memory.max.bytes', + 'jvm.memory.usage_after_gc', + 'jvm.memory.used.bytes', + 'jvm.threads.daemon_threads', + 'jvm.threads.live_threads', + 'jvm.threads.peak_threads', + 'jvm.threads.states_threads', + 'netty.allocator.memory.pinned', + 'netty.allocator.memory.used', + 'netty.allocator.pooled.arenas', + 'netty.allocator.pooled.cache_size', + 'netty.allocator.pooled.chunk_size', + 'netty.allocator.pooled.threadlocal_caches', + 'netty.eventexecutor.tasks_pending', + 'process.cpu.usage', + 'process.files.max_files', + 'process.files.open_files', + 'process.uptime.seconds', + 'system.cpu.count', + 'system.cpu.usage', + 'system.load_average_1m', + 'worker_pool.active', + 'worker_pool.idle', + 'worker_pool.queue.delay.seconds.max', + 'worker_pool.queue.size', + 'worker_pool.ratio', + 'worker_pool.usage.seconds.max', +] + + +EXPECTED_SUMMARIES = [ + 'http_server.requests.seconds', + 'http_server.bytes_read', + 'http_server.bytes_written', + 'worker_pool.queue.delay.seconds', + 'worker_pool.usage.seconds', +] + + +def test_check(dd_run_check, aggregator, instance, mock_http_response): + # Given + mock_http_response(file_path=Path(__file__).parent.absolute() / "fixtures" / "quarkus_auto_metrics.txt") + check = QuarkusCheck('quarkus', {}, [instance]) + # When + 
dd_run_check(check) + # Then + for m in EXPECTED_METRICS: + aggregator.assert_metric('quarkus.' + m) + for sm in EXPECTED_SUMMARIES: + aggregator.assert_metric('quarkus.' + sm + '.count') + aggregator.assert_metric('quarkus.' + sm + '.sum') + aggregator.assert_all_metrics_covered() + aggregator.assert_metrics_using_metadata(get_metadata_metrics()) + + +def test_emits_critical_service_check_when_service_is_down(dd_run_check, aggregator, instance, mock_http_response): + # Given + mock_http_response(status_code=404) + check = QuarkusCheck('quarkus', {}, [instance]) + # When + with pytest.raises(Exception, match="requests.exceptions.HTTPError"): + dd_run_check(check) + # Then + aggregator.assert_service_check('quarkus.openmetrics.health', QuarkusCheck.CRITICAL)