diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index a7e3bd2b6..57778f680 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,9 +1,9 @@ -# Copyright 2022 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. name: Tests concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: "${{ github.workflow }}-${{ github.ref }}" cancel-in-progress: true on: @@ -16,7 +16,7 @@ on: jobs: lint: name: Lint - uses: canonical/data-platform-workflows/.github/workflows/lint.yaml@v13.1.1 + uses: canonical/data-platform-workflows/.github/workflows/lint.yaml@v13.2.0 unit-test: name: Unit test charm @@ -61,22 +61,22 @@ jobs: path: - . - ./tests/integration/relations/opensearch_provider/application-charm/ - uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v13.1.1 + uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v13.2.0 with: path-to-charm-directory: ${{ matrix.path }} cache: true integration-test: - name: Integration test charm + name: Integration test charm | 3.4.2 needs: - lint - unit-test - build - uses: canonical/data-platform-workflows/.github/workflows/integration_test_charm.yaml@v13.1.1 + uses: canonical/data-platform-workflows/.github/workflows/integration_test_charm.yaml@v13.2.0 with: artifact-prefix: packed-charm-cache-true cloud: lxd - juju-snap-channel: 3.3/stable + juju-agent-version: 3.4.2 secrets: # GitHub appears to redact each line of a multi-line secret # Avoid putting `{` or `}` on a line by itself so that it doesn't get redacted in logs diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 04f5ae7f7..8c5d5d44b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. name: Release to latest/edge @@ -32,13 +32,13 @@ jobs: build: name: Build charm - uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v13.1.1 + uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v13.2.0 release: name: Release charm needs: - build - uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v13.1.1 + uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v13.2.0 with: channel: 2/edge artifact-prefix: ${{ needs.build.outputs.artifact-prefix }} diff --git a/.github/workflows/sync_issue_to_jira.yaml b/.github/workflows/sync_issue_to_jira.yaml index 05441ab16..0b02129d9 100644 --- a/.github/workflows/sync_issue_to_jira.yaml +++ b/.github/workflows/sync_issue_to_jira.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
name: Sync issue to Jira diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py index 3ce69e155..b331bdce8 100644 --- a/lib/charms/data_platform_libs/v0/data_interfaces.py +++ b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -331,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 34 +LIBPATCH = 36 PYDEPS = ["ops>=2.0.0"] @@ -642,8 +642,8 @@ def _move_to_new_label_if_needed(self): return # Create a new secret with the new label - old_meta = self._secret_meta content = self._secret_meta.get_content() + self._secret_uri = None # I wish we could just check if we are the owners of the secret... try: @@ -651,7 +651,7 @@ def _move_to_new_label_if_needed(self): except ModelError as err: if "this unit is not the leader" not in str(err): raise - old_meta.remove_all_revisions() + self.current_label = None def set_content(self, content: Dict[str, str]) -> None: """Setting cached secret content.""" @@ -1586,7 +1586,7 @@ def _register_secret_to_relation( """ label = self._generate_secret_label(relation_name, relation_id, group) - # Fetchin the Secret's meta information ensuring that it's locally getting registered with + # Fetching the Secret's meta information ensuring that it's locally getting registered with CachedSecret(self._model, self.component, label, secret_id).meta def _register_secrets_to_relation(self, relation: Relation, params_name_list: List[str]): @@ -2309,7 +2309,7 @@ def _secrets(self) -> dict: return self._cached_secrets def _get_secret(self, group) -> Optional[Dict[str, str]]: - """Retrieveing secrets.""" + """Retrieving secrets.""" if not self.app: return if not self._secrets.get(group): @@ -3016,7 +3016,7 @@ class KafkaRequiresEvents(CharmEvents): # Kafka Provides and Requires -class KafkaProvidesData(ProviderData): +class KafkaProviderData(ProviderData): """Provider-side of the Kafka relation.""" def __init__(self, model: Model, relation_name: str) -> None: @@ -3059,12 +3059,12 @@ def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None: self.update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris}) -class KafkaProvidesEventHandlers(EventHandlers): +class KafkaProviderEventHandlers(EventHandlers): """Provider-side of the Kafka relation.""" on = KafkaProvidesEvents() # pyright: ignore [reportAssignmentType] - def __init__(self, charm: CharmBase, relation_data: KafkaProvidesData) -> None: + def __init__(self, charm: CharmBase, relation_data: KafkaProviderData) -> None: super().__init__(charm, relation_data) # Just to keep lint quiet, can't resolve inheritance. 
The same happened in super().__init__() above self.relation_data = relation_data @@ -3086,15 +3086,15 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: ) -class KafkaProvides(KafkaProvidesData, KafkaProvidesEventHandlers): +class KafkaProvides(KafkaProviderData, KafkaProviderEventHandlers): """Provider-side of the Kafka relation.""" def __init__(self, charm: CharmBase, relation_name: str) -> None: - KafkaProvidesData.__init__(self, charm.model, relation_name) - KafkaProvidesEventHandlers.__init__(self, charm, self) + KafkaProviderData.__init__(self, charm.model, relation_name) + KafkaProviderEventHandlers.__init__(self, charm, self) -class KafkaRequiresData(RequirerData): +class KafkaRequirerData(RequirerData): """Requirer-side of the Kafka relation.""" def __init__( @@ -3124,12 +3124,12 @@ def topic(self, value): self._topic = value -class KafkaRequiresEventHandlers(RequirerEventHandlers): +class KafkaRequirerEventHandlers(RequirerEventHandlers): """Requires-side of the Kafka relation.""" on = KafkaRequiresEvents() # pyright: ignore [reportAssignmentType] - def __init__(self, charm: CharmBase, relation_data: KafkaRequiresData) -> None: + def __init__(self, charm: CharmBase, relation_data: KafkaRequirerData) -> None: super().__init__(charm, relation_data) # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above self.relation_data = relation_data @@ -3142,10 +3142,13 @@ def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: return # Sets topic, extra user roles, and "consumer-group-prefix" in the relation - relation_data = { - f: getattr(self, f.replace("-", "_"), "") - for f in ["consumer-group-prefix", "extra-user-roles", "topic"] - } + relation_data = {"topic": self.relation_data.topic} + + if self.relation_data.extra_user_roles: + relation_data["extra-user-roles"] = self.relation_data.extra_user_roles + + if self.relation_data.consumer_group_prefix: + relation_data["consumer-group-prefix"] = self.relation_data.consumer_group_prefix self.relation_data.update_relation_data(event.relation.id, relation_data) @@ -3188,7 +3191,7 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: return -class KafkaRequires(KafkaRequiresData, KafkaRequiresEventHandlers): +class KafkaRequires(KafkaRequirerData, KafkaRequirerEventHandlers): """Provider-side of the Kafka relation.""" def __init__( @@ -3200,7 +3203,7 @@ def __init__( consumer_group_prefix: Optional[str] = None, additional_secret_fields: Optional[List[str]] = [], ) -> None: - KafkaRequiresData.__init__( + KafkaRequirerData.__init__( self, charm.model, relation_name, @@ -3209,7 +3212,7 @@ def __init__( consumer_group_prefix, additional_secret_fields, ) - KafkaRequiresEventHandlers.__init__(self, charm, self) + KafkaRequirerEventHandlers.__init__(self, charm, self) # Opensearch related events diff --git a/lib/charms/data_platform_libs/v0/s3.py b/lib/charms/data_platform_libs/v0/s3.py index 7beb113b6..f5614aaf6 100644 --- a/lib/charms/data_platform_libs/v0/s3.py +++ b/lib/charms/data_platform_libs/v0/s3.py @@ -137,7 +137,7 @@ def _on_credential_gone(self, event: CredentialsGoneEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 4 +LIBPATCH = 5 logger = logging.getLogger(__name__) @@ -212,7 +212,7 @@ class S3CredentialEvents(CharmEvents): class S3Provider(Object): """A provider handler for communicating S3 credentials to consumers.""" - on = 
S3CredentialEvents() # pyright: ignore [reportGeneralTypeIssues] + on = S3CredentialEvents() # pyright: ignore [reportAssignmentType] def __init__( self, @@ -481,6 +481,18 @@ def set_s3_api_version(self, relation_id: int, s3_api_version: str) -> None: """ self.update_connection_info(relation_id, {"s3-api-version": s3_api_version}) + def set_delete_older_than_days(self, relation_id: int, days: int) -> None: + """Sets the retention days for full backups in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + days: the value. + """ + self.update_connection_info(relation_id, {"delete-older-than-days": str(days)}) + def set_attributes(self, relation_id: int, attributes: List[str]) -> None: """Sets the connection attributes in application databag. @@ -580,6 +592,17 @@ def s3_api_version(self) -> Optional[str]: return self.relation.data[self.relation.app].get("s3-api-version") + @property + def delete_older_than_days(self) -> Optional[int]: + """Returns the retention days for full backups.""" + if not self.relation.app: + return None + + days = self.relation.data[self.relation.app].get("delete-older-than-days") + if days is None: + return None + return int(days) + @property def attributes(self) -> Optional[List[str]]: """Returns the attributes.""" @@ -613,7 +636,7 @@ class S3CredentialRequiresEvents(ObjectEvents): class S3Requirer(Object): """Requires-side of the s3 relation.""" - on = S3CredentialRequiresEvents() # pyright: ignore[reportGeneralTypeIssues] + on = S3CredentialRequiresEvents() # pyright: ignore[reportAssignmentType] def __init__( self, charm: ops.charm.CharmBase, relation_name: str, bucket_name: Optional[str] = None diff --git a/lib/charms/opensearch/v0/constants_charm.py b/lib/charms/opensearch/v0/constants_charm.py index 51c3fd542..abda63ef3 100644 --- a/lib/charms/opensearch/v0/constants_charm.py +++ b/lib/charms/opensearch/v0/constants_charm.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """In this file we declare the constants and enums used by the charm.""" @@ -42,6 +42,9 @@ ClusterHealthYellow = ( "1 or more 'replica' shards are not assigned, please scale your application up." ) +ClusterHealthRedUpgrade = ( + "1 or more 'primary' shards are not assigned in the cluster. Fix unhealthy units" +) IndexCreationFailed = "failed to create {index} index - deferring index-requested event..." UserCreationFailed = "failed to create users for {rel_name} relation {id}" PluginConfigChangeError = "Failed to apply config changes on the plugin." @@ -61,6 +64,10 @@ ) PluginConfigError = "Unexpected error during plugin configuration, check the logs" BackupSetupFailed = "Backup setup failed, check logs for details" +S3RelMissing = "Backup failover cluster missing S3 relation." +S3RelShouldNotExist = "This unit should not be related to S3" +S3RelDataIncomplete = "S3 relation data missing or incomplete." +S3RelUneligible = "Only orchestrator clusters should relate to S3." 
# Wait status RequestUnitServiceOps = "Requesting lock on operation: {}" @@ -101,7 +108,7 @@ KibanaserverRole = "kibana_server" # Opensearch Snap revision -OPENSEARCH_SNAP_REVISION = 40 # Keep in sync with `workload_version` file +OPENSEARCH_SNAP_REVISION = 50 # Keep in sync with `workload_version` file # User-face Backup ID format OPENSEARCH_BACKUP_ID_FORMAT = "%Y-%m-%dT%H:%M:%SZ" diff --git a/lib/charms/opensearch/v0/constants_secrets.py b/lib/charms/opensearch/v0/constants_secrets.py index b4ce902ff..bfda01597 100644 --- a/lib/charms/opensearch/v0/constants_secrets.py +++ b/lib/charms/opensearch/v0/constants_secrets.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """In this file we declare the constants and enums used by Juju secrets in Opensearch.""" diff --git a/lib/charms/opensearch/v0/constants_tls.py b/lib/charms/opensearch/v0/constants_tls.py index e3cf8e14e..583114311 100644 --- a/lib/charms/opensearch/v0/constants_tls.py +++ b/lib/charms/opensearch/v0/constants_tls.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """In this file we declare the constants and enums used by TLS related components.""" diff --git a/lib/charms/opensearch/v0/helper_charm.py b/lib/charms/opensearch/v0/helper_charm.py index fb288e623..db887613f 100644 --- a/lib/charms/opensearch/v0/helper_charm.py +++ b/lib/charms/opensearch/v0/helper_charm.py @@ -1,15 +1,18 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Utility functions for charms related operations.""" import re -import typing +from time import time_ns +from typing import TYPE_CHECKING +from charms.data_platform_libs.v0.data_interfaces import Scope +from charms.opensearch.v0.constants_charm import PeerRelationName from charms.opensearch.v0.helper_enums import BaseStrEnum from ops import CharmBase from ops.model import ActiveStatus, StatusBase -if typing.TYPE_CHECKING: +if TYPE_CHECKING: from charms.opensearch.v0.opensearch_base_charm import OpenSearchBaseCharm # The unique Charmhub library identifier, never change it @@ -115,3 +118,22 @@ def relation_departure_reason(charm: CharmBase, relation_name: str) -> RelDepart return RelDepartureReason.SCALE_DOWN return RelDepartureReason.REL_BROKEN + + +def trigger_peer_rel_changed( + charm: "OpenSearchBaseCharm", + only_by_leader: bool = False, + on_other_units: bool = True, + on_current_unit: bool = False, +) -> None: + """Force trigger a peer rel changed event.""" + if only_by_leader and not charm.unit.is_leader(): + return + + if on_other_units or not on_current_unit: + charm.peers_data.put(Scope.APP if only_by_leader else Scope.UNIT, "update-ts", time_ns()) + + if on_current_unit: + charm.on[PeerRelationName].relation_changed.emit( + charm.model.get_relation(PeerRelationName) + ) diff --git a/lib/charms/opensearch/v0/helper_cluster.py b/lib/charms/opensearch/v0/helper_cluster.py index b7195f883..837455ad4 100644 --- a/lib/charms/opensearch/v0/helper_cluster.py +++ b/lib/charms/opensearch/v0/helper_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
"""Utility classes and methods for getting cluster info, configuration info and suggestions.""" @@ -35,18 +35,8 @@ class ClusterTopology: """Class for creating the best possible configuration for a Node.""" @staticmethod - def suggest_roles(nodes: List[Node], planned_units: int) -> List[str]: - """Get roles for a Node. - - This method should be read in the context of a "rolling" start - - only 1 unit at a time will call this. - - For now, we don't allow to end-user control roles. - The logic here is, if number of planned units is: - — odd: "all" the nodes are cm_eligible nodes. - — even: "all - 1" are cm_eligible and 1 data node. - """ - # TODO: remove in https://github.com/canonical/opensearch-operator/issues/230 + def generated_roles() -> List[str]: + """Get generated roles for a Node.""" return ["data", "ingest", "ml", "coordinating_only", "cluster_manager"] @staticmethod @@ -68,7 +58,6 @@ def get_cluster_settings( @staticmethod def recompute_nodes_conf(app_name: str, nodes: List[Node]) -> Dict[str, Node]: - # TODO: remove in https://github.com/canonical/opensearch-operator/issues/230 """Recompute the configuration of all the nodes (cluster set to auto-generate roles).""" if not nodes: return {} @@ -85,7 +74,7 @@ def recompute_nodes_conf(app_name: str, nodes: List[Node]) -> Dict[str, Node]: nodes_by_name[node.name] = Node( name=node.name, # we do this in order to remove any non-default role / add any missing default role - roles=["data", "ingest", "ml", "coordinating_only", "cluster_manager"], + roles=ClusterTopology.generated_roles(), ip=node.ip, app_name=node.app_name, unit_number=node.unit_number, @@ -178,7 +167,6 @@ def nodes( temperature=obj.get("attributes", {}).get("temp"), ) nodes.append(node) - return nodes diff --git a/lib/charms/opensearch/v0/helper_conf_setter.py b/lib/charms/opensearch/v0/helper_conf_setter.py index 357c78745..099f96811 100755 --- a/lib/charms/opensearch/v0/helper_conf_setter.py +++ b/lib/charms/opensearch/v0/helper_conf_setter.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Utilities for editing yaml config files at any depth level and maintaining comments.""" diff --git a/lib/charms/opensearch/v0/helper_enums.py b/lib/charms/opensearch/v0/helper_enums.py index 6d8689651..105b9faeb 100644 --- a/lib/charms/opensearch/v0/helper_enums.py +++ b/lib/charms/opensearch/v0/helper_enums.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """In this file we declare the base enum types with string and other types' representations.""" diff --git a/lib/charms/opensearch/v0/helper_http.py b/lib/charms/opensearch/v0/helper_http.py index c10db9cbc..57a09bfcf 100644 --- a/lib/charms/opensearch/v0/helper_http.py +++ b/lib/charms/opensearch/v0/helper_http.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """File containing http related helpers.""" diff --git a/lib/charms/opensearch/v0/helper_networking.py b/lib/charms/opensearch/v0/helper_networking.py index 2aab1a423..5b1ed3ae4 100644 --- a/lib/charms/opensearch/v0/helper_networking.py +++ b/lib/charms/opensearch/v0/helper_networking.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
"""Helpers for networking related operations.""" diff --git a/lib/charms/opensearch/v0/helper_security.py b/lib/charms/opensearch/v0/helper_security.py index b88253403..ad2af1920 100644 --- a/lib/charms/opensearch/v0/helper_security.py +++ b/lib/charms/opensearch/v0/helper_security.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Helpers for security related operations, such as password generation etc.""" diff --git a/lib/charms/opensearch/v0/models.py b/lib/charms/opensearch/v0/models.py index 487d92ce7..620f156ab 100644 --- a/lib/charms/opensearch/v0/models.py +++ b/lib/charms/opensearch/v0/models.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Cluster-related data structures / model classes.""" @@ -196,19 +196,30 @@ class DeploymentDescription(Model): @root_validator def set_promotion_time(cls, values): # noqa: N805 """Set promotion time of a failover to a main CM.""" - if values["typ"] == DeploymentType.MAIN_ORCHESTRATOR: + if not values["promotion_time"] and values["typ"] == DeploymentType.MAIN_ORCHESTRATOR: values["promotion_time"] = datetime.now().timestamp() return values +class S3RelDataCredentials(Model): + """Model class for credentials passed on the PCluster relation.""" + + access_key: str + secret_key: str + + class PeerClusterRelDataCredentials(Model): """Model class for credentials passed on the PCluster relation.""" admin_username: str admin_password: str admin_password_hash: str + kibana_password: str + kibana_password_hash: str + monitor_password: str admin_tls: Dict[str, Optional[str]] + s3: Optional[S3RelDataCredentials] class PeerClusterRelData(Model): diff --git a/lib/charms/opensearch/v0/opensearch_backups.py b/lib/charms/opensearch/v0/opensearch_backups.py index c9e535bdd..1376bdafc 100644 --- a/lib/charms/opensearch/v0/opensearch_backups.py +++ b/lib/charms/opensearch/v0/opensearch_backups.py @@ -1,10 +1,16 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """OpenSearch Backup. -This file holds the implementation of the OpenSearchBackup class, as well as the state enum -and configuration. +This library holds the implementation of the OpenSearchBackup class, as well as the state enum +and configuration. It contains all the components for both small and large deployments. + +########################################################################################### +# +# Small deployments +# +########################################################################################### The OpenSearchBackup class listens to both relation changes from S3_RELATION and API calls and responses. The OpenSearchBackupPlugin holds the configuration info. The classes together @@ -41,12 +47,32 @@ class OpenSearchBaseCharm(CharmBase): def __init__(...): ... - self.backup = OpenSearchBackup(self) + self.backup = OpenSearchBackupFactory(self) + +########################################################################################### +# +# Large deployments +# +########################################################################################### + +For developers, there is no meaningful difference between small and large deployments. +They both use the same backup_factory() to return the correct object for their case. + +The large deployments expands the original concept of OpenSearchBackup to include other +juju applications that are not cluster_manager. 
This means a cluster may be a data-only or +even a failover cluster-manager and still interacts with s3-integrator at a certain level. + +The baseline is that every unit in the cluster must import the S3 credentials. The main +orchestrator will share these credentials via the peer-cluster relation. Failover and data +clusters will import that information from the peer-cluster relation. + +To implement the points above without causing too much disruption to the existing code, +a factory pattern has been adopted, where the main charm receives a OpenSearchBackupBase +object that corresponds to its own case (cluster-manager, failover, data, etc). """ import json import logging -import typing from datetime import datetime from typing import Any, Dict, List, Optional, Set, Tuple @@ -58,26 +84,31 @@ def __init__(...): BackupInDisabling, BackupSetupFailed, BackupSetupStart, + PeerClusterRelationName, PluginConfigError, RestoreInProgress, + S3RelMissing, + S3RelShouldNotExist, ) from charms.opensearch.v0.helper_cluster import ClusterState, IndexStateEnum from charms.opensearch.v0.helper_enums import BaseStrEnum +from charms.opensearch.v0.models import DeploymentType, PeerClusterRelData from charms.opensearch.v0.opensearch_exceptions import ( OpenSearchError, OpenSearchHttpError, OpenSearchNotFullyReadyError, ) from charms.opensearch.v0.opensearch_locking import OpenSearchNodeLock -from charms.opensearch.v0.opensearch_plugins import OpenSearchBackupPlugin, PluginState -from ops.charm import ActionEvent +from charms.opensearch.v0.opensearch_plugins import ( + OpenSearchBackupPlugin, + OpenSearchPluginConfig, + PluginState, +) +from ops.charm import ActionEvent, CharmBase from ops.framework import EventBase, Object from ops.model import BlockedStatus, MaintenanceStatus, WaitingStatus from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed -if typing.TYPE_CHECKING: - from charms.opensearch.v0.opensearch_base_charm import OpenSearchBaseCharm - # The unique Charmhub library identifier, never change it LIBID = "d301deee4d2c4c1b8e30cd3df8034be2" @@ -94,6 +125,7 @@ def __init__(...): # OpenSearch Backups S3_RELATION = "s3-credentials" S3_REPOSITORY = "s3-repository" +PEER_CLUSTER_S3_CONFIG_KEY = "s3_credentials" S3_REPO_BASE_PATH = "/" @@ -153,15 +185,305 @@ class BackupServiceState(BaseStrEnum): SNAPSHOT_FAILED_UNKNOWN = "snapshot failed for unknown reason" -class OpenSearchBackup(Object): +class OpenSearchBackupBase(Object): + """Works as parent for all backup classes. + + This class does a smooth transition between orchestrator and non-orchestrator clusters. + """ + + def __init__(self, charm: Object, relation_name: str = PeerClusterRelationName): + """Initializes the opensearch backup base. + + This class will not hold a s3_client object, as it is not intended to really + manage the relation besides waiting for the deployment description. 
+ """ + super().__init__(charm, relation_name) + self.charm = charm + + for event in [ + self.charm.on[S3_RELATION].relation_created, + self.charm.on[S3_RELATION].relation_joined, + self.charm.on[S3_RELATION].relation_changed, + self.charm.on[S3_RELATION].relation_departed, + self.charm.on[S3_RELATION].relation_broken, + ]: + self.framework.observe(event, self._on_s3_relation_event) + for event in [ + self.charm.on.create_backup_action, + self.charm.on.list_backups_action, + self.charm.on.restore_action, + ]: + self.framework.observe(event, self._on_s3_relation_action) + + def _on_s3_relation_event(self, event: EventBase) -> None: + """Defers the s3 relation events.""" + logger.info("Deployment description not yet available, deferring s3 relation event") + event.defer() + + def _on_s3_relation_action(self, event: EventBase) -> None: + """No deployment description yet, fail any actions.""" + logger.info("Deployment description not yet available, failing actions.") + event.fail("Failed: deployment description not yet available") + + def _request(self, *args, **kwargs) -> dict[str, Any] | None: + """Returns the output of OpenSearchDistribution.request() or throws an error. + + Request method can return one of many: Union[Dict[str, any], List[any], int] + and raise multiple types of errors. + + If int is returned, then throws an exception informing the HTTP request failed. + If the request fails, returns the error text or None if only status code is found. + + Raises: + - ValueError + """ + if "retries" not in kwargs.keys(): + kwargs["retries"] = 6 + if "timeout" not in kwargs.keys(): + kwargs["timeout"] = 10 + # We are interested to see the entire response + kwargs["resp_status_code"] = False + try: + result = self.charm.opensearch.request(*args, **kwargs) + except OpenSearchHttpError as e: + return e.response_body if e.response_body else None + return result if isinstance(result, dict) else None + + def _is_restore_in_progress(self) -> bool: + """Checks if the restore is currently in progress. + + Two options: + 1) no restore requested: return False + 2) check for each index shard: for all type=SNAPSHOT and stage=DONE, return False. + """ + indices_status = self._request("GET", "/_recovery?human") or {} + for info in indices_status.values(): + # Now, check the status of each shard + for shard in info["shards"]: + if shard["type"] == "SNAPSHOT" and shard["stage"] != "DONE": + return True + return False + + def is_backup_in_progress(self) -> bool: + """Returns True if backup is in progress, False otherwise. 
+ + We filter the _query_backup_status() and seek for the following states: + - SNAPSHOT_IN_PROGRESS + """ + if self._query_backup_status() in [ + BackupServiceState.SNAPSHOT_IN_PROGRESS, + BackupServiceState.RESPONSE_FAILED_NETWORK, + ]: + # We have a backup in progress or we cannot reach the API + # taking the "safe path" of informing a backup is in progress + return True + return False + + def _query_backup_status(self, backup_id: Optional[str] = None) -> BackupServiceState: + try: + for attempt in Retrying(stop=stop_after_attempt(5), wait=wait_fixed(5)): + with attempt: + target = f"_snapshot/{S3_REPOSITORY}/" + target += f"{backup_id.lower()}" if backup_id else "_all" + output = self._request("GET", target) + logger.debug(f"Backup status: {output}") + except RetryError as e: + logger.error(f"_request failed with: {e}") + return BackupServiceState.RESPONSE_FAILED_NETWORK + return self.get_service_status(output) + + def get_service_status( # noqa: C901 + self, response: dict[str, Any] | None + ) -> BackupServiceState: + """Returns the response status in a Enum. + + Based on: + https://github.com/opensearch-project/OpenSearch/blob/ + ba78d93acf1da6dae16952d8978de87cb4df2c61/ + server/src/main/java/org/opensearch/OpenSearchServerException.java#L837 + https://github.com/opensearch-project/OpenSearch/blob/ + ba78d93acf1da6dae16952d8978de87cb4df2c61/ + plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml + """ + if not response: + return BackupServiceState.SNAPSHOT_FAILED_UNKNOWN + + try: + if "error" not in response: + return BackupServiceState.SUCCESS + type = response["error"]["root_cause"][0]["type"] + reason = response["error"]["root_cause"][0]["reason"] + except KeyError as e: + logger.exception(e) + logger.error("response contained unknown error code") + return BackupServiceState.RESPONSE_FAILED_NETWORK + # Check if we error'ed b/c s3 repo is not configured, hence we are still + # waiting for the plugin to be configured + if type == "repository_exception" and REPO_NOT_CREATED_ERR in reason: + return BackupServiceState.REPO_NOT_CREATED + if type == "repository_exception" and REPO_CREATING_ERR in reason: + return BackupServiceState.REPO_CREATION_ERR + if type == "repository_exception": + return BackupServiceState.REPO_ERR_UNKNOWN + if type == "repository_missing_exception": + return BackupServiceState.REPO_MISSING + if type == "repository_verification_exception" and REPO_NOT_ACCESS_ERR in reason: + return BackupServiceState.REPO_S3_UNREACHABLE + if type == "illegal_argument_exception": + return BackupServiceState.ILLEGAL_ARGUMENT + if type == "snapshot_missing_exception": + return BackupServiceState.SNAPSHOT_MISSING + if type == "snapshot_restore_exception" and RESTORE_OPEN_INDEX_WITH_SAME_NAME in reason: + return BackupServiceState.SNAPSHOT_RESTORE_ERROR_INDEX_NOT_CLOSED + if type == "snapshot_restore_exception": + return BackupServiceState.SNAPSHOT_RESTORE_ERROR + return self.get_snapshot_status(response) + + def get_snapshot_status(self, response: Dict[str, Any] | None) -> BackupServiceState: + """Returns the snapshot status.""" + if not response: + return BackupServiceState.SNAPSHOT_FAILED_UNKNOWN + # Now, check snapshot status: + r_str = str(response) + if "IN_PROGRESS" in r_str: + return BackupServiceState.SNAPSHOT_IN_PROGRESS + if "PARTIAL" in r_str: + return BackupServiceState.SNAPSHOT_PARTIALLY_TAKEN + if "INCOMPATIBLE" in r_str: + return BackupServiceState.SNAPSHOT_INCOMPATIBILITY + if "FAILED" in r_str: + return 
BackupServiceState.SNAPSHOT_FAILED_UNKNOWN + return BackupServiceState.SUCCESS + + def is_idle_or_not_set(self) -> bool: + """Checks if the backup system is idle or not yet configured. + + "idle": configured but there are no backups nor restores in progress. + "not_set": set by the children classes + """ + return not (self.is_backup_in_progress() or self._is_restore_in_progress()) + + +class OpenSearchNonOrchestratorClusterBackup(OpenSearchBackupBase): + """Simpler implementation of backup relation for non-orchestrator clusters. + + In a nutshell, non-orchestrator clusters should receive the backup information via + peer-cluster relation instead, and must fail any action or major s3-relation events. + """ + + def __init__(self, charm: Object, relation_name: str = PeerClusterRelationName): + """Manager of OpenSearch backup relations.""" + super().__init__(charm, relation_name) + self.framework.observe( + self.charm.on[PeerClusterRelationName].relation_changed, + self._on_peer_relation_changed, + ) + self.framework.observe( + self.charm.on[S3_RELATION].relation_broken, self._on_s3_relation_broken + ) + + def _on_peer_relation_changed(self, event) -> None: + """Processes the non-orchestrator cluster events.""" + if not self.charm.plugin_manager.check_plugin_manager_ready(): + logger.warning("s3-changed: cluster not ready yet") + event.defer() + return + + if not (data := event.relation.data.get(event.app)) or not data.get("data"): + return + data = PeerClusterRelData.from_str(data["data"]) + s3_credentials = data.credentials.s3 + if not s3_credentials or not s3_credentials.access_key or not s3_credentials.secret_key: + # Just abandon this event, as the relation is not fully ready yet + return + + # https://github.com/canonical/opensearch-operator/issues/252 + # We need the repository-s3 to support two main relations: s3 OR peer-cluster + # Meanwhile, create the plugin manually and apply it + try: + plugin = OpenSearchPluginConfig( + secret_entries_to_del=[ + "s3.client.default.access_key", + "s3.client.default.secret_key", + ], + ) + self.charm.plugin_manager.apply_config(plugin) + except OpenSearchError as e: + logger.warning( + f"s3-changed: failed disabling with {str(e)}\n" + "repository-s3 maybe it was not enabled yet" + ) + # It must be able to enable the plugin + try: + plugin = OpenSearchPluginConfig( + secret_entries_to_add={ + "s3.client.default.access_key": s3_credentials.access_key, + "s3.client.default.secret_key": s3_credentials.secret_key, + }, + ) + self.charm.plugin_manager.apply_config(plugin) + except OpenSearchError as e: + self.charm.status.set(BlockedStatus(S3RelMissing)) + # There was an unexpected error, log it and block the unit + logger.error(e) + event.defer() + return + self.charm.status.clear(S3RelMissing) + + def _on_s3_relation_event(self, event: EventBase) -> None: + """Processes the non-orchestrator cluster events.""" + if self.charm.unit.is_leader(): + self.charm.status.set(BlockedStatus(S3RelShouldNotExist), app=True) + logger.info("Non-orchestrator cluster, abandon s3 relation event") + return + + def _on_s3_relation_broken(self, event: EventBase) -> None: + """Processes the non-orchestrator cluster events.""" + self.charm.status.clear(S3RelMissing) + if self.charm.unit.is_leader(): + self.charm.status.clear(S3RelShouldNotExist, app=True) + logger.info("Non-orchestrator cluster, abandon s3 relation event") + return + + def _on_s3_relation_action(self, event: EventBase) -> None: + """Deployment description available, non-orchestrator, fail any actions.""" +
event.fail("Failed: execute the action on the orchestrator cluster instead.") + + def _is_restore_in_progress(self) -> bool: + """Checks if the restore is currently in progress. + + Two options: + 1) no restore requested: return False + 2) check for each index shard: for all type=SNAPSHOT and stage=DONE, return False. + """ + try: + indices_status = self.charm.opensearch.request("GET", "/_recovery?human") or {} + except OpenSearchHttpError: + # Defaults to True if we have a failure, to avoid any actions due to + # intermittent connection issues. + logger.warning( + "_is_restore_in_progress: failed to get indices status" + " - assuming restore is in progress" + ) + return True + + for info in indices_status.values(): + # Now, check the status of each shard + for shard in info["shards"]: + if shard["type"] == "SNAPSHOT" and shard["stage"] != "DONE": + return True + return False + + +class OpenSearchBackup(OpenSearchBackupBase): """Implements backup relation and API management.""" - def __init__(self, charm: "OpenSearchBaseCharm"): + def __init__(self, charm: Object, relation_name: str = S3_RELATION): """Manager of OpenSearch backup relations.""" - super().__init__(charm, S3_RELATION) - self.charm = charm + super().__init__(charm, relation_name) + self.s3_client = S3Requirer(self.charm, relation_name) + # s3 relation handles the config options for s3 backups - self.s3_client = S3Requirer(self.charm, S3_RELATION) self.framework.observe(self.charm.on[S3_RELATION].relation_created, self._on_s3_created) self.framework.observe(self.charm.on[S3_RELATION].relation_broken, self._on_s3_broken) self.framework.observe( @@ -171,6 +493,17 @@ def __init__(self, charm: "OpenSearchBaseCharm"): self.framework.observe(self.charm.on.list_backups_action, self._on_list_backups_action) self.framework.observe(self.charm.on.restore_action, self._on_restore_backup_action) + def _on_s3_relation_event(self, event: EventBase) -> None: + """Overrides the parent method to process the s3 relation events, as we use s3_client. + + We run the peer cluster orchestrator's refresh on every new s3 information. + """ + self.charm.peer_cluster_provider.refresh_relation_data(event) + + def _on_s3_relation_action(self, event: EventBase) -> None: + """Just overloads the base method, as we process each action in this class.""" + pass + @property def _plugin_status(self): return self.charm.plugin_manager.get_plugin_status(OpenSearchBackupPlugin) @@ -305,6 +638,21 @@ def _restore(self, backup_id: int) -> Dict[str, Any]: return output["snapshot"] + def is_idle_or_not_set(self) -> bool: + """Checks if the backup system is idle or not yet configured. + + "idle": configured but there are no backups nor restores in progress. + "not_set": the `get_service_status` returns REPO_NOT_CREATED or REPO_MISSING. + + Raises: + OpenSearchHttpError: cluster is unreachable + """ + output = self._request("GET", f"_snapshot/{S3_REPOSITORY}") + return self.get_service_status(output) in [ + BackupServiceState.REPO_NOT_CREATED, + BackupServiceState.REPO_MISSING, + ] or not (self.is_backup_in_progress() or self._is_restore_in_progress()) + def _is_restore_complete(self) -> bool: """Checks if the restore is finished. @@ -312,13 +660,9 @@ def _is_restore_complete(self) -> bool: """ indices_status = self._request("GET", "/_recovery?human") if not indices_status: + # No restore has happened. 
Raise an exception raise OpenSearchRestoreCheckError("_is_restore_complete: failed to get indices status") - for info in indices_status.values(): - # Now, check the status of each shard - for shard in info["shards"]: - if shard["type"] == "SNAPSHOT" and shard["stage"] != "DONE": - return False - return True + return not self._is_restore_in_progress() def _is_backup_available_for_restore(self, backup_id: int) -> bool: """Checks if the backup_id exists and is ready for a restore.""" @@ -469,34 +813,6 @@ def _list_backups(self) -> Dict[int, str]: for snapshot in response.get("snapshots", []) } - def is_backup_in_progress(self) -> bool: - """Returns True if backup is in progress, False otherwise. - - We filter the _query_backup_status() and seek for the following states: - - SNAPSHOT_IN_PROGRESS - """ - if self._query_backup_status() in [ - BackupServiceState.SNAPSHOT_IN_PROGRESS, - BackupServiceState.RESPONSE_FAILED_NETWORK, - ]: - # We have a backup in progress or we cannot reach the API - # taking the "safe path" of informing a backup is in progress - return True - return False - - def _query_backup_status(self, backup_id: Optional[str] = None) -> BackupServiceState: - try: - for attempt in Retrying(stop=stop_after_attempt(5), wait=wait_fixed(5)): - with attempt: - target = f"_snapshot/{S3_REPOSITORY}/" - target += f"{backup_id.lower()}" if backup_id else "_all" - output = self._request("GET", target) - logger.debug(f"Backup status: {output}") - except RetryError as e: - logger.error(f"_request failed with: {e}") - return BackupServiceState.RESPONSE_FAILED_NETWORK - return self.get_service_status(output) - def _on_s3_credentials_changed(self, event: EventBase) -> None: # noqa: C901 """Calls the plugin manager config handler. @@ -570,10 +886,12 @@ def apply_api_config_if_needed(self) -> None: # (3) based on the response, set the message status if state != BackupServiceState.SUCCESS: logger.error(f"Failed to setup backup service with state {state}") - self.charm.status.set(BlockedStatus(BackupSetupFailed), app=True) + if self.charm.unit.is_leader(): + self.charm.status.set(BlockedStatus(BackupSetupFailed), app=True) self.charm.status.clear(BackupConfigureStart) return - self.charm.status.clear(BackupSetupFailed, app=True) + if self.charm.unit.is_leader(): + self.charm.status.clear(BackupSetupFailed, app=True) self.charm.status.clear(BackupConfigureStart) def _on_s3_created(self, _): @@ -727,75 +1045,12 @@ def can_use_s3_repository(self) -> bool: return False return True - def _request(self, *args, **kwargs) -> dict[str, Any] | None: - """Returns the output of OpenSearchDistribution.request() or throws an error. - - Request method can return one of many: Union[Dict[str, any], List[any], int] - and raise multiple types of errors. - - If int is returned, then throws an exception informing the HTTP request failed. - If the request fails, returns the error text or None if only status code is found. - - Raises: - - ValueError - """ - if "retries" not in kwargs.keys(): - kwargs["retries"] = 6 - if "timeout" not in kwargs.keys(): - kwargs["timeout"] = 10 - # We are interested to see the entire response - kwargs["resp_status_code"] = False - try: - result = self.charm.opensearch.request(*args, **kwargs) - except OpenSearchHttpError as e: - return e.response_body if e.response_body else None - return result if isinstance(result, dict) else None - def get_service_status( # noqa: C901 self, response: dict[str, Any] | None ) -> BackupServiceState: - """Returns the response status in a Enum. 
- - Based on: - https://github.com/opensearch-project/OpenSearch/blob/ - ba78d93acf1da6dae16952d8978de87cb4df2c61/ - server/src/main/java/org/opensearch/OpenSearchServerException.java#L837 - https://github.com/opensearch-project/OpenSearch/blob/ - ba78d93acf1da6dae16952d8978de87cb4df2c61/ - plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml - """ - if not response: - return BackupServiceState.SNAPSHOT_FAILED_UNKNOWN - - try: - if "error" not in response: - return BackupServiceState.SUCCESS - type = response["error"]["root_cause"][0]["type"] - reason = response["error"]["root_cause"][0]["reason"] - except KeyError as e: - logger.exception(e) - logger.error("response contained unknown error code") - return BackupServiceState.RESPONSE_FAILED_NETWORK - # Check if we error'ed b/c s3 repo is not configured, hence we are still - # waiting for the plugin to be configured - if type == "repository_exception" and REPO_NOT_CREATED_ERR in reason: - return BackupServiceState.REPO_NOT_CREATED - if type == "repository_exception" and REPO_CREATING_ERR in reason: - return BackupServiceState.REPO_CREATION_ERR - if type == "repository_exception": - return BackupServiceState.REPO_ERR_UNKNOWN - if type == "repository_missing_exception": - return BackupServiceState.REPO_MISSING - if type == "repository_verification_exception" and REPO_NOT_ACCESS_ERR in reason: - return BackupServiceState.REPO_S3_UNREACHABLE - if type == "illegal_argument_exception": - return BackupServiceState.ILLEGAL_ARGUMENT - if type == "snapshot_missing_exception": - return BackupServiceState.SNAPSHOT_MISSING - if type == "snapshot_restore_exception" and RESTORE_OPEN_INDEX_WITH_SAME_NAME in reason: - return BackupServiceState.SNAPSHOT_RESTORE_ERROR_INDEX_NOT_CLOSED - if type == "snapshot_restore_exception": - return BackupServiceState.SNAPSHOT_RESTORE_ERROR + """Returns the response status in a Enum.""" + if (status := super().get_service_status(response)) == BackupServiceState.SUCCESS: + return BackupServiceState.SUCCESS if ( "bucket" in self.s3_client.get_s3_connection_info() and S3_REPOSITORY in response @@ -804,21 +1059,25 @@ def get_service_status( # noqa: C901 == response[S3_REPOSITORY]["settings"]["bucket"] ): return BackupServiceState.REPO_NOT_CREATED_ALREADY_EXISTS - # Ensure this is not containing any information about snapshots, return SUCCESS - return self.get_snapshot_status(response) + return status - def get_snapshot_status(self, response: Dict[str, Any] | None) -> BackupServiceState: - """Returns the snapshot status.""" - if not response: - return BackupServiceState.SNAPSHOT_FAILED_UNKNOWN - # Now, check snapshot status: - r_str = str(response) - if "IN_PROGRESS" in r_str: - return BackupServiceState.SNAPSHOT_IN_PROGRESS - if "PARTIAL" in r_str: - return BackupServiceState.SNAPSHOT_PARTIALLY_TAKEN - if "INCOMPATIBLE" in r_str: - return BackupServiceState.SNAPSHOT_INCOMPATIBILITY - if "FAILED" in r_str: - return BackupServiceState.SNAPSHOT_FAILED_UNKNOWN - return BackupServiceState.SUCCESS + +def backup(charm: CharmBase) -> OpenSearchBackupBase: + """Implements the logic that returns the correct class according to the cluster type. + + This class is solely responsible for the creation of the correct S3 client manager. + + If this cluster is an orchestrator or failover cluster, then return the OpenSearchBackup. + Otherwise, return the OpenSearchNonOrchestratorBackup. + + There is also the condition where the deployment description does not exist yet. 
In this + case, return the base class OpenSearchBackupBase. This class solely defers all s3-related + events until the deployment description is available and the actual S3 object is allocated. + """ + if not charm.opensearch_peer_cm.deployment_desc(): + # Temporary condition: we are waiting for CM to show up and define which type + # of cluster are we. Once we have that defined, then we will process. + return OpenSearchBackupBase(charm) + elif charm.opensearch_peer_cm.deployment_desc().typ == DeploymentType.MAIN_ORCHESTRATOR: + return OpenSearchBackup(charm) + return OpenSearchNonOrchestratorClusterBackup(charm) diff --git a/lib/charms/opensearch/v0/opensearch_base_charm.py b/lib/charms/opensearch/v0/opensearch_base_charm.py index 562d5ee8e..2f5fd23a9 100644 --- a/lib/charms/opensearch/v0/opensearch_base_charm.py +++ b/lib/charms/opensearch/v0/opensearch_base_charm.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Base class for the OpenSearch Operators.""" @@ -54,7 +54,7 @@ generate_password, ) from charms.opensearch.v0.models import DeploymentDescription, DeploymentType -from charms.opensearch.v0.opensearch_backups import OpenSearchBackup +from charms.opensearch.v0.opensearch_backups import backup from charms.opensearch.v0.opensearch_config import OpenSearchConfig from charms.opensearch.v0.opensearch_distro import OpenSearchDistribution from charms.opensearch.v0.opensearch_exceptions import ( @@ -69,6 +69,7 @@ from charms.opensearch.v0.opensearch_fixes import OpenSearchFixes from charms.opensearch.v0.opensearch_health import HealthColors, OpenSearchHealth from charms.opensearch.v0.opensearch_internal_data import RelationDataStore, Scope +from charms.opensearch.v0.opensearch_keystore import OpenSearchKeystoreNotReadyYetError from charms.opensearch.v0.opensearch_locking import OpenSearchNodeLock from charms.opensearch.v0.opensearch_nodes_exclusions import ( ALLOCS_TO_DELETE, @@ -89,7 +90,10 @@ from charms.opensearch.v0.opensearch_relation_provider import OpenSearchProvider from charms.opensearch.v0.opensearch_secrets import OpenSearchSecrets from charms.opensearch.v0.opensearch_tls import OpenSearchTLS -from charms.opensearch.v0.opensearch_users import OpenSearchUserManager +from charms.opensearch.v0.opensearch_users import ( + OpenSearchUserManager, + OpenSearchUserMgmtError, +) from charms.tls_certificates_interface.v3.tls_certificates import ( CertificateAvailableEvent, ) @@ -138,16 +142,18 @@ class _StartOpenSearch(EventBase): This event will be deferred until OpenSearch starts. """ - def __init__(self, handle, *, ignore_lock=False): + def __init__(self, handle, *, ignore_lock=False, after_upgrade=False): super().__init__(handle) # Only used for force upgrade self.ignore_lock = ignore_lock + self.after_upgrade = after_upgrade def snapshot(self) -> Dict[str, Any]: - return {"ignore_lock": self.ignore_lock} + return {"ignore_lock": self.ignore_lock, "after_upgrade": self.after_upgrade} def restore(self, snapshot: Dict[str, Any]): self.ignore_lock = snapshot["ignore_lock"] + self.after_upgrade = snapshot["after_upgrade"] class _RestartOpenSearch(EventBase): @@ -164,6 +170,9 @@ class _UpgradeOpenSearch(_StartOpenSearch): `_StartOpenSearch` will be emitted. 
""" + def __init__(self, handle, *, ignore_lock=False): + super().__init__(handle, ignore_lock=ignore_lock) + class OpenSearchBaseCharm(CharmBase, abc.ABC): """Base class for OpenSearch charms.""" @@ -203,7 +212,7 @@ def __init__(self, *args, distro: Type[OpenSearchDistribution] = None): ) self.plugin_manager = OpenSearchPluginManager(self) - self.backup = OpenSearchBackup(self) + self.backup = backup(self) self.user_manager = OpenSearchUserManager(self) self.opensearch_provider = OpenSearchProvider(self) @@ -367,7 +376,9 @@ def _apply_peer_cm_directives_and_check_if_can_start(self) -> bool: return True if self.unit.is_leader(): - self.opensearch_peer_cm.apply_status_if_needed(deployment_desc) + self.opensearch_peer_cm.apply_status_if_needed( + deployment_desc, show_status_only_once=False + ) return False @@ -488,6 +499,8 @@ def _on_peer_relation_departed(self, event: RelationDepartedEvent): if node.name != event.departing_unit.name.replace("/", "-") ] + self.health.apply(wait_for_green_first=True) + if len(remaining_nodes) == self.app.planned_units(): self._compute_and_broadcast_updated_topology(remaining_nodes) else: @@ -524,7 +537,7 @@ def _on_opensearch_data_storage_detaching(self, _: StorageDetachingEvent): # no # we attempt to flush the translog to disk if self.opensearch.is_node_up(): try: - self.opensearch.request("POST", "/_flush?wait_for_ongoing") + self.opensearch.request("POST", "/_flush") except OpenSearchHttpError: # if it's a failed attempt we move on pass @@ -546,7 +559,7 @@ def _on_opensearch_data_storage_detaching(self, _: StorageDetachingEvent): # no # release lock self.node_lock.release() - def _on_update_status(self, event: UpdateStatusEvent): + def _on_update_status(self, event: UpdateStatusEvent): # noqa: C901 """On update status event. We want to periodically check for the following: @@ -571,8 +584,10 @@ def _on_update_status(self, event: UpdateStatusEvent): if self.unit.is_leader(): self.opensearch_exclusions.cleanup() - health = self.health.apply() - if health not in [HealthColors.GREEN, HealthColors.IGNORE]: + if (health := self.health.apply(wait_for_green_first=True)) not in [ + HealthColors.GREEN, + HealthColors.IGNORE, + ]: event.defer() if health == HealthColors.UNKNOWN: @@ -595,6 +610,7 @@ def _on_update_status(self, event: UpdateStatusEvent): def _on_config_changed(self, event: ConfigChangedEvent): # noqa C901 """On config changed event. 
Useful for IP changes or for user provided config changes.""" + restart_requested = False if self.opensearch_config.update_host_if_needed(): self.status.set(MaintenanceStatus(TLSNewCertsRequested)) self._delete_stored_tls_resources() @@ -603,42 +619,41 @@ def _on_config_changed(self, event: ConfigChangedEvent): # noqa C901 # since when an IP change happens, "_on_peer_relation_joined" won't be called, # we need to alert the leader that it must recompute the node roles for any unit whose # roles were changed while the current unit was cut-off from the rest of the network - self._on_peer_relation_joined(event) + self._on_peer_relation_joined( + RelationJoinedEvent(event.handle, PeerRelationName, self.app, self.unit) + ) + restart_requested = True previous_deployment_desc = self.opensearch_peer_cm.deployment_desc() if self.unit.is_leader(): # run peer cluster manager processing + # todo add check here if the diff can be known from now on already self.opensearch_peer_cm.run() # handle cluster change to main-orchestrator (i.e: init_hold: true -> false) self._handle_change_to_main_orchestrator_if_needed(event, previous_deployment_desc) - elif not previous_deployment_desc: - # deployment desc not initialized yet by leader - event.defer() - return try: - if self.upgrade_in_progress: - logger.warning( - "Changing config during an upgrade is not supported. The charm may be in a broken, unrecoverable state" - ) - event.defer() - return if self.unit.is_leader(): self.status.set(MaintenanceStatus(PluginConfigCheck), app=True) - if self.plugin_manager.run(): - if not self.opensearch.is_node_up(): - # We applied the config changes to a charm that is not yet started - # we can simply return here + if self.plugin_manager.run() and not restart_requested: + if self.upgrade_in_progress: + logger.warning( + "Changing config during an upgrade is not supported. 
The charm may be in a broken, unrecoverable state" + ) + event.defer() return self._restart_opensearch_event.emit() - except OpenSearchPluginError: + + except (OpenSearchPluginError, OpenSearchKeystoreNotReadyYetError) as e: if self.unit.is_leader(): self.status.clear(PluginConfigCheck, app=True) - self.status.set(BlockedStatus(PluginConfigChangeError), app=True) + if isinstance(e, OpenSearchPluginError): + self.status.set(BlockedStatus(PluginConfigChangeError), app=True) event.defer() return + if self.unit.is_leader(): self.status.clear(PluginConfigCheck, app=True) self.status.clear(PluginConfigChangeError, app=True) @@ -796,7 +811,7 @@ def _handle_change_to_main_orchestrator_if_needed( # we check if we need to create the admin user if not self.is_admin_user_configured(): - self._put_admin_user() + self._put_or_update_internal_user_leader(AdminUser) # we check if we need to generate the admin certificate if missing if not self.is_tls_fully_configured(): @@ -808,6 +823,24 @@ def _handle_change_to_main_orchestrator_if_needed( def _start_opensearch(self, event: _StartOpenSearch) -> None: # noqa: C901 """Start OpenSearch, with a generated or passed conf, if all resources configured.""" + if self.opensearch.is_started(): + try: + self._post_start_init(event) + except ( + OpenSearchHttpError, + OpenSearchNotFullyReadyError, + ): + event.defer() + except OpenSearchUserMgmtError as e: + # Either generic start failure or cluster is not ready to create the internal users + logger.warning(e) + self.node_lock.release() + self.status.set(BlockedStatus(ServiceStartError)) + event.defer() + return + + self.peers_data.delete(Scope.UNIT, "started") + if not self.node_lock.acquired: # (Attempt to acquire lock even if `event.ignore_lock`) if event.ignore_lock: @@ -817,13 +850,6 @@ def _start_opensearch(self, event: _StartOpenSearch) -> None: # noqa: C901 logger.debug("Lock to start opensearch not acquired. Will retry next event") event.defer() return - self.peers_data.delete(Scope.UNIT, "started") - if self.opensearch.is_started(): - try: - self._post_start_init(event) - except (OpenSearchHttpError, OpenSearchNotFullyReadyError): - event.defer() - return if not self._can_service_start(): self.node_lock.release() @@ -866,15 +892,15 @@ def _start_opensearch(self, event: _StartOpenSearch) -> None: # noqa: C901 ) ) self._post_start_init(event) - except (OpenSearchStartTimeoutError, OpenSearchNotFullyReadyError): + except (OpenSearchHttpError, OpenSearchStartTimeoutError, OpenSearchNotFullyReadyError): event.defer() - except OpenSearchStartError as e: - logger.exception(e) + except (OpenSearchStartError, OpenSearchUserMgmtError) as e: + logger.warning(e) self.node_lock.release() self.status.set(BlockedStatus(ServiceStartError)) event.defer() - def _post_start_init(self, event: EventBase): + def _post_start_init(self, event: _StartOpenSearch): # noqa: C901 """Initialization post OpenSearch start.""" # initialize the security index if needed (and certs written on disk etc.)
if self.unit.is_leader() and not self.peers_data.get( @@ -890,6 +916,18 @@ def _post_start_init(self, event: EventBase): if not self.opensearch.is_node_up(): raise OpenSearchNotFullyReadyError("Node started but not full ready yet.") + try: + nodes = self._get_nodes(use_localhost=not self.alt_hosts) + except OpenSearchHttpError: + logger.exception("Failed to get online nodes") + event.defer() + return + for node in nodes: + if node.name == self.unit_name: + break + else: + raise OpenSearchNotFullyReadyError("Node online but not in cluster.") + # cleanup bootstrap conf in the node if self.peers_data.get(Scope.UNIT, "bootstrap_contributor"): self._cleanup_bootstrap_conf_if_applies() @@ -899,8 +937,18 @@ def _post_start_init(self, event: EventBase): self.node_lock.release() - self._upgrade.unit_state = upgrade.UnitState.HEALTHY - self._reconcile_upgrade() + if event.after_upgrade: + try: + self.opensearch.request( + "PUT", + "/_cluster/settings", + # Reset to default value + payload={"persistent": {"cluster.routing.allocation.enable": None}}, + ) + except OpenSearchHttpError: + logger.exception("Failed to re-enable allocation after upgrade") + event.defer() + return self.peers_data.put(Scope.UNIT, "started", True) @@ -908,27 +956,83 @@ def _post_start_init(self, event: EventBase): self.opensearch_fixes.apply_on_start() # apply cluster health - self.health.apply() + self.health.apply(wait_for_green_first=True, app=self.unit.is_leader()) - if self.unit.is_leader(): + if ( + self.unit.is_leader() + and self.opensearch_peer_cm.deployment_desc().typ == DeploymentType.MAIN_ORCHESTRATOR + ): # Creating the monitoring user - self._put_or_update_internal_user_leader(COSUser) + self._put_or_update_internal_user_leader(COSUser, update=False) + + self.unit.open_port("tcp", 9200) # clear waiting to start status self.status.clear(WaitingToStart) - self.unit.open_port("tcp", 9200) + if event.after_upgrade: + health = self.health.get(local_app_only=False, wait_for_green_first=True) + self.health.apply_for_unit_during_upgrade(health) + + # Cluster is considered healthy if green or yellow + # TODO future improvement: try to narrow scope to just green or green + yellow in + # specific cases + # https://github.com/canonical/opensearch-operator/issues/268 + # See https://chat.canonical.com/canonical/pl/s5j64ekxwi8epq53kzhd8fhrco and + # https://chat.canonical.com/canonical/pl/zaizx3bu3j8ftfcw67qozw9dbo + # For now, we need to allow yellow because + # "During a rolling upgrade, primary shards assigned to a node running the new + # version cannot have their replicas assigned to a node with the old version. The new + # version might have a different data format that is not understood by the old + # version. + # + # "If it is not possible to assign the replica shards to another node (there is only + # one upgraded node in the cluster), the replica shards remain unassigned and status + # stays `yellow`. + # + # "In this case, you can proceed once there are no initializing or relocating shards + # (check the `init` and `relo` columns). + # + # "As soon as another node is upgraded, the replicas can be assigned and the status + # will change to `green`." 
+ # + # from + # https://www.elastic.co/guide/en/elastic-stack/8.13/upgrading-elasticsearch.html#upgrading-elasticsearch + # + # If `health_ == HealthColors.YELLOW`, no shards are initializing or relocating + # (otherwise `health_` would be `HealthColors.YELLOW_TEMP`) + if health not in (HealthColors.GREEN, HealthColors.YELLOW): + logger.error( + "Cluster is not healthy after upgrade. Manual intervention required. To rollback, `juju refresh` to the previous revision" + ) + event.defer() + return + elif health == HealthColors.YELLOW: + # TODO future improvement: + # https://github.com/canonical/opensearch-operator/issues/268 + logger.warning( + "Cluster is yellow. Upgrade may cause data loss if cluster is yellow for reason other than primary shards on upgraded unit & not enough upgraded units available for replica shards" + ) + else: + # apply cluster health + self.health.apply() + + self._upgrade.unit_state = upgrade.UnitState.HEALTHY + logger.debug("Set upgrade unit state to healthy") + self._reconcile_upgrade() # update the peer cluster rel data with new IP in case of main cluster manager if self.opensearch_peer_cm.deployment_desc().typ != DeploymentType.OTHER: if self.opensearch_peer_cm.is_peer_cluster_orchestrator_relation_set(): self.peer_cluster_provider.refresh_relation_data(event) - def _stop_opensearch(self) -> None: + def _stop_opensearch(self, *, restart=False) -> None: """Stop OpenSearch if possible.""" self.status.set(WaitingStatus(ServiceIsStopping)) if self.opensearch.is_node_up(): + # TODO: we should probably NOT have any exclusion on restart + # https://chat.canonical.com/canonical/pl/bgndmrfxr7fbpgmwpdk3hin93c # 1. Add current node to the voting + alloc exclusions self.opensearch_exclusions.add_current() @@ -936,9 +1040,12 @@ def _stop_opensearch(self) -> None: # 2. stop the service self.opensearch.stop() + self.peers_data.delete(Scope.UNIT, "started") self.status.set(WaitingStatus(ServiceStopped)) # 3. 
Remove the exclusions + # TODO: we should probably NOT have any exclusion on restart + # https://chat.canonical.com/canonical/pl/bgndmrfxr7fbpgmwpdk3hin93c self.opensearch_exclusions.delete_current() def _restart_opensearch(self, event: _RestartOpenSearch) -> None: @@ -949,7 +1056,7 @@ def _restart_opensearch(self, event: _RestartOpenSearch) -> None: return try: - self._stop_opensearch() + self._stop_opensearch(restart=True) except OpenSearchStopError as e: logger.exception(e) self.node_lock.release() @@ -959,7 +1066,7 @@ def _restart_opensearch(self, event: _RestartOpenSearch) -> None: self._start_opensearch_event.emit() - def _upgrade_opensearch(self, event: _UpgradeOpenSearch) -> None: + def _upgrade_opensearch(self, event: _UpgradeOpenSearch) -> None: # noqa: C901 """Upgrade OpenSearch.""" logger.debug("Attempting to acquire lock for upgrade") if not self.node_lock.acquired: @@ -972,9 +1079,26 @@ def _upgrade_opensearch(self, event: _UpgradeOpenSearch) -> None: return logger.debug("Acquired lock for upgrade") + # https://www.elastic.co/guide/en/elastic-stack/8.13/upgrading-elasticsearch.html + try: + self.opensearch.request( + "PUT", + "/_cluster/settings", + payload={"persistent": {"cluster.routing.allocation.enable": "primaries"}}, + ) + except OpenSearchHttpError: + logger.exception("Failed to disable shard allocation before upgrade") + self.node_lock.release() + event.defer() + return + try: + self.opensearch.request("POST", "/_flush", retries=3) + except OpenSearchHttpError as e: + logger.debug("Failed to flush before upgrade", exc_info=e) + logger.debug("Stopping OpenSearch before upgrade") try: - self._stop_opensearch() + self._stop_opensearch(restart=True) except OpenSearchStopError as e: logger.exception(e) self.node_lock.release() @@ -986,7 +1110,7 @@ def _upgrade_opensearch(self, event: _UpgradeOpenSearch) -> None: self._upgrade.upgrade_unit(snap=self.opensearch) logger.debug("Starting OpenSearch after upgrade") - self._start_opensearch_event.emit(ignore_lock=event.ignore_lock) + self._start_opensearch_event.emit(ignore_lock=event.ignore_lock, after_upgrade=True) def _can_service_start(self) -> bool: """Return if the opensearch service can start.""" @@ -1009,7 +1133,10 @@ def _can_service_start(self) -> bool: # overloading the cluster, units must be started one at a time. So we defer starting # opensearch until all shards in other units are in a "started" or "unassigned" state. 
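For context on the upgrade flow above: before the node is stopped, shard allocation is pinned to primaries and a flush is requested, and once the upgraded node has rejoined, `_post_start_init` resets the setting to its default. A minimal standalone sketch of those three REST calls, assuming a node reachable on localhost with basic auth; the endpoint URL, credentials and TLS handling are placeholders, not the charm's actual helpers:

```python
import requests

BASE = "https://localhost:9200"          # placeholder endpoint
AUTH = ("admin", "admin-password")       # placeholder credentials


def prepare_node_for_upgrade() -> None:
    """Pin shard allocation to primaries and flush before stopping the node."""
    requests.put(
        f"{BASE}/_cluster/settings",
        json={"persistent": {"cluster.routing.allocation.enable": "primaries"}},
        auth=AUTH, verify=False, timeout=10,
    ).raise_for_status()
    # A failed flush is not fatal: the hunk above only logs it and carries on.
    requests.post(f"{BASE}/_flush", auth=AUTH, verify=False, timeout=30)


def reenable_allocation_after_upgrade() -> None:
    """Reset the allocation setting to its default once the node is back."""
    requests.put(
        f"{BASE}/_cluster/settings",
        json={"persistent": {"cluster.routing.allocation.enable": None}},
        auth=AUTH, verify=False, timeout=10,
    ).raise_for_status()
```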
try: - if self.health.apply(use_localhost=False, app=False) == HealthColors.YELLOW_TEMP: + if ( + self.health.apply(wait_for_green_first=True, use_localhost=False, app=False) + == HealthColors.YELLOW_TEMP + ): return False except OpenSearchHttpError: # this means that the leader unit is not reachable (not started yet), @@ -1085,18 +1212,25 @@ def _purge_users(self): if user != "_meta": self.opensearch.config.delete("opensearch-security/internal_users.yml", user) - def _put_or_update_internal_user_leader(self, user: str, pwd: Optional[str] = None) -> None: + def _put_or_update_internal_user_leader( + self, user: str, pwd: Optional[str] = None, update: bool = True + ) -> None: """Create system user or update it with a new password.""" # Leader is to set new password and hash, others populate existing hash locally if not self.unit.is_leader(): logger.error("Credential change can be only performed by the leader unit.") return + secret = self.secrets.get(Scope.APP, self.secrets.password_key(user)) + if secret and not update: + self._put_or_update_internal_user_unit(user) + return + hashed_pwd, pwd = generate_hashed_password(pwd) # Updating security index # We need to do this for all credential changes - if secret := self.secrets.get(Scope.APP, self.secrets.password_key(user)): + if secret: self.user_manager.update_user_password(user, hashed_pwd) # In case it's a new user, OR it's a system user (that has an entry in internal_users.yml) @@ -1115,7 +1249,7 @@ def _put_or_update_internal_user_leader(self, user: str, pwd: Optional[str] = No if user == AdminUser: self.peers_data.put(Scope.APP, "admin_user_initialized", True) - def _put_or_update_internal_user_unit(self, user: str, pwd: Optional[str] = None) -> None: + def _put_or_update_internal_user_unit(self, user: str) -> None: """Create system user or update it with a new password.""" # Leader is to set new password and hash, others populate existing hash locally hashed_pwd = self.secrets.get(Scope.APP, self.secrets.hash_key(user)) @@ -1168,14 +1302,7 @@ def _get_nodes(self, use_localhost: bool) -> List[Node]: def _set_node_conf(self, nodes: List[Node]) -> None: """Set the configuration of the current node / unit.""" - # retrieve the updated conf if exists - update_conf = (self.peers_data.get_object(Scope.APP, "nodes_config") or {}).get( - self.unit_name - ) - if update_conf: - update_conf = Node.from_dict(update_conf) - - # set default generated roles, or the ones passed in the updated conf + # set user provided roles if any, else generate base roles if ( deployment_desc := self.opensearch_peer_cm.deployment_desc() ).start == StartMode.WITH_PROVIDED_ROLES: @@ -1184,6 +1311,7 @@ def _set_node_conf(self, nodes: List[Node]) -> None: # This is the case where the 1st and main orchestrator to be deployed with no # "data" role in the provided roles, we need to add the role to be able to create # and store the security index + # todo: rework: delay sec index init until 1st data node / handle red health if ( self.unit.is_leader() and deployment_desc.typ == DeploymentType.MAIN_ORCHESTRATOR @@ -1193,11 +1321,7 @@ def _set_node_conf(self, nodes: List[Node]) -> None: computed_roles.append("data") self.peers_data.put(Scope.UNIT, "remove-data-role", True) else: - computed_roles = ( - update_conf.roles - if update_conf - else ClusterTopology.suggest_roles(nodes, self.app.planned_units()) - ) + computed_roles = ClusterTopology.generated_roles() cm_names = ClusterTopology.get_cluster_managers_names(nodes) cm_ips = ClusterTopology.get_cluster_managers_ips(nodes) 
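The leader-only user handling above (`_put_or_update_internal_user_leader`) hashes a password and either updates an existing internal user or creates a new one through the charm's `user_manager`. A rough standalone equivalent against the OpenSearch Security REST API is sketched below; the host, admin credentials and the bcrypt hashing are illustrative assumptions, not the charm's own helpers:

```python
import bcrypt
import requests

BASE = "https://localhost:9200"            # placeholder endpoint
ADMIN_AUTH = ("admin", "admin-password")   # placeholder credentials


def put_or_update_internal_user(name: str, password: str) -> dict:
    """Create or update an internal user with a pre-hashed password."""
    hashed = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
    resp = requests.put(
        f"{BASE}/_plugins/_security/api/internalusers/{name}",
        json={"hash": hashed},
        auth=ADMIN_AUTH, verify=False, timeout=10,
    )
    resp.raise_for_status()
    # The Security API reports "CREATED" for a new user and "OK" (with an
    # "... updated" message) for an existing one, which is what the relaxed
    # check in opensearch_users.create_user further down in this diff accepts.
    return resp.json()
```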
@@ -1285,7 +1409,13 @@ def _recompute_roles_if_needed(self, event: RelationChangedEvent): try: nodes = self._get_nodes(self.opensearch.is_node_up()) if len(nodes) < self.app.planned_units(): + if self._is_peer_rel_changed_deferred: + # We already deferred this event during this Juju event. Retry on the next + # Juju event. + return event.defer() + # If the handler is called again within this Juju hook, we will abandon the event + self._is_peer_rel_changed_deferred = True return self._compute_and_broadcast_updated_topology(nodes) diff --git a/lib/charms/opensearch/v0/opensearch_config.py b/lib/charms/opensearch/v0/opensearch_config.py index eee1f1a12..4b51c0da5 100644 --- a/lib/charms/opensearch/v0/opensearch_config.py +++ b/lib/charms/opensearch/v0/opensearch_config.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Class for Setting configuration in opensearch config files.""" diff --git a/lib/charms/opensearch/v0/opensearch_distro.py b/lib/charms/opensearch/v0/opensearch_distro.py index 4c9e15d95..960029e39 100644 --- a/lib/charms/opensearch/v0/opensearch_distro.py +++ b/lib/charms/opensearch/v0/opensearch_distro.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Base class for Opensearch distributions.""" @@ -107,7 +107,7 @@ def _is_connected(): self._start_service() start = datetime.now() - while not _is_connected() and (datetime.now() - start).seconds < 75: + while not _is_connected() and (datetime.now() - start).seconds < 180: time.sleep(3) else: raise OpenSearchStartTimeoutError() diff --git a/lib/charms/opensearch/v0/opensearch_exceptions.py b/lib/charms/opensearch/v0/opensearch_exceptions.py index 64c85e4e0..509513f9b 100644 --- a/lib/charms/opensearch/v0/opensearch_exceptions.py +++ b/lib/charms/opensearch/v0/opensearch_exceptions.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """File containing all OpenSearch related exceptions.""" diff --git a/lib/charms/opensearch/v0/opensearch_fixes.py b/lib/charms/opensearch/v0/opensearch_fixes.py index e2a29c4f8..e243446f4 100644 --- a/lib/charms/opensearch/v0/opensearch_fixes.py +++ b/lib/charms/opensearch/v0/opensearch_fixes.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Base class for the OpenSearch Fixes of bugs introduced by upstream.""" @@ -43,4 +43,4 @@ def _reconfigure_replicas_of_builtin_indices(self): ) except OpenSearchHttpError as e: if e.response_code != 404: - raise + continue diff --git a/lib/charms/opensearch/v0/opensearch_health.py b/lib/charms/opensearch/v0/opensearch_health.py index 0513d9cb7..4b4f131e7 100644 --- a/lib/charms/opensearch/v0/opensearch_health.py +++ b/lib/charms/opensearch/v0/opensearch_health.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
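The `_is_peer_rel_changed_deferred` flag added to `_recompute_roles_if_needed` above is a defer-once-per-dispatch guard: if the handler already deferred during the current Juju event, a later invocation in the same dispatch returns instead of queueing another deferral. A small generic sketch of the pattern with the ops framework, using illustrative charm and handler names:

```python
import ops


class ExampleCharm(ops.CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        # Reset on every dispatch, since the charm object is rebuilt per Juju event.
        self._peer_changed_deferred = False
        self.framework.observe(
            self.on["example-peers"].relation_changed, self._on_peer_relation_changed
        )

    def _on_peer_relation_changed(self, event: ops.RelationChangedEvent) -> None:
        if not self._all_units_reported():
            if self._peer_changed_deferred:
                # Already deferred once during this dispatch; retry on the next event.
                return
            event.defer()
            self._peer_changed_deferred = True
            return
        self._recompute_topology()

    def _all_units_reported(self) -> bool:
        return False  # illustrative precondition

    def _recompute_topology(self) -> None:
        pass  # illustrative follow-up work
```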
"""Base class for the OpenSearch Health management.""" @@ -7,16 +7,20 @@ from charms.opensearch.v0.constants_charm import ( ClusterHealthRed, + ClusterHealthRedUpgrade, ClusterHealthYellow, WaitingForBusyShards, WaitingForSpecificBusyShards, ) -from charms.opensearch.v0.helper_charm import Status +from charms.opensearch.v0.helper_charm import Status, trigger_peer_rel_changed from charms.opensearch.v0.helper_cluster import ClusterState from charms.opensearch.v0.models import StartMode -from charms.opensearch.v0.opensearch_exceptions import OpenSearchHttpError -from charms.opensearch.v0.opensearch_internal_data import Scope -from ops.model import BlockedStatus, WaitingStatus +from charms.opensearch.v0.opensearch_exceptions import ( + OpenSearchHAError, + OpenSearchHttpError, +) +from ops.model import BlockedStatus, MaintenanceStatus, WaitingStatus +from tenacity import retry, stop_after_attempt, wait_fixed # The unique Charmhub library identifier, never change it LIBID = "93d2c27f38974a59b3bbe39fb27ac98d" @@ -57,42 +61,87 @@ def apply( app: bool = True, ) -> str: """Fetch cluster health and set it on the app status.""" - try: - host = self._charm.unit_ip if use_localhost else None - status = self._fetch_status(host, wait_for_green_first) - if not status: - return HealthColors.UNKNOWN - - # the health depends on data nodes, for large deployments: an ML cluster - # may not be concerned about reporting or relying on the health of the - # data nodes in other clusters. We should therefore get this info from - # the deployment descriptor which has an overview of all the cluster - if not (deployment_desc := self._charm.opensearch_peer_cm.deployment_desc()): - return HealthColors.UNKNOWN - - # compute health only in clusters where data nodes exist - compute_health = ( - deployment_desc.start == StartMode.WITH_GENERATED_ROLES - or "data" in deployment_desc.config.roles - ) - if not compute_health: - return HealthColors.IGNORE + status = self.get( + wait_for_green_first=wait_for_green_first, + use_localhost=use_localhost, + ) + logger.info(f"Current health of cluster: {status}") + + if app: + self._apply_for_app(status) + else: + self._apply_for_unit(status) + + return status + + def get( # noqa: C901 + self, + wait_for_green_first: bool = False, + use_localhost: bool = True, + local_app_only: bool = True, + ) -> str: + """Fetch the current cluster status.""" + if not (deployment_desc := self._charm.opensearch_peer_cm.deployment_desc()): + return HealthColors.UNKNOWN + + # the health depends on data nodes, for large deployments: an ML cluster + # may not be concerned about reporting or relying on the health of the + # data nodes in other clusters. We should therefore get this info from + # the deployment descriptor which has an overview of all the cluster. 
+ # compute health only in clusters where data nodes exist + compute_health = ( + deployment_desc.start == StartMode.WITH_GENERATED_ROLES + or "data" in deployment_desc.config.roles + or not local_app_only + ) + if not compute_health: + return HealthColors.IGNORE - if app: - self.apply_for_app(status) - else: - self.apply_for_unit(status) + host = self._charm.unit_ip if use_localhost else None + response = self._health(host, wait_for_green_first) + if wait_for_green_first and not response: + response = self._health(host, False) + if not response: + return HealthColors.UNKNOWN + + logger.info(f"Health: {response}") + try: + status = response["status"].lower() + except AttributeError as e: + logger.error(e) # means the status was reported as an int (i.e: 503) + return HealthColors.UNKNOWN + + if status != HealthColors.YELLOW: return status + + try: + logger.debug( + f"\n\nHealth: {status} -- Shards: {ClusterState.shards(self._opensearch, host)}\n\n" + ) except OpenSearchHttpError: - return HealthColors.UNKNOWN + pass + + # we differentiate between a temp yellow (moving shards) and a permanent + # one (such as: missing replicas) + if response["initializing_shards"] > 0 or response["relocating_shards"] > 0: + return HealthColors.YELLOW_TEMP + return HealthColors.YELLOW + + @retry(stop=stop_after_attempt(15), wait=wait_fixed(5), reraise=True) + def wait_for_shards_relocation(self) -> None: + """Blocking function until the shards relocation completes in the cluster.""" + if self.get(wait_for_green_first=True) != HealthColors.YELLOW_TEMP: + return + + # we throw an error because various operations should NOT start while data + # is being relocated. Examples are: simple stop, unit removal, upgrade + raise OpenSearchHAError("Shards haven't completed relocating.") - def apply_for_app(self, status: str) -> None: + def _apply_for_app(self, status: str) -> None: """Cluster wide / app status.""" if not self._charm.unit.is_leader(): - # this is needed in case the leader is in an error state and doesn't - # report the status itself - self._charm.peers_data.put(Scope.UNIT, "health", status) + trigger_peer_rel_changed(self._charm, on_other_units=True) return if status == HealthColors.GREEN: @@ -105,12 +154,12 @@ def apply_for_app(self, status: str) -> None: self._charm.status.set(BlockedStatus(ClusterHealthRed), app=True) elif status == HealthColors.YELLOW_TEMP: # health is yellow but temporarily (shards are relocating or initializing) - self._charm.status.set(WaitingStatus(WaitingForBusyShards), app=True) - else: + self._charm.status.set(MaintenanceStatus(WaitingForBusyShards), app=True) + elif status == HealthColors.YELLOW: # health is yellow permanently (some replica shards are unassigned) self._charm.status.set(BlockedStatus(ClusterHealthYellow), app=True) - def apply_for_unit(self, status: str, host: Optional[str] = None): + def _apply_for_unit(self, status: str, host: Optional[str] = None): """Apply the health status on the current unit.""" if status != HealthColors.YELLOW_TEMP: self._charm.status.clear( @@ -131,47 +180,42 @@ def apply_for_unit(self, status: str, host: Optional[str] = None): message = WaitingForSpecificBusyShards.format(" - ".join(message)) self._charm.status.set(WaitingStatus(message)) - def _fetch_status(self, host: Optional[str] = None, wait_for_green_first: bool = False): - """Fetch the current cluster status.""" - response: Optional[Dict[str, any]] = None - if wait_for_green_first: - try: - response = ClusterState.health( - self._opensearch, - wait_for_green=True, - host=host, - 
alt_hosts=self._charm.alt_hosts, - ) - except OpenSearchHttpError: - # it timed out, settle with current status, fetched next without - # the 1min wait - pass + def apply_for_unit_during_upgrade(self, status: str) -> None: + """Set cluster wide status on unit during upgrade + + During upgrade, app status is used to show upgrade progress + And, unit checking cluster wide status may not be leader + """ + if status in (HealthColors.GREEN, HealthColors.YELLOW): + # health green or yellow: cluster healthy + # TODO future improvement: + # https://github.com/canonical/opensearch-operator/issues/268 + self._charm.status.clear(ClusterHealthRedUpgrade) + self._charm.status.clear(WaitingForBusyShards) + elif status == HealthColors.RED: + # health RED: some primary shards are unassigned + self._charm.status.set(BlockedStatus(ClusterHealthRedUpgrade)) + elif status == HealthColors.YELLOW_TEMP: + # health is yellow but temporarily (shards are relocating or initializing) + self._charm.status.set(MaintenanceStatus(WaitingForBusyShards)) - if not response: - response = ClusterState.health( - self._opensearch, - wait_for_green=False, - host=host, - alt_hosts=self._charm.alt_hosts, - ) + def _health(self, host: str, wait_for_green: bool) -> Optional[Dict[str, any]]: + """Fetch the cluster health.""" + endpoint = "/_cluster/health" - if not response: - return None + timeout = 5 + if wait_for_green: + endpoint = f"{endpoint}?wait_for_status=green&timeout=1m" + timeout = 61 - logger.info(f"Health: {response}") try: - status = response["status"].lower() - except AttributeError as e: - logger.error(e) # means the status was reported as an int (i.e: 503) + return self._opensearch.request( + "GET", + endpoint, + host=host, + alt_hosts=self._charm.alt_hosts, + timeout=timeout, + retries=3, + ) + except OpenSearchHttpError: return None - - if status != HealthColors.YELLOW: - return status - - # we differentiate between a temp yellow (moving shards) and a permanent - # one (such as: missing replicas) - shards_by_state = ClusterState.shards_by_state( - self._opensearch, host=host, alt_hosts=self._charm.alt_hosts - ) - busy_shards = shards_by_state.get("INITIALIZING", 0) + shards_by_state.get("RELOCATING", 0) - return HealthColors.YELLOW_TEMP if busy_shards > 0 else HealthColors.YELLOW diff --git a/lib/charms/opensearch/v0/opensearch_internal_data.py b/lib/charms/opensearch/v0/opensearch_internal_data.py index 09b1113c5..f089baef3 100644 --- a/lib/charms/opensearch/v0/opensearch_internal_data.py +++ b/lib/charms/opensearch/v0/opensearch_internal_data.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Utility classes for app / unit data bag related operations.""" diff --git a/lib/charms/opensearch/v0/opensearch_keystore.py b/lib/charms/opensearch/v0/opensearch_keystore.py index 8eff4a39a..e914b2308 100644 --- a/lib/charms/opensearch/v0/opensearch_keystore.py +++ b/lib/charms/opensearch/v0/opensearch_keystore.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Implements the keystore logic. 
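The reworked health helper above asks `/_cluster/health` with `wait_for_status=green&timeout=1m` first, falls back to an immediate read if that wait does not succeed, and then separates a transient yellow (shards still initializing or relocating) from a permanent one (replicas genuinely unassigned). A rough standalone equivalent, including the same tenacity-based wait used by `wait_for_shards_relocation`; the host, credentials and the plain `RuntimeError` are placeholders (the charm raises its own `OpenSearchHAError`):

```python
import requests
from tenacity import retry, stop_after_attempt, wait_fixed

BASE = "https://localhost:9200"          # placeholder endpoint
AUTH = ("admin", "admin-password")       # placeholder credentials


def cluster_health(wait_for_green: bool) -> dict:
    params = {"wait_for_status": "green", "timeout": "1m"} if wait_for_green else {}
    resp = requests.get(
        f"{BASE}/_cluster/health", params=params,
        auth=AUTH, verify=False, timeout=61 if wait_for_green else 5,
    )
    resp.raise_for_status()
    return resp.json()


def health_color(wait_for_green_first: bool = False) -> str:
    try:
        health = cluster_health(wait_for_green_first)
    except requests.HTTPError:
        # e.g. the one-minute green wait expired; settle for the current state.
        health = cluster_health(False)
    status = health["status"].lower()
    if status != "yellow":
        return status
    # Yellow with busy shards is only temporary; yellow without them means
    # some replica shards are genuinely unassigned.
    if health["initializing_shards"] > 0 or health["relocating_shards"] > 0:
        return "yellow-temp"
    return "yellow"


@retry(stop=stop_after_attempt(15), wait=wait_fixed(5), reraise=True)
def wait_for_shards_relocation() -> None:
    if health_color(wait_for_green_first=True) == "yellow-temp":
        raise RuntimeError("Shards haven't completed relocating.")
```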
@@ -34,16 +34,20 @@ class OpenSearchKeystoreError(OpenSearchError): """Exception thrown when an opensearch keystore is invalid.""" +class OpenSearchKeystoreNotReadyYetError(OpenSearchKeystoreError): + """Exception thrown when the keystore is not ready yet.""" + + class Keystore(ABC): """Abstract class that represents the keystore.""" - def __init__(self, charm): + def __init__(self, charm, password: str = None): """Creates the keystore manager class.""" self._charm = charm self._opensearch = charm.opensearch self._keytool = charm.opensearch.paths.jdk + "/bin/keytool" self._keystore = "" - self._password = None + self._password = password @property def password(self) -> str: @@ -62,7 +66,7 @@ def update_password(self, old_pwd: str, pwd: str) -> None: if not os.path.exists(self._keystore): raise OpenSearchKeystoreError(f"{self._keystore} not found") try: - self._opensearch._run_cmd( + self._opensearch.run_bin( self._keytool, f"-storepasswd -new {pwd} -keystore {self._keystore} " f"-storepass {old_pwd}", ) @@ -73,7 +77,7 @@ def list(self, alias: str = None) -> List[str]: """Lists the keys available in opensearch's keystore.""" try: # Not using OPENSEARCH_BIN path - return self._opensearch._run_cmd(self._keytool, f"-v -list -keystore {self._keystore}") + return self._opensearch.run_bin(self._keytool, f"-v -list -keystore {self._keystore}") except OpenSearchCmdError as e: raise OpenSearchKeystoreError(str(e)) @@ -93,7 +97,7 @@ def add(self, entries: Dict[str, str]) -> None: pass try: # Not using OPENSEARCH_BIN path - self._opensearch._run_cmd( + self._opensearch.run_bin( self._keytool, f"-import -alias {key} " f"-file {filename} -storetype JKS " @@ -111,7 +115,7 @@ def delete(self, entries: List[str]) -> None: for key in entries: try: # Not using OPENSEARCH_BIN path - self._opensearch._run_cmd( + self._opensearch.run_bin( self._keytool, f"-delete -alias {key} " f"-keystore {self._keystore} " @@ -133,9 +137,13 @@ def __init__(self, charm): """Creates the keystore manager class.""" super().__init__(charm) self._keytool = "opensearch-keystore" + self.keystore = charm._opensearch.paths.conf + "/opensearch.keystore" def add(self, entries: Dict[str, str]) -> None: """Adds a given key to the "opensearch" keystore.""" + if not os.path.exists(self.keystore): + raise OpenSearchKeystoreNotReadyYetError() + if not entries: return # no key/value to add, no need to request reload of keystore either for key, value in entries.items(): diff --git a/lib/charms/opensearch/v0/opensearch_locking.py b/lib/charms/opensearch/v0/opensearch_locking.py index 447512663..711cabb17 100644 --- a/lib/charms/opensearch/v0/opensearch_locking.py +++ b/lib/charms/opensearch/v0/opensearch_locking.py @@ -1,17 +1,18 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
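On the keystore changes above: `OpenSearchKeystore.add` now refuses to run before the node has created `conf/opensearch.keystore`, raising `OpenSearchKeystoreNotReadyYetError`, which the plugin-config handler at the top of this section catches and defers on. A small sketch of that guard; the `opensearch-keystore` CLI invocation is an assumption modelled on the upstream `elasticsearch-keystore` tool and should be verified against the snap:

```python
import os
import subprocess
from typing import Dict


class OpenSearchKeystoreNotReadyYetError(Exception):
    """The opensearch.keystore file has not been created by the node yet."""


def add_keystore_entries(conf_dir: str, entries: Dict[str, str]) -> None:
    keystore = os.path.join(conf_dir, "opensearch.keystore")
    if not os.path.exists(keystore):
        # The file only appears once OpenSearch has been started at least once,
        # so callers treat this as a retriable condition and defer the event.
        raise OpenSearchKeystoreNotReadyYetError()
    if not entries:
        return  # nothing to add, no keystore reload needed
    for key, value in entries.items():
        # Assumed CLI shape: read the secret value from stdin and overwrite any
        # existing entry (mirrors elasticsearch-keystore's `add --stdin --force`).
        subprocess.run(
            ["opensearch-keystore", "add", "--stdin", "--force", key],
            input=value, text=True, check=True,
        )
```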
"""Ensure that only one node (re)starts, joins the cluster, or leaves the cluster at a time.""" import json import logging import os -import typing +from typing import TYPE_CHECKING, List, Optional import ops -from charms.opensearch.v0.helper_cluster import ClusterTopology +from charms.opensearch.v0.constants_charm import PeerRelationName +from charms.opensearch.v0.helper_cluster import ClusterState, ClusterTopology from charms.opensearch.v0.opensearch_exceptions import OpenSearchHttpError -if typing.TYPE_CHECKING: +if TYPE_CHECKING: import charms.opensearch.v0.opensearch_base_charm as opensearch_base_charm # The unique Charmhub library identifier, never change it @@ -127,9 +128,9 @@ def _unit_with_lock(self, value: str): # the lock. # `JUJU_CONTEXT_ID` is unique for each Juju event # (https://matrix.to/#/!xdClnUGkurzjxqiQcN:ubuntu.com/$yEGjGlDaIPBtCi8uB3fH6ZaXUjN7GF-Y2s9YwvtPM-o?via=ubuntu.com&via=matrix.org&via=cutefunny.art) - self._relation.data[self._charm.app][ - "leader-acquired-lock-after-juju-event-id" - ] = os.environ["JUJU_CONTEXT_ID"] + self._relation.data[self._charm.app]["leader-acquired-lock-after-juju-event-id"] = ( + os.environ["JUJU_CONTEXT_ID"] + ) self._relation.data[self._charm.app]["unit-with-lock"] = value @_unit_with_lock.deleter @@ -235,30 +236,23 @@ def acquired(self) -> bool: # noqa: C901 return False logger.debug(f"[Node lock] Opensearch {online_nodes=}") assert online_nodes > 0 - if online_nodes >= 2: + try: + unit = self._unit_with_lock(host) + except OpenSearchHttpError: + logger.exception("Error checking which unit has OpenSearch lock") + return False + # If online_nodes == 1, we should acquire the lock via the peer databag. + # If we acquired the lock via OpenSearch and this unit was stopping, we would be unable + # to release the OpenSearch lock. For example, when scaling to 0. + # Then, when 1+ OpenSearch nodes are online, a unit that no longer exists could hold + # the lock. 
+ if not unit and online_nodes > 0: logger.debug("[Node lock] Attempting to acquire opensearch lock") # Acquire opensearch lock # Create index if it doesn't exist - try: - self._opensearch.request( - "PUT", - endpoint=f"/{self.OPENSEARCH_INDEX}", - host=host, - alt_hosts=alt_hosts, - retries=3, - payload={"settings": {"index": {"auto_expand_replicas": "0-all"}}}, - ) - except OpenSearchHttpError as e: - if ( - e.response_code == 400 - and e.response_body.get("error", {}).get("type") - == "resource_already_exists_exception" - ): - # Index already created - pass - else: - logger.exception("Error creating OpenSearch lock index") - return False + if not self._create_lock_index_if_needed(host, alt_hosts): + return False + # Attempt to create document id 0 try: response = self._opensearch.request( @@ -266,7 +260,7 @@ def acquired(self) -> bool: # noqa: C901 endpoint=f"/{self.OPENSEARCH_INDEX}/_create/0?refresh=true&wait_for_active_shards=all", host=host, alt_hosts=alt_hosts, - retries=3, + retries=0, payload={"unit-name": self._charm.unit.name}, ) except OpenSearchHttpError as e: @@ -274,7 +268,10 @@ def acquired(self) -> bool: # noqa: C901 "error", {} ).get("reason", ""): # Document already created - pass + logger.debug( + "[Node lock] Another unit acquired OpenSearch lock while this unit attempted to acquire lock" + ) + return False else: logger.exception("Error creating OpenSearch lock document") return False @@ -291,13 +288,25 @@ def acquired(self) -> bool: # noqa: C901 # from # https://www.elastic.co/guide/en/elasticsearch/reference/8.13/docs-index_.html#index-wait-for-active-shards if response["_shards"]["failed"] > 0: - logger.error("Failed to write OpenSearch lock document to all nodes") + logger.error("Failed to write OpenSearch lock document to all nodes.") + logger.debug( + "[Node lock] Deleting OpenSearch lock after failing to write to all nodes" + ) + # Delete document id 0 + self._opensearch.request( + "DELETE", + endpoint=f"/{self.OPENSEARCH_INDEX}/_doc/0?refresh=true", + host=host, + alt_hosts=alt_hosts, + retries=10, + ) + logger.debug( + "[Node lock] Deleted OpenSearch lock after failing to write to all nodes" + ) return False - try: - unit = self._unit_with_lock(host) - except OpenSearchHttpError: - logger.exception("Error checking which unit has OpenSearch lock") - return False + # This unit has OpenSearch lock + unit = self._charm.unit.name + if unit == self._charm.unit.name: # Lock acquired # Release peer databag lock, if any @@ -305,21 +314,13 @@ def acquired(self) -> bool: # noqa: C901 self._peer.release() logger.debug("[Node lock] Released redundant peer lock (if held)") return True - if unit or online_nodes >= 2: + + if unit: # Another unit has lock - # (Or document deleted after request to create document & before request in - # `self._unit_with_lock()`) logger.debug(f"[Node lock] Not acquired. Unit with opensearch lock: {unit}") return False - # If online_nodes == 1, we should acquire the lock via the peer databag. - # If we acquired the lock via OpenSearch and this unit was stopping, we would be unable - # to release the OpenSearch lock. For example, when scaling to 0. - # Then, when 1+ OpenSearch nodes are online, a unit that no longer exists could hold - # the lock. - # Note: if online_nodes > 1, this situation is still possible (e.g. if this unit was - # stopping and another unit went offline simultaneously)—but it's an edge case we don't - # support (to reduce complexity & improve robustness in other cases). 
- # If online_nodes > 1, we should re-attempt to acquire the OpenSearch lock. + + assert online_nodes == 1 logger.debug("[Node lock] No unit has opensearch lock") logger.debug("[Node lock] Using peer databag for lock") # Request peer databag lock @@ -343,7 +344,16 @@ def release(self): if host or alt_hosts: logger.debug("[Node lock] Checking which unit has opensearch lock") # Check if this unit currently has lock - if self._unit_with_lock(host) == self._charm.unit.name: + # or if there is a stale lock from a unit no longer existing + # TODO: for large deployments the MAIN/FAILOVER orchestrators should broadcast info + # over non-online units in the relation. This info should be considered here as well. + unit_with_lock = self._unit_with_lock(host) + current_app_units = [ + unit.name for unit in self._charm.model.get_relation(PeerRelationName).units + ] + if unit_with_lock and ( + unit_with_lock == self._charm.unit.name or unit_with_lock not in current_app_units + ): logger.debug("[Node lock] Releasing opensearch lock") # Delete document id 0 try: @@ -361,3 +371,46 @@ def release(self): self._peer.release() logger.debug("[Node lock] Released peer lock (if held)") logger.debug("[Node lock] Released lock") + + def _create_lock_index_if_needed(self, host: str, alt_hosts: Optional[List[str]]) -> bool: + """Attempts the creation of the lock index if it doesn't exist.""" + # we do this, to circumvent opensearch raising a 429 error, + # complaining about spamming the index creation endpoint + try: + indices = ClusterState.indices(self._opensearch, host, alt_hosts) + if self.OPENSEARCH_INDEX in indices: + logger.debug( + f"{self.OPENSEARCH_INDEX} already created. Skipping creation attempt. List:{indices}" + ) + if self._charm.app.planned_units() > 1: + self._opensearch.request( + "GET", + endpoint=f"/_cluster/health/{self.OPENSEARCH_INDEX}?wait_for_status=green", + resp_status_code=True, + ) + return True + except OpenSearchHttpError: + pass + + # Create index if it doesn't exist + try: + self._opensearch.request( + "PUT", + endpoint=f"/{self.OPENSEARCH_INDEX}?wait_for_active_shards=all", + host=host, + alt_hosts=alt_hosts, + retries=3, + payload={"settings": {"index": {"auto_expand_replicas": "0-all"}}}, + ) + return True + except OpenSearchHttpError as e: + if ( + e.response_code == 400 + and e.response_body.get("error", {}).get("type") + == "resource_already_exists_exception" + ): + # Index already created + return True + else: + logger.exception("Error creating OpenSearch lock index") + return False diff --git a/lib/charms/opensearch/v0/opensearch_nodes_exclusions.py b/lib/charms/opensearch/v0/opensearch_nodes_exclusions.py index 9f9ab9f86..b06d1cded 100644 --- a/lib/charms/opensearch/v0/opensearch_nodes_exclusions.py +++ b/lib/charms/opensearch/v0/opensearch_nodes_exclusions.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Base class for OpenSearch node exclusions management.""" diff --git a/lib/charms/opensearch/v0/opensearch_peer_clusters.py b/lib/charms/opensearch/v0/opensearch_peer_clusters.py index 97923426b..c0ded0fe2 100644 --- a/lib/charms/opensearch/v0/opensearch_peer_clusters.py +++ b/lib/charms/opensearch/v0/opensearch_peer_clusters.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
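The locking rework above keeps the cluster-wide lock as a single document (id 0) in a dedicated index that is replicated to every node: a unit holds the lock only if it managed to `_create` that document, a write that did not reach all shards is rolled back, and releasing (or clearing a stale lock left by a removed unit) is a delete of the same document. A condensed standalone sketch of that protocol; the index name, host and credentials are placeholders:

```python
import requests

BASE = "https://localhost:9200"          # placeholder endpoint
AUTH = ("admin", "admin-password")       # placeholder credentials
INDEX = ".charm-node-lock"               # placeholder index name


def ensure_lock_index() -> None:
    """Create the lock index once, replicated to every node."""
    resp = requests.put(
        f"{BASE}/{INDEX}?wait_for_active_shards=all",
        json={"settings": {"index": {"auto_expand_replicas": "0-all"}}},
        auth=AUTH, verify=False, timeout=10,
    )
    if resp.status_code == 400 and resp.json().get("error", {}).get("type") == (
        "resource_already_exists_exception"
    ):
        return  # another unit created it first
    resp.raise_for_status()


def acquire_lock(unit_name: str) -> bool:
    ensure_lock_index()
    resp = requests.put(
        f"{BASE}/{INDEX}/_create/0?refresh=true&wait_for_active_shards=all",
        json={"unit-name": unit_name},
        auth=AUTH, verify=False, timeout=10,
    )
    if resp.status_code == 409:
        return False  # another unit holds the lock
    resp.raise_for_status()
    if resp.json()["_shards"]["failed"] > 0:
        # The document did not reach every node: give the lock back rather
        # than risk two units disagreeing about who holds it.
        release_lock()
        return False
    return True


def release_lock() -> None:
    requests.delete(
        f"{BASE}/{INDEX}/_doc/0?refresh=true", auth=AUTH, verify=False, timeout=10
    )
```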
"""Class for Managing simple or large deployments and configuration related changes.""" @@ -18,6 +18,7 @@ PeerClusterOrchestratorRelationName, PeerRelationName, ) +from charms.opensearch.v0.helper_charm import trigger_peer_rel_changed from charms.opensearch.v0.helper_cluster import ClusterTopology from charms.opensearch.v0.models import ( DeploymentDescription, @@ -88,6 +89,11 @@ def run(self) -> None: Scope.APP, "deployment-description", deployment_desc.to_dict() ) + if deployment_desc.start == StartMode.WITH_GENERATED_ROLES: + # trigger roles change on the leader, other units will have their peer-rel-changed + # event triggered + trigger_peer_rel_changed(self._charm, on_other_units=False, on_current_unit=True) + self.apply_status_if_needed(deployment_desc) # TODO: once peer clusters relation implemented, we should apply all directives @@ -121,6 +127,7 @@ def run_with_relation_data( config.cluster_name = data.cluster_name pending_directives.remove(Directive.INHERIT_CLUSTER_NAME) + pending_directives.append(Directive.SHOW_STATUS) new_deployment_desc = DeploymentDescription( config=config, pending_directives=pending_directives, @@ -133,6 +140,8 @@ def run_with_relation_data( Scope.APP, "deployment-description", new_deployment_desc.to_dict() ) + self.apply_status_if_needed(new_deployment_desc) + def _user_config(self): """Build a user provided config object.""" return PeerClusterConfig( @@ -154,6 +163,7 @@ def _new_cluster_setup(self, config: PeerClusterConfig) -> DeploymentDescription deployment_state = DeploymentState( value=State.BLOCKED_WAITING_FOR_RELATION, message=PClusterNoRelation ) + directives.append(Directive.SHOW_STATUS) directives.append(Directive.WAIT_FOR_PEER_CLUSTER_RELATION) @@ -239,6 +249,7 @@ def _existing_cluster_setup( directives.append(Directive.SHOW_STATUS) directives.remove(Directive.WAIT_FOR_PEER_CLUSTER_RELATION) + deployment_type = self._deployment_type(config, start_mode) return DeploymentDescription( config=PeerClusterConfig( cluster_name=prev_deployment.config.cluster_name, @@ -248,9 +259,14 @@ def _existing_cluster_setup( ), start=start_mode, state=deployment_state, - typ=self._deployment_type(config, start_mode), + typ=deployment_type, app=self._charm.app.name, pending_directives=list(set(directives)), + promotion_time=( + prev_deployment.promotion_time + if deployment_type == DeploymentType.MAIN_ORCHESTRATOR + else None + ), ) def can_start(self, deployment_desc: Optional[DeploymentDescription] = None) -> bool: @@ -271,7 +287,9 @@ def can_start(self, deployment_desc: Optional[DeploymentDescription] = None) -> return True def apply_status_if_needed( - self, deployment_desc: Optional[DeploymentDescription] = None + self, + deployment_desc: Optional[DeploymentDescription] = None, + show_status_only_once: bool = True, ) -> None: """Resolve and applies corresponding status from the deployment state.""" if not (deployment_desc := deployment_desc or self.deployment_desc()): @@ -281,7 +299,8 @@ def apply_status_if_needed( return # remove show_status directive which is applied below - self.clear_directive(Directive.SHOW_STATUS) + if show_status_only_once: + self.clear_directive(Directive.SHOW_STATUS) blocked_status_messages = [ CMRoleRemovalForbidden, @@ -294,7 +313,7 @@ def apply_status_if_needed( ] if deployment_desc.state.message not in blocked_status_messages: for message in blocked_status_messages: - self._charm.status.clear(message) + self._charm.status.clear(message, app=True) return self._charm.app.status = BlockedStatus(deployment_desc.state.message) @@ 
-436,7 +455,7 @@ def _pre_validate_roles_change(self, new_roles: List[str], prev_roles: List[str] # if prev_roles None, means auto-generated roles, and will therefore include the cm role # for all the units up to the latest if even number of units, which will be voting_only - prev_roles = set(prev_roles or ["cluster_manager", "data"]) + prev_roles = set(prev_roles or ClusterTopology.generated_roles()) new_roles = set(new_roles) if "cluster_manager" in new_roles and "voting_only" in new_roles: diff --git a/lib/charms/opensearch/v0/opensearch_plugin_manager.py b/lib/charms/opensearch/v0/opensearch_plugin_manager.py index 7a4ef5396..38b205ee6 100644 --- a/lib/charms/opensearch/v0/opensearch_plugin_manager.py +++ b/lib/charms/opensearch/v0/opensearch_plugin_manager.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Implements the plugin manager class. @@ -13,14 +13,14 @@ import copy import functools import logging -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple, Type from charms.opensearch.v0.helper_cluster import ClusterTopology from charms.opensearch.v0.opensearch_exceptions import OpenSearchCmdError from charms.opensearch.v0.opensearch_health import HealthColors +from charms.opensearch.v0.opensearch_internal_data import Scope from charms.opensearch.v0.opensearch_keystore import OpenSearchKeystore from charms.opensearch.v0.opensearch_plugins import ( - OpenSearchBackupPlugin, OpenSearchKnn, OpenSearchPlugin, OpenSearchPluginConfig, @@ -53,11 +53,6 @@ "config": "plugin_opensearch_knn", "relation": None, }, - "repository-s3": { - "class": OpenSearchBackupPlugin, - "config": None, - "relation": "s3-credentials", - }, } @@ -105,14 +100,15 @@ def plugins(self) -> List[OpenSearchPlugin]: plugins_list.append(new_plugin) return plugins_list - def get_plugin(self, plugin_class: OpenSearchPlugin) -> OpenSearchPlugin: + def get_plugin(self, plugin_class: Type[OpenSearchPlugin]) -> OpenSearchPlugin: """Returns a given plugin based on its class.""" for plugin in self.plugins: if isinstance(plugin, plugin_class): return plugin + raise KeyError(f"Plugin manager did not find plugin: {plugin_class}") - def get_plugin_status(self, plugin_class: OpenSearchPlugin) -> OpenSearchPlugin: + def get_plugin_status(self, plugin_class: Type[OpenSearchPlugin]) -> PluginState: """Returns a given plugin based on its class.""" for plugin in self.plugins: if isinstance(plugin, plugin_class): @@ -136,15 +132,14 @@ def _extra_conf(self, plugin_data: Dict[str, Any]) -> Optional[Dict[str, Any]]: def check_plugin_manager_ready_for_api(self) -> bool: """Checks if the plugin manager is ready to run.""" - return ( - self._charm.opensearch.is_node_up() - and len(self._charm._get_nodes(True)) == self._charm.app.planned_units() - and self._charm.health.apply() - in [ - HealthColors.GREEN, - HealthColors.YELLOW, - ] - ) + return self._charm.peers_data.get( + Scope.APP, "security_index_initialised", False + ) and self._charm.health.apply() in [ + HealthColors.GREEN, + HealthColors.YELLOW, + HealthColors.YELLOW_TEMP, + HealthColors.IGNORE, + ] def run(self) -> bool: """Runs a check on each plugin: install, execute config changes or remove. 
@@ -246,7 +241,7 @@ def _configure_if_needed(self, plugin: OpenSearchPlugin) -> bool: return False return self.apply_config(plugin.config()) except KeyError as e: - raise OpenSearchPluginMissingConfigError(plugin.name, configs=[f"{e}"]) + raise OpenSearchPluginMissingConfigError(e) def _disable_if_needed(self, plugin: OpenSearchPlugin) -> bool: """If disabled, removes plugin configuration or sets it to other values.""" @@ -261,7 +256,7 @@ def _disable_if_needed(self, plugin: OpenSearchPlugin) -> bool: return False return self.apply_config(plugin.disable()) except KeyError as e: - raise OpenSearchPluginMissingConfigError(plugin.name, configs=[f"{e}"]) + raise OpenSearchPluginMissingConfigError(e) def _compute_settings( self, config: OpenSearchPluginConfig diff --git a/lib/charms/opensearch/v0/opensearch_plugins.py b/lib/charms/opensearch/v0/opensearch_plugins.py index 1cf4c594e..329b7e356 100644 --- a/lib/charms/opensearch/v0/opensearch_plugins.py +++ b/lib/charms/opensearch/v0/opensearch_plugins.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """OpenSearch Plugin Model. diff --git a/lib/charms/opensearch/v0/opensearch_relation_peer_cluster.py b/lib/charms/opensearch/v0/opensearch_relation_peer_cluster.py index b52874efa..b57c364fe 100644 --- a/lib/charms/opensearch/v0/opensearch_relation_peer_cluster.py +++ b/lib/charms/opensearch/v0/opensearch_relation_peer_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Peer clusters relation related classes for OpenSearch.""" @@ -7,6 +7,9 @@ from typing import TYPE_CHECKING, Any, Dict, List, MutableMapping, Optional, Union from charms.opensearch.v0.constants_charm import ( + AdminUser, + COSUser, + KibanaserverUser, PeerClusterOrchestratorRelationName, PeerClusterRelationName, ) @@ -24,7 +27,9 @@ PeerClusterRelData, PeerClusterRelDataCredentials, PeerClusterRelErrorData, + S3RelDataCredentials, ) +from charms.opensearch.v0.opensearch_backups import S3_RELATION from charms.opensearch.v0.opensearch_exceptions import OpenSearchHttpError from charms.opensearch.v0.opensearch_internal_data import Scope from ops import ( @@ -155,7 +160,9 @@ def _on_peer_cluster_relation_changed(self, event: RelationChangedEvent): return # get list of relations with this orchestrator - target_relation_ids = [rel.id for rel in self.charm.model.relations[self.relation_name]] + target_relation_ids = [ + rel.id for rel in self.charm.model.relations[self.relation_name] if len(rel.units) > 0 + ] # fetch emitting app planned units and broadcast self._put_planned_units( @@ -195,7 +202,11 @@ def _on_peer_cluster_relation_departed(self, event: RelationDepartedEvent) -> No return # we need to update the fleet planned units - target_relation_ids = [rel.id for rel in self.charm.model.relations[self.relation_name]] + target_relation_ids = [ + rel.id + for rel in self.charm.model.relations[self.relation_name] + if rel.id != event.relation.id and len(rel.units) > 0 + ] self._put_planned_units(event.app.name, 0, target_relation_ids) def refresh_relation_data(self, event: EventBase) -> None: @@ -204,7 +215,9 @@ def refresh_relation_data(self, event: EventBase) -> None: return # all relations with the current orchestrator - all_relation_ids = [rel.id for rel in self.charm.model.relations[self.relation_name]] + all_relation_ids = [ + rel.id for rel in self.charm.model.relations[self.relation_name] if len(rel.units) > 0 + ] # get 
deployment descriptor of current app deployment_desc = self.charm.opensearch_peer_cm.deployment_desc() @@ -299,6 +312,31 @@ def _put_planned_units(self, app: str, count: int, target_relation_ids: List[int Scope.APP, "cluster_fleet_planned_units", cluster_fleet_planned_units ) + def _s3_credentials( + self, deployment_desc: DeploymentDescription + ) -> Optional[S3RelDataCredentials]: + if deployment_desc.typ == DeploymentType.MAIN_ORCHESTRATOR: + if not self.charm.model.get_relation(S3_RELATION): + return None + + if not self.charm.backup.s3_client.get_s3_connection_info().get("access-key"): + return None + + # As the main orchestrator, this application must set the S3 information. + return S3RelDataCredentials( + access_key=self.charm.backup.s3_client.get_s3_connection_info().get("access-key"), + secret_key=self.charm.backup.s3_client.get_s3_connection_info().get("secret-key"), + ) + + if not self.charm.secrets.get(Scope.APP, "access-key"): + return None + + # Return what we have received from the peer relation + return S3RelDataCredentials( + access_key=self.charm.secrets.get(Scope.APP, "access-key"), + secret_key=self.charm.secrets.get(Scope.APP, "secret-key"), + ) + def _rel_data( self, deployment_desc: DeploymentDescription, orchestrators: PeerClusterOrchestrators ) -> Union[PeerClusterRelData, PeerClusterRelErrorData]: @@ -315,10 +353,16 @@ def _rel_data( cluster_name=deployment_desc.config.cluster_name, cm_nodes=self._fetch_local_cm_nodes(), credentials=PeerClusterRelDataCredentials( - admin_username="admin", - admin_password=secrets.get(Scope.APP, secrets.password_key("admin")), - admin_password_hash=secrets.get(Scope.APP, secrets.hash_key("admin")), + admin_username=AdminUser, + admin_password=secrets.get(Scope.APP, secrets.password_key(AdminUser)), + admin_password_hash=secrets.get(Scope.APP, secrets.hash_key(AdminUser)), + kibana_password=secrets.get(Scope.APP, secrets.password_key(KibanaserverUser)), + kibana_password_hash=secrets.get( + Scope.APP, secrets.hash_key(KibanaserverUser) + ), + monitor_password=secrets.get(Scope.APP, secrets.password_key(COSUser)), admin_tls=secrets.get_object(Scope.APP, CertType.APP_ADMIN.val), + s3=self._s3_credentials(deployment_desc), ), deployment_desc=deployment_desc, ) @@ -360,6 +404,8 @@ def _rel_err_data( # noqa: C901 blocked_msg = f"Security index not initialized {message_suffix}." elif not self.charm.is_every_unit_marked_as_started(): blocked_msg = f"Waiting for every unit {message_suffix} to start." + elif not self.charm.secrets.get(Scope.APP, self.charm.secrets.password_key(COSUser)): + blocked_msg = f"'{COSUser}' user not created yet." 
else: try: if not self._fetch_local_cm_nodes(): @@ -477,8 +523,16 @@ def _set_security_conf(self, data: PeerClusterRelData) -> None: """Store security related config.""" # set admin secrets secrets = self.charm.secrets - secrets.put(Scope.APP, secrets.password_key("admin"), data.credentials.admin_password) - secrets.put(Scope.APP, secrets.hash_key("admin"), data.credentials.admin_password_hash) + secrets.put(Scope.APP, secrets.password_key(AdminUser), data.credentials.admin_password) + secrets.put(Scope.APP, secrets.hash_key(AdminUser), data.credentials.admin_password_hash) + secrets.put( + Scope.APP, secrets.password_key(KibanaserverUser), data.credentials.kibana_password + ) + secrets.put( + Scope.APP, secrets.hash_key(KibanaserverUser), data.credentials.kibana_password_hash + ) + secrets.put(Scope.APP, secrets.password_key(COSUser), data.credentials.monitor_password) + secrets.put_object(Scope.APP, CertType.APP_ADMIN.val, data.credentials.admin_tls) # store the app admin TLS resources if not stored @@ -488,6 +542,10 @@ def _set_security_conf(self, data: PeerClusterRelData) -> None: self.charm.peers_data.put(Scope.APP, "admin_user_initialized", True) self.charm.peers_data.put(Scope.APP, "security_index_initialised", True) + if s3_creds := data.credentials.s3: + self.charm.secrets.put(Scope.APP, "access-key", s3_creds.access_key) + self.charm.secrets.put(Scope.APP, "secret-key", s3_creds.secret_key) + def _orchestrators( self, event: RelationChangedEvent, @@ -719,6 +777,8 @@ def _error_set_from_requirer( blocked_msg = ( "A cluster can only be related to 1 main and 1 failover-clusters at most." ) + elif peer_cluster_rel_data.cluster_name != deployment_desc.config.cluster_name: + blocked_msg = "Cannot relate 2 clusters with different 'cluster_name' values." if not blocked_msg: self._clear_errors(f"error_from_requirer-{event_rel_id}") @@ -779,5 +839,6 @@ def _set_error(self, label: str, error: Optional[Dict[str, Any]]) -> None: def _clear_errors(self, *error_labels: str): """Clear previously set Peer clusters related statuses.""" for error_label in error_labels: - self.charm.status.clear(error_label) + error = self.charm.peers_data.get(Scope.APP, error_label, "") + self.charm.status.clear(error, app=True) self.charm.peers_data.delete(Scope.APP, error_label) diff --git a/lib/charms/opensearch/v0/opensearch_relation_provider.py b/lib/charms/opensearch/v0/opensearch_relation_provider.py index b899cd53d..7c9467c26 100644 --- a/lib/charms/opensearch/v0/opensearch_relation_provider.py +++ b/lib/charms/opensearch/v0/opensearch_relation_provider.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """OpenSearch client relation hooks & helpers. 
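Back on the peer-cluster changes just above (`opensearch_relation_peer_cluster.py`): the main orchestrator is the only cluster wired to the s3-credentials relation, so it publishes the S3 access and secret keys in the peer-cluster relation data, while every other cluster simply relays the copy it previously stored in its app secrets. A simplified sketch of that decision, with plain dicts standing in for the charm's relation and secret accessors:

```python
from typing import Dict, Optional


def s3_credentials_to_publish(
    is_main_orchestrator: bool,
    s3_connection_info: Dict[str, str],   # stand-in for the s3 integrator relation data
    stored_app_secrets: Dict[str, str],   # stand-in for this app's stored secrets
) -> Optional[Dict[str, str]]:
    """Pick the S3 credentials to put on the peer-cluster relation, if any."""
    if is_main_orchestrator:
        if not s3_connection_info.get("access-key"):
            return None  # no s3-credentials relation, or it is not ready yet
        return {
            "access-key": s3_connection_info["access-key"],
            "secret-key": s3_connection_info["secret-key"],
        }
    if not stored_app_secrets.get("access-key"):
        return None
    # Non-main clusters relay what the main orchestrator sent them earlier.
    return {
        "access-key": stored_app_secrets["access-key"],
        "secret-key": stored_app_secrets["secret-key"],
    }
```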
@@ -151,6 +151,9 @@ def __init__(self, charm: "OpenSearchBaseCharm") -> None: self.framework.observe( self.opensearch_provides.on.index_requested, self._on_index_requested ) + self.framework.observe( + charm.on[self.relation_name].relation_changed, self._on_relation_changed + ) self.framework.observe( charm.on[self.relation_name].relation_departed, self._on_relation_departed ) @@ -372,6 +375,7 @@ def update_certs(self, relation_id, ca_chain=None): def _on_relation_changed(self, event: RelationChangedEvent) -> None: if not self.unit.is_leader(): return + if self.opensearch.is_node_up(): self.update_endpoints(event.relation) else: diff --git a/lib/charms/opensearch/v0/opensearch_secrets.py b/lib/charms/opensearch/v0/opensearch_secrets.py index d3df4b59e..11fd5d7e3 100644 --- a/lib/charms/opensearch/v0/opensearch_secrets.py +++ b/lib/charms/opensearch/v0/opensearch_secrets.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """This class is to handle Juju3 Secrets. diff --git a/lib/charms/opensearch/v0/opensearch_tls.py b/lib/charms/opensearch/v0/opensearch_tls.py index e887d819c..a6bdc91d1 100644 --- a/lib/charms/opensearch/v0/opensearch_tls.py +++ b/lib/charms/opensearch/v0/opensearch_tls.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """In this class we manage certificates relation. @@ -18,7 +18,7 @@ import re import socket import typing -from typing import Dict, List, Optional, Tuple +from typing import Dict, List, Optional, Tuple, Union from charms.opensearch.v0.constants_tls import TLS_RELATION, CertType from charms.opensearch.v0.helper_networking import get_host_public_ip @@ -28,6 +28,7 @@ from charms.tls_certificates_interface.v3.tls_certificates import ( CertificateAvailableEvent, CertificateExpiringEvent, + CertificateInvalidatedEvent, TLSCertificatesRequiresV3, generate_csr, generate_private_key, @@ -74,6 +75,9 @@ def __init__(self, charm: "OpenSearchBaseCharm", peer_relation: str): self.framework.observe(self.certs.on.certificate_available, self._on_certificate_available) self.framework.observe(self.certs.on.certificate_expiring, self._on_certificate_expiring) + self.framework.observe( + self.certs.on.certificate_invalidated, self._on_certificate_invalidated + ) def _on_set_tls_private_key(self, event: ActionEvent) -> None: """Set the TLS private key, which will be used for requesting the certificate.""" @@ -186,7 +190,9 @@ def _on_certificate_available(self, event: CertificateAvailableEvent) -> None: logger.exception(e) event.defer() - def _on_certificate_expiring(self, event: CertificateExpiringEvent) -> None: + def _on_certificate_expiring( + self, event: Union[CertificateExpiringEvent, CertificateInvalidatedEvent] + ) -> None: """Request the new certificate when old certificate is expiring.""" self.charm.peers_data.delete(Scope.UNIT, "tls_configured") try: @@ -198,6 +204,11 @@ def _on_certificate_expiring(self, event: CertificateExpiringEvent) -> None: self._request_certificate_renewal(scope, cert_type, secrets) + def _on_certificate_invalidated(self, event: CertificateInvalidatedEvent) -> None: + """Handle a cert that was revoked or has expired""" + logger.debug(f"Received certificate invalidation. 
Reason: {event.reason}") + self._on_certificate_expiring(event) + def _request_certificate( self, scope: Scope, diff --git a/lib/charms/opensearch/v0/opensearch_users.py b/lib/charms/opensearch/v0/opensearch_users.py index 7950a442f..9e2b6cb73 100644 --- a/lib/charms/opensearch/v0/opensearch_users.py +++ b/lib/charms/opensearch/v0/opensearch_users.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """OpenSearch user helper functions. @@ -178,7 +178,9 @@ def create_user( logger.error(f"Couldn't create user {str(e)}") raise OpenSearchUserMgmtError(e) - if resp.get("status") != "CREATED": + if resp.get("status") != "CREATED" and not ( + resp.get("status") == "OK" and "updated" in resp.get("message") + ): raise OpenSearchUserMgmtError(f"creating user {user_name} failed") return resp diff --git a/lib/charms/operator_libs_linux/v1/snap.py b/lib/charms/operator_libs_linux/v1/snap.py deleted file mode 100644 index 71cdee39c..000000000 --- a/lib/charms/operator_libs_linux/v1/snap.py +++ /dev/null @@ -1,1065 +0,0 @@ -# Copyright 2021 Canonical Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Representations of the system's Snaps, and abstractions around managing them. - -The `snap` module provides convenience methods for listing, installing, refreshing, and removing -Snap packages, in addition to setting and getting configuration options for them. - -In the `snap` module, `SnapCache` creates a dict-like mapping of `Snap` objects at when -instantiated. Installed snaps are fully populated, and available snaps are lazily-loaded upon -request. This module relies on an installed and running `snapd` daemon to perform operations over -the `snapd` HTTP API. - -`SnapCache` objects can be used to install or modify Snap packages by name in a manner similar to -using the `snap` command from the commandline. - -An example of adding Juju to the system with `SnapCache` and setting a config value: - -```python -try: - cache = snap.SnapCache() - juju = cache["juju"] - - if not juju.present: - juju.ensure(snap.SnapState.Latest, channel="beta") - juju.set({"some.key": "value", "some.key2": "value2"}) -except snap.SnapError as e: - logger.error("An exception occurred when installing charmcraft. Reason: %s", e.message) -``` - -In addition, the `snap` module provides "bare" methods which can act on Snap packages as -simple function calls. :meth:`add`, :meth:`remove`, and :meth:`ensure` are provided, as -well as :meth:`add_local` for installing directly from a local `.snap` file. These return -`Snap` objects. - -As an example of installing several Snaps and checking details: - -```python -try: - nextcloud, charmcraft = snap.add(["nextcloud", "charmcraft"]) - if nextcloud.get("mode") != "production": - nextcloud.set({"mode": "production"}) -except snap.SnapError as e: - logger.error("An exception occurred when installing snaps. 
Reason: %s" % e.message) -``` -""" - -import http.client -import json -import logging -import os -import re -import socket -import subprocess -import sys -import urllib.error -import urllib.parse -import urllib.request -from collections.abc import Mapping -from datetime import datetime, timedelta, timezone -from enum import Enum -from subprocess import CalledProcessError, CompletedProcess -from typing import Any, Dict, Iterable, List, Optional, Union - -logger = logging.getLogger(__name__) - -# The unique Charmhub library identifier, never change it -LIBID = "05394e5893f94f2d90feb7cbe6b633cd" - -# Increment this major API version when introducing breaking changes -LIBAPI = 1 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version -LIBPATCH = 12 - - -# Regex to locate 7-bit C1 ANSI sequences -ansi_filter = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - - -def _cache_init(func): - def inner(*args, **kwargs): - if _Cache.cache is None: - _Cache.cache = SnapCache() - return func(*args, **kwargs) - - return inner - - -# recursive hints seems to error out pytest -JSONType = Union[Dict[str, Any], List[Any], str, int, float] - - -class SnapService: - """Data wrapper for snap services.""" - - def __init__( - self, - daemon: Optional[str] = None, - daemon_scope: Optional[str] = None, - enabled: bool = False, - active: bool = False, - activators: List[str] = [], - **kwargs, - ): - self.daemon = daemon - self.daemon_scope = kwargs.get("daemon-scope", None) or daemon_scope - self.enabled = enabled - self.active = active - self.activators = activators - - def as_dict(self) -> Dict: - """Return instance representation as dict.""" - return { - "daemon": self.daemon, - "daemon_scope": self.daemon_scope, - "enabled": self.enabled, - "active": self.active, - "activators": self.activators, - } - - -class MetaCache(type): - """MetaCache class used for initialising the snap cache.""" - - @property - def cache(cls) -> "SnapCache": - """Property for returning the snap cache.""" - return cls._cache - - @cache.setter - def cache(cls, cache: "SnapCache") -> None: - """Setter for the snap cache.""" - cls._cache = cache - - def __getitem__(cls, name) -> "Snap": - """Snap cache getter.""" - return cls._cache[name] - - -class _Cache(object, metaclass=MetaCache): - _cache = None - - -class Error(Exception): - """Base class of most errors raised by this library.""" - - def __repr__(self): - """Represent the Error class.""" - return "<{}.{} {}>".format(type(self).__module__, type(self).__name__, self.args) - - @property - def name(self): - """Return a string representation of the model plus class.""" - return "<{}.{}>".format(type(self).__module__, type(self).__name__) - - @property - def message(self): - """Return the message passed as an argument.""" - return self.args[0] - - -class SnapAPIError(Error): - """Raised when an HTTP API error occurs talking to the Snapd server.""" - - def __init__(self, body: Dict, code: int, status: str, message: str): - super().__init__(message) # Makes str(e) return message - self.body = body - self.code = code - self.status = status - self._message = message - - def __repr__(self): - """Represent the SnapAPIError class.""" - return "APIError({!r}, {!r}, {!r}, {!r})".format( - self.body, self.code, self.status, self._message - ) - - -class SnapState(Enum): - """The state of a snap on the system or in the cache.""" - - Present = "present" - Absent = "absent" - Latest = "latest" - Available = "available" - - 
-class SnapError(Error): - """Raised when there's an error running snap control commands.""" - - -class SnapNotFoundError(Error): - """Raised when a requested snap is not known to the system.""" - - -class Snap(object): - """Represents a snap package and its properties. - - `Snap` exposes the following properties about a snap: - - name: the name of the snap - - state: a `SnapState` representation of its install status - - channel: "stable", "candidate", "beta", and "edge" are common - - revision: a string representing the snap's revision - - confinement: "classic" or "strict" - """ - - def __init__( - self, - name, - state: SnapState, - channel: str, - revision: int, - confinement: str, - apps: Optional[List[Dict[str, str]]] = None, - cohort: Optional[str] = "", - ) -> None: - self._name = name - self._state = state - self._channel = channel - self._revision = revision - self._confinement = confinement - self._cohort = cohort - self._apps = apps or [] - self._snap_client = SnapClient() - - def __eq__(self, other) -> bool: - """Equality for comparison.""" - return isinstance(other, self.__class__) and ( - self._name, - self._revision, - ) == (other._name, other._revision) - - def __hash__(self): - """Calculate a hash for this snap.""" - return hash((self._name, self._revision)) - - def __repr__(self): - """Represent the object such that it can be reconstructed.""" - return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__) - - def __str__(self): - """Represent the snap object as a string.""" - return "<{}: {}-{}.{} -- {}>".format( - self.__class__.__name__, - self._name, - self._revision, - self._channel, - str(self._state), - ) - - def _snap(self, command: str, optargs: Optional[Iterable[str]] = None) -> str: - """Perform a snap operation. - - Args: - command: the snap command to execute - optargs: an (optional) list of additional arguments to pass, - commonly confinement or channel - - Raises: - SnapError if there is a problem encountered - """ - optargs = optargs or [] - _cmd = ["snap", command, self._name, *optargs] - try: - return subprocess.check_output(_cmd, universal_newlines=True) - except CalledProcessError as e: - raise SnapError( - "Snap: {!r}; command {!r} failed with output = {!r}".format( - self._name, _cmd, e.output - ) - ) - - def _snap_daemons( - self, - command: List[str], - services: Optional[List[str]] = None, - ) -> CompletedProcess: - """Perform snap app commands. - - Args: - command: the snap command to execute - services: the snap service to execute command on - - Raises: - SnapError if there is a problem encountered - """ - if services: - # an attempt to keep the command constrained to the snap instance's services - services = ["{}.{}".format(self._name, service) for service in services] - else: - services = [self._name] - - _cmd = ["snap", *command, *services] - - try: - return subprocess.run(_cmd, universal_newlines=True, check=True, capture_output=True) - except CalledProcessError as e: - raise SnapError("Could not {} for snap [{}]: {}".format(_cmd, self._name, e.stderr)) - - def get(self, key) -> str: - """Fetch a snap configuration value. - - Args: - key: the key to retrieve - """ - return self._snap("get", [key]).strip() - - def set(self, config: Dict) -> str: - """Set a snap configuration value. - - Args: - config: a dictionary containing keys and values specifying the config to set. 
- """ - args = ['{}="{}"'.format(key, val) for key, val in config.items()] - - return self._snap("set", [*args]) - - def unset(self, key) -> str: - """Unset a snap configuration value. - - Args: - key: the key to unset - """ - return self._snap("unset", [key]) - - def start(self, services: Optional[List[str]] = None, enable: Optional[bool] = False) -> None: - """Start a snap's services. - - Args: - services (list): (optional) list of individual snap services to start (otherwise all) - enable (bool): (optional) flag to enable snap services on start. Default `false` - """ - args = ["start", "--enable"] if enable else ["start"] - self._snap_daemons(args, services) - - def stop(self, services: Optional[List[str]] = None, disable: Optional[bool] = False) -> None: - """Stop a snap's services. - - Args: - services (list): (optional) list of individual snap services to stop (otherwise all) - disable (bool): (optional) flag to disable snap services on stop. Default `False` - """ - args = ["stop", "--disable"] if disable else ["stop"] - self._snap_daemons(args, services) - - def logs(self, services: Optional[List[str]] = None, num_lines: Optional[int] = 10) -> str: - """Fetch a snap services' logs. - - Args: - services (list): (optional) list of individual snap services to show logs from - (otherwise all) - num_lines (int): (optional) integer number of log lines to return. Default `10` - """ - args = ["logs", "-n={}".format(num_lines)] if num_lines else ["logs"] - return self._snap_daemons(args, services).stdout - - def connect( - self, plug: str, service: Optional[str] = None, slot: Optional[str] = None - ) -> None: - """Connect a plug to a slot. - - Args: - plug (str): the plug to connect - service (str): (optional) the snap service name to plug into - slot (str): (optional) the snap service slot to plug in to - - Raises: - SnapError if there is a problem encountered - """ - command = ["connect", "{}:{}".format(self._name, plug)] - - if service and slot: - command = command + ["{}:{}".format(service, slot)] - elif slot: - command = command + [slot] - - _cmd = ["snap", *command] - try: - subprocess.run(_cmd, universal_newlines=True, check=True, capture_output=True) - except CalledProcessError as e: - raise SnapError("Could not {} for snap [{}]: {}".format(_cmd, self._name, e.stderr)) - - def hold(self, duration: Optional[timedelta] = None) -> None: - """Add a refresh hold to a snap. - - Args: - duration: duration for the hold, or None (the default) to hold this snap indefinitely. - """ - hold_str = "forever" - if duration is not None: - seconds = round(duration.total_seconds()) - hold_str = f"{seconds}s" - self._snap("refresh", [f"--hold={hold_str}"]) - - def unhold(self) -> None: - """Remove the refresh hold of a snap.""" - self._snap("refresh", ["--unhold"]) - - def restart( - self, services: Optional[List[str]] = None, reload: Optional[bool] = False - ) -> None: - """Restarts a snap's services. - - Args: - services (list): (optional) list of individual snap services to show logs from. - (otherwise all) - reload (bool): (optional) flag to use the service reload command, if available. - Default `False` - """ - args = ["restart", "--reload"] if reload else ["restart"] - self._snap_daemons(args, services) - - def _install( - self, - channel: Optional[str] = "", - cohort: Optional[str] = "", - revision: Optional[int] = None, - ) -> None: - """Add a snap to the system. 
- - Args: - channel: the channel to install from - cohort: optional, the key of a cohort that this snap belongs to - revision: optional, the revision of the snap to install - """ - cohort = cohort or self._cohort - - args = [] - if self.confinement == "classic": - args.append("--classic") - if channel: - args.append('--channel="{}"'.format(channel)) - if revision: - args.append('--revision="{}"'.format(revision)) - if cohort: - args.append('--cohort="{}"'.format(cohort)) - - self._snap("install", args) - - def _refresh( - self, - channel: Optional[str] = "", - cohort: Optional[str] = "", - revision: Optional[int] = None, - leave_cohort: Optional[bool] = False, - ) -> None: - """Refresh a snap. - - Args: - channel: the channel to install from - cohort: optionally, specify a cohort. - revision: optionally, specify the revision of the snap to refresh - leave_cohort: leave the current cohort. - """ - args = [] - if channel: - args.append('--channel="{}"'.format(channel)) - - if revision: - args.append('--revision="{}"'.format(revision)) - - if not cohort: - cohort = self._cohort - - if leave_cohort: - self._cohort = "" - args.append("--leave-cohort") - elif cohort: - args.append('--cohort="{}"'.format(cohort)) - - self._snap("refresh", args) - - def _remove(self) -> str: - """Remove a snap from the system.""" - return self._snap("remove") - - @property - def name(self) -> str: - """Returns the name of the snap.""" - return self._name - - def ensure( - self, - state: SnapState, - classic: Optional[bool] = False, - channel: Optional[str] = "", - cohort: Optional[str] = "", - revision: Optional[int] = None, - ): - """Ensure that a snap is in a given state. - - Args: - state: a `SnapState` to reconcile to. - classic: an (Optional) boolean indicating whether classic confinement should be used - channel: the channel to install from - cohort: optional. Specify the key of a snap cohort. - revision: optional. the revision of the snap to install/refresh - - While both channel and revision could be specified, the underlying snap install/refresh - command will determine which one takes precedence (revision at this time) - - Raises: - SnapError if an error is encountered - """ - self._confinement = "classic" if classic or self._confinement == "classic" else "" - - if state not in (SnapState.Present, SnapState.Latest): - # We are attempting to remove this snap. - if self._state in (SnapState.Present, SnapState.Latest): - # The snap is installed, so we run _remove. - self._remove() - else: - # The snap is not installed -- no need to do anything. - pass - else: - # We are installing or refreshing a snap. - if self._state not in (SnapState.Present, SnapState.Latest): - # The snap is not installed, so we install it. - self._install(channel, cohort, revision) - else: - # The snap is installed, but we are changing it (e.g., switching channels). 
- self._refresh(channel, cohort, revision) - - self._update_snap_apps() - self._state = state - - def _update_snap_apps(self) -> None: - """Update a snap's apps after snap changes state.""" - try: - self._apps = self._snap_client.get_installed_snap_apps(self._name) - except SnapAPIError: - logger.debug("Unable to retrieve snap apps for {}".format(self._name)) - self._apps = [] - - @property - def present(self) -> bool: - """Report whether or not a snap is present.""" - return self._state in (SnapState.Present, SnapState.Latest) - - @property - def latest(self) -> bool: - """Report whether the snap is the most recent version.""" - return self._state is SnapState.Latest - - @property - def state(self) -> SnapState: - """Report the current snap state.""" - return self._state - - @state.setter - def state(self, state: SnapState) -> None: - """Set the snap state to a given value. - - Args: - state: a `SnapState` to reconcile the snap to. - - Raises: - SnapError if an error is encountered - """ - if self._state is not state: - self.ensure(state) - self._state = state - - @property - def revision(self) -> int: - """Returns the revision for a snap.""" - return self._revision - - @property - def channel(self) -> str: - """Returns the channel for a snap.""" - return self._channel - - @property - def confinement(self) -> str: - """Returns the confinement for a snap.""" - return self._confinement - - @property - def apps(self) -> List: - """Returns (if any) the installed apps of the snap.""" - self._update_snap_apps() - return self._apps - - @property - def services(self) -> Dict: - """Returns (if any) the installed services of the snap.""" - self._update_snap_apps() - services = {} - for app in self._apps: - if "daemon" in app: - services[app["name"]] = SnapService(**app).as_dict() - - return services - - @property - def held(self) -> bool: - """Report whether the snap has a hold.""" - info = self._snap("info") - return "hold:" in info - - -class _UnixSocketConnection(http.client.HTTPConnection): - """Implementation of HTTPConnection that connects to a named Unix socket.""" - - def __init__(self, host, timeout=None, socket_path=None): - if timeout is None: - super().__init__(host) - else: - super().__init__(host, timeout=timeout) - self.socket_path = socket_path - - def connect(self): - """Override connect to use Unix socket (instead of TCP socket).""" - if not hasattr(socket, "AF_UNIX"): - raise NotImplementedError("Unix sockets not supported on {}".format(sys.platform)) - self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.sock.connect(self.socket_path) - if self.timeout is not None: - self.sock.settimeout(self.timeout) - - -class _UnixSocketHandler(urllib.request.AbstractHTTPHandler): - """Implementation of HTTPHandler that uses a named Unix socket.""" - - def __init__(self, socket_path: str): - super().__init__() - self.socket_path = socket_path - - def http_open(self, req) -> http.client.HTTPResponse: - """Override http_open to use a Unix socket connection (instead of TCP).""" - return self.do_open(_UnixSocketConnection, req, socket_path=self.socket_path) - - -class SnapClient: - """Snapd API client to talk to HTTP over UNIX sockets. - - In order to avoid shelling out and/or involving sudo in calling the snapd API, - use a wrapper based on the Pebble Client, trimmed down to only the utility methods - needed for talking to snapd. 
- """ - - def __init__( - self, - socket_path: str = "/run/snapd.socket", - opener: Optional[urllib.request.OpenerDirector] = None, - base_url: str = "http://localhost/v2/", - timeout: float = 5.0, - ): - """Initialize a client instance. - - Args: - socket_path: a path to the socket on the filesystem. Defaults to /run/snap/snapd.socket - opener: specifies an opener for unix socket, if unspecified a default is used - base_url: base url for making requests to the snap client. Defaults to - http://localhost/v2/ - timeout: timeout in seconds to use when making requests to the API. Default is 5.0s. - """ - if opener is None: - opener = self._get_default_opener(socket_path) - self.opener = opener - self.base_url = base_url - self.timeout = timeout - - @classmethod - def _get_default_opener(cls, socket_path): - """Build the default opener to use for requests (HTTP over Unix socket).""" - opener = urllib.request.OpenerDirector() - opener.add_handler(_UnixSocketHandler(socket_path)) - opener.add_handler(urllib.request.HTTPDefaultErrorHandler()) - opener.add_handler(urllib.request.HTTPRedirectHandler()) - opener.add_handler(urllib.request.HTTPErrorProcessor()) - return opener - - def _request( - self, - method: str, - path: str, - query: Dict = None, - body: Dict = None, - ) -> JSONType: - """Make a JSON request to the Snapd server with the given HTTP method and path. - - If query dict is provided, it is encoded and appended as a query string - to the URL. If body dict is provided, it is serialied as JSON and used - as the HTTP body (with Content-Type: "application/json"). The resulting - body is decoded from JSON. - """ - headers = {"Accept": "application/json"} - data = None - if body is not None: - data = json.dumps(body).encode("utf-8") - headers["Content-Type"] = "application/json" - - response = self._request_raw(method, path, query, headers, data) - return json.loads(response.read().decode())["result"] - - def _request_raw( - self, - method: str, - path: str, - query: Dict = None, - headers: Dict = None, - data: bytes = None, - ) -> http.client.HTTPResponse: - """Make a request to the Snapd server; return the raw HTTPResponse object.""" - url = self.base_url + path - if query: - url = url + "?" + urllib.parse.urlencode(query) - - if headers is None: - headers = {} - request = urllib.request.Request(url, method=method, data=data, headers=headers) - - try: - response = self.opener.open(request, timeout=self.timeout) - except urllib.error.HTTPError as e: - code = e.code - status = e.reason - message = "" - try: - body = json.loads(e.read().decode())["result"] - except (IOError, ValueError, KeyError) as e2: - # Will only happen on read error or if Pebble sends invalid JSON. 
- body = {} - message = "{} - {}".format(type(e2).__name__, e2) - raise SnapAPIError(body, code, status, message) - except urllib.error.URLError as e: - raise SnapAPIError({}, 500, "Not found", e.reason) - return response - - def get_installed_snaps(self) -> Dict: - """Get information about currently installed snaps.""" - return self._request("GET", "snaps") - - def get_snap_information(self, name: str) -> Dict: - """Query the snap server for information about single snap.""" - return self._request("GET", "find", {"name": name})[0] - - def get_installed_snap_apps(self, name: str) -> List: - """Query the snap server for apps belonging to a named, currently installed snap.""" - return self._request("GET", "apps", {"names": name, "select": "service"}) - - -class SnapCache(Mapping): - """An abstraction to represent installed/available packages. - - When instantiated, `SnapCache` iterates through the list of installed - snaps using the `snapd` HTTP API, and a list of available snaps by reading - the filesystem to populate the cache. Information about available snaps is lazily-loaded - from the `snapd` API when requested. - """ - - def __init__(self): - if not self.snapd_installed: - raise SnapError("snapd is not installed or not in /usr/bin") from None - self._snap_client = SnapClient() - self._snap_map = {} - if self.snapd_installed: - self._load_available_snaps() - self._load_installed_snaps() - - def __contains__(self, key: str) -> bool: - """Check if a given snap is in the cache.""" - return key in self._snap_map - - def __len__(self) -> int: - """Report number of items in the snap cache.""" - return len(self._snap_map) - - def __iter__(self) -> Iterable["Snap"]: - """Provide iterator for the snap cache.""" - return iter(self._snap_map.values()) - - def __getitem__(self, snap_name: str) -> Snap: - """Return either the installed version or latest version for a given snap.""" - snap = self._snap_map.get(snap_name, None) - if snap is None: - # The snapd cache file may not have existed when _snap_map was - # populated. This is normal. - try: - self._snap_map[snap_name] = self._load_info(snap_name) - except SnapAPIError: - raise SnapNotFoundError("Snap '{}' not found!".format(snap_name)) - - return self._snap_map[snap_name] - - @property - def snapd_installed(self) -> bool: - """Check whether snapd has been installled on the system.""" - return os.path.isfile("/usr/bin/snap") - - def _load_available_snaps(self) -> None: - """Load the list of available snaps from disk. - - Leave them empty and lazily load later if asked for. - """ - if not os.path.isfile("/var/cache/snapd/names"): - # The snap catalog may not be populated yet; this is normal. - # snapd updates the cache infrequently and the cache file may not - # currently exist. - return - - with open("/var/cache/snapd/names", "r") as f: - for line in f: - if line.strip(): - self._snap_map[line.strip()] = None - - def _load_installed_snaps(self) -> None: - """Load the installed snaps into the dict.""" - installed = self._snap_client.get_installed_snaps() - - for i in installed: - snap = Snap( - name=i["name"], - state=SnapState.Latest, - channel=i["channel"], - revision=int(i["revision"]), - confinement=i["confinement"], - apps=i.get("apps", None), - ) - self._snap_map[snap.name] = snap - - def _load_info(self, name) -> Snap: - """Load info for snaps which are not installed if requested. 
- - Args: - name: a string representing the name of the snap - """ - info = self._snap_client.get_snap_information(name) - - return Snap( - name=info["name"], - state=SnapState.Available, - channel=info["channel"], - revision=int(info["revision"]), - confinement=info["confinement"], - apps=None, - ) - - -@_cache_init -def add( - snap_names: Union[str, List[str]], - state: Union[str, SnapState] = SnapState.Latest, - channel: Optional[str] = "", - classic: Optional[bool] = False, - cohort: Optional[str] = "", - revision: Optional[int] = None, -) -> Union[Snap, List[Snap]]: - """Add a snap to the system. - - Args: - snap_names: the name or names of the snaps to install - state: a string or `SnapState` representation of the desired state, one of - [`Present` or `Latest`] - channel: an (Optional) channel as a string. Defaults to 'latest' - classic: an (Optional) boolean specifying whether it should be added with classic - confinement. Default `False` - cohort: an (Optional) string specifying the snap cohort to use - revision: an (Optional) integer specifying the snap revision to use - - Raises: - SnapError if some snaps failed to install or were not found. - """ - if not channel and not revision: - channel = "latest" - - snap_names = [snap_names] if type(snap_names) is str else snap_names - if not snap_names: - raise TypeError("Expected at least one snap to add, received zero!") - - if type(state) is str: - state = SnapState(state) - - return _wrap_snap_operations(snap_names, state, channel, classic, cohort, revision) - - -@_cache_init -def remove(snap_names: Union[str, List[str]]) -> Union[Snap, List[Snap]]: - """Remove specified snap(s) from the system. - - Args: - snap_names: the name or names of the snaps to install - - Raises: - SnapError if some snaps failed to install. - """ - snap_names = [snap_names] if type(snap_names) is str else snap_names - if not snap_names: - raise TypeError("Expected at least one snap to add, received zero!") - - return _wrap_snap_operations(snap_names, SnapState.Absent, "", False) - - -@_cache_init -def ensure( - snap_names: Union[str, List[str]], - state: str, - channel: Optional[str] = "", - classic: Optional[bool] = False, - cohort: Optional[str] = "", - revision: Optional[int] = None, -) -> Union[Snap, List[Snap]]: - """Ensure specified snaps are in a given state on the system. - - Args: - snap_names: the name(s) of the snaps to operate on - state: a string representation of the desired state, from `SnapState` - channel: an (Optional) channel as a string. Defaults to 'latest' - classic: an (Optional) boolean specifying whether it should be added with classic - confinement. Default `False` - cohort: an (Optional) string specifying the snap cohort to use - revision: an (Optional) integer specifying the snap revision to use - - When both channel and revision are specified, the underlying snap install/refresh - command will determine the precedence (revision at the time of adding this) - - Raises: - SnapError if the snap is not in the cache. 
- """ - if not revision and not channel: - channel = "latest" - - if state in ("present", "latest") or revision: - return add(snap_names, SnapState(state), channel, classic, cohort, revision) - else: - return remove(snap_names) - - -def _wrap_snap_operations( - snap_names: List[str], - state: SnapState, - channel: str, - classic: bool, - cohort: Optional[str] = "", - revision: Optional[int] = None, -) -> Union[Snap, List[Snap]]: - """Wrap common operations for bare commands.""" - snaps = {"success": [], "failed": []} - - op = "remove" if state is SnapState.Absent else "install or refresh" - - for s in snap_names: - try: - snap = _Cache[s] - if state is SnapState.Absent: - snap.ensure(state=SnapState.Absent) - else: - snap.ensure( - state=state, classic=classic, channel=channel, cohort=cohort, revision=revision - ) - snaps["success"].append(snap) - except SnapError as e: - logger.warning("Failed to {} snap {}: {}!".format(op, s, e.message)) - snaps["failed"].append(s) - except SnapNotFoundError: - logger.warning("Snap '{}' not found in cache!".format(s)) - snaps["failed"].append(s) - - if len(snaps["failed"]): - raise SnapError( - "Failed to install or refresh snap(s): {}".format(", ".join(list(snaps["failed"]))) - ) - - return snaps["success"] if len(snaps["success"]) > 1 else snaps["success"][0] - - -def install_local( - filename: str, classic: Optional[bool] = False, dangerous: Optional[bool] = False -) -> Snap: - """Perform a snap operation. - - Args: - filename: the path to a local .snap file to install - classic: whether to use classic confinement - dangerous: whether --dangerous should be passed to install snaps without a signature - - Raises: - SnapError if there is a problem encountered - """ - _cmd = [ - "snap", - "install", - filename, - ] - if classic: - _cmd.append("--classic") - if dangerous: - _cmd.append("--dangerous") - try: - result = subprocess.check_output(_cmd, universal_newlines=True).splitlines()[-1] - snap_name, _ = result.split(" ", 1) - snap_name = ansi_filter.sub("", snap_name) - - c = SnapCache() - - try: - return c[snap_name] - except SnapAPIError as e: - logger.error( - "Could not find snap {} when querying Snapd socket: {}".format(snap_name, e.body) - ) - raise SnapError("Failed to find snap {} in Snap cache".format(snap_name)) - except CalledProcessError as e: - raise SnapError("Could not install snap {}: {}".format(filename, e.output)) - - -def _system_set(config_item: str, value: str) -> None: - """Set system snapd config values. - - Args: - config_item: name of snap system setting. E.g. 'refresh.hold' - value: value to assign - """ - _cmd = ["snap", "set", "system", "{}={}".format(config_item, value)] - try: - subprocess.check_call(_cmd, universal_newlines=True) - except CalledProcessError: - raise SnapError("Failed setting system config '{}' to '{}'".format(config_item, value)) - - -def hold_refresh(days: int = 90, forever: bool = False) -> bool: - """Set the system-wide snap refresh hold. - - Args: - days: number of days to hold system refreshes for. Maximum 90. Set to zero to remove hold. - forever: if True, will set a hold forever. 
- """ - if not isinstance(forever, bool): - raise TypeError("forever must be a bool") - if not isinstance(days, int): - raise TypeError("days must be an int") - if forever: - _system_set("refresh.hold", "forever") - logger.info("Set system-wide snap refresh hold to: forever") - elif days == 0: - _system_set("refresh.hold", "") - logger.info("Removed system-wide snap refresh hold") - else: - # Currently the snap daemon can only hold for a maximum of 90 days - if not 1 <= days <= 90: - raise ValueError("days must be between 1 and 90") - # Add the number of days to current time - target_date = datetime.now(timezone.utc).astimezone() + timedelta(days=days) - # Format for the correct datetime format - hold_date = target_date.strftime("%Y-%m-%dT%H:%M:%S%z") - # Python dumps the offset in format '+0100', we need '+01:00' - hold_date = "{0}:{1}".format(hold_date[:-2], hold_date[-2:]) - # Actually set the hold date - _system_set("refresh.hold", hold_date) - logger.info("Set system-wide snap refresh hold to: %s", hold_date) diff --git a/lib/charms/operator_libs_linux/v2/snap.py b/lib/charms/operator_libs_linux/v2/snap.py index ef426775d..6d4dc385a 100644 --- a/lib/charms/operator_libs_linux/v2/snap.py +++ b/lib/charms/operator_libs_linux/v2/snap.py @@ -83,7 +83,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 5 +LIBPATCH = 6 # Regex to locate 7-bit C1 ANSI sequences @@ -584,13 +584,16 @@ def ensure( "Installing snap %s, revision %s, tracking %s", self._name, revision, channel ) self._install(channel, cohort, revision) - else: + logger.info("The snap installation completed successfully") + elif revision is None or revision != self._revision: # The snap is installed, but we are changing it (e.g., switching channels). logger.info( "Refreshing snap %s, revision %s, tracking %s", self._name, revision, channel ) self._refresh(channel=channel, cohort=cohort, revision=revision, devmode=devmode) - logger.info("The snap installation completed successfully") + logger.info("The snap refresh completed successfully") + else: + logger.info("Refresh of snap %s was unnecessary", self._name) self._update_snap_apps() self._state = state diff --git a/lib/charms/tls_certificates_interface/v3/tls_certificates.py b/lib/charms/tls_certificates_interface/v3/tls_certificates.py index cbdd80d19..2e45475a5 100644 --- a/lib/charms/tls_certificates_interface/v3/tls_certificates.py +++ b/lib/charms/tls_certificates_interface/v3/tls_certificates.py @@ -111,6 +111,7 @@ def _on_certificate_request(self, event: CertificateCreationRequestEvent) -> Non ca=ca_certificate, chain=[ca_certificate, certificate], relation_id=event.relation_id, + recommended_expiry_notification_time=720, ) def _on_certificate_revocation_request(self, event: CertificateRevocationRequestEvent) -> None: @@ -316,7 +317,7 @@ def _on_all_certificates_invalidated(self, event: AllCertificatesInvalidatedEven # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 10 +LIBPATCH = 14 PYDEPS = ["cryptography", "jsonschema"] @@ -453,11 +454,35 @@ class ProviderCertificate: ca: str chain: List[str] revoked: bool + expiry_time: datetime + expiry_notification_time: Optional[datetime] = None def chain_as_pem(self) -> str: """Return full certificate chain as a PEM string.""" return "\n\n".join(reversed(self.chain)) + def to_json(self) -> str: + """Return the object as a JSON string. 
+ + Returns: + str: JSON representation of the object + """ + return json.dumps( + { + "relation_id": self.relation_id, + "application_name": self.application_name, + "csr": self.csr, + "certificate": self.certificate, + "ca": self.ca, + "chain": self.chain, + "revoked": self.revoked, + "expiry_time": self.expiry_time.isoformat(), + "expiry_notification_time": self.expiry_notification_time.isoformat() + if self.expiry_notification_time + else None, + } + ) + class CertificateAvailableEvent(EventBase): """Charm Event triggered when a TLS certificate is available.""" @@ -682,21 +707,49 @@ def _get_closest_future_time( ) -def _get_certificate_expiry_time(certificate: str) -> Optional[datetime]: - """Extract expiry time from a certificate string. +def calculate_expiry_notification_time( + validity_start_time: datetime, + expiry_time: datetime, + provider_recommended_notification_time: Optional[int], + requirer_recommended_notification_time: Optional[int], +) -> datetime: + """Calculate a reasonable time to notify the user about the certificate expiry. + + It takes into account the time recommended by the provider and by the requirer. + Time recommended by the provider is preferred, + then time recommended by the requirer, + then dynamically calculated time. Args: - certificate (str): x509 certificate as a string + validity_start_time: Certificate validity time + expiry_time: Certificate expiry time + provider_recommended_notification_time: + Time in hours prior to expiry to notify the user. + Recommended by the provider. + requirer_recommended_notification_time: + Time in hours prior to expiry to notify the user. + Recommended by the requirer. Returns: - Optional[datetime]: Expiry datetime or None + datetime: Time to notify the user about the certificate expiry. """ - try: - certificate_object = x509.load_pem_x509_certificate(data=certificate.encode()) - return certificate_object.not_valid_after_utc - except ValueError: - logger.warning("Could not load certificate.") - return None + if provider_recommended_notification_time is not None: + provider_recommended_notification_time = abs(provider_recommended_notification_time) + provider_recommendation_time_delta = ( + expiry_time - timedelta(hours=provider_recommended_notification_time) + ) + if validity_start_time < provider_recommendation_time_delta: + return provider_recommendation_time_delta + + if requirer_recommended_notification_time is not None: + requirer_recommended_notification_time = abs(requirer_recommended_notification_time) + requirer_recommendation_time_delta = ( + expiry_time - timedelta(hours=requirer_recommended_notification_time) + ) + if validity_start_time < requirer_recommendation_time_delta: + return requirer_recommendation_time_delta + calculated_hours = (expiry_time - validity_start_time).total_seconds() / (3600 * 3) + return expiry_time - timedelta(hours=calculated_hours) def generate_ca( @@ -965,6 +1018,8 @@ def generate_csr( # noqa: C901 organization: Optional[str] = None, email_address: Optional[str] = None, country_name: Optional[str] = None, + state_or_province_name: Optional[str] = None, + locality_name: Optional[str] = None, private_key_password: Optional[bytes] = None, sans: Optional[List[str]] = None, sans_oid: Optional[List[str]] = None, @@ -983,6 +1038,8 @@ def generate_csr( # noqa: C901 organization (str): Name of organization. email_address (str): Email address. country_name (str): Country Name. + state_or_province_name (str): State or Province Name. + locality_name (str): Locality Name. 
private_key_password (bytes): Private key password sans (list): Use sans_dns - this will be deprecated in a future release List of DNS subject alternative names (keeping it for now for backward compatibility) @@ -1008,6 +1065,12 @@ def generate_csr( # noqa: C901 subject_name.append(x509.NameAttribute(x509.NameOID.EMAIL_ADDRESS, email_address)) if country_name: subject_name.append(x509.NameAttribute(x509.NameOID.COUNTRY_NAME, country_name)) + if state_or_province_name: + subject_name.append( + x509.NameAttribute(x509.NameOID.STATE_OR_PROVINCE_NAME, state_or_province_name) + ) + if locality_name: + subject_name.append(x509.NameAttribute(x509.NameOID.LOCALITY_NAME, locality_name)) csr = x509.CertificateSigningRequestBuilder(subject_name=x509.Name(subject_name)) _sans: List[x509.GeneralName] = [] @@ -1135,6 +1198,7 @@ def _add_certificate( certificate_signing_request: str, ca: str, chain: List[str], + recommended_expiry_notification_time: Optional[int] = None, ) -> None: """Add certificate to relation data. @@ -1144,6 +1208,8 @@ def _add_certificate( certificate_signing_request (str): Certificate Signing Request ca (str): CA Certificate chain (list): CA Chain + recommended_expiry_notification_time (int): + Time in hours before the certificate expires to notify the user. Returns: None @@ -1161,6 +1227,7 @@ def _add_certificate( "certificate_signing_request": certificate_signing_request, "ca": ca, "chain": chain, + "recommended_expiry_notification_time": recommended_expiry_notification_time, } provider_relation_data = self._load_app_relation_data(relation) provider_certificates = provider_relation_data.get("certificates", []) @@ -1227,6 +1294,7 @@ def set_relation_certificate( ca: str, chain: List[str], relation_id: int, + recommended_expiry_notification_time: Optional[int] = None, ) -> None: """Add certificates to relation data. @@ -1236,6 +1304,8 @@ def set_relation_certificate( ca (str): CA Certificate chain (list): CA Chain relation_id (int): Juju relation ID + recommended_expiry_notification_time (int): + Recommended time in hours before the certificate expires to notify the user. 
Returns: None @@ -1257,6 +1327,7 @@ def set_relation_certificate( certificate_signing_request=certificate_signing_request.strip(), ca=ca.strip(), chain=[cert.strip() for cert in chain], + recommended_expiry_notification_time=recommended_expiry_notification_time, ) def remove_certificate(self, certificate: str) -> None: @@ -1310,6 +1381,13 @@ def get_provider_certificates( provider_relation_data = self._load_app_relation_data(relation) provider_certificates = provider_relation_data.get("certificates", []) for certificate in provider_certificates: + try: + certificate_object = x509.load_pem_x509_certificate( + data=certificate["certificate"].encode() + ) + except ValueError as e: + logger.error("Could not load certificate - Skipping: %s", e) + continue provider_certificate = ProviderCertificate( relation_id=relation.id, application_name=relation.app.name, @@ -1318,6 +1396,10 @@ def get_provider_certificates( ca=certificate["ca"], chain=certificate["chain"], revoked=certificate.get("revoked", False), + expiry_time=certificate_object.not_valid_after_utc, + expiry_notification_time=certificate.get( + "recommended_expiry_notification_time" + ), ) certificates.append(provider_certificate) return certificates @@ -1475,15 +1557,17 @@ def __init__( self, charm: CharmBase, relationship_name: str, - expiry_notification_time: int = 168, + expiry_notification_time: Optional[int] = None, ): """Generate/use private key and observes relation changed event. Args: charm: Charm object relationship_name: Juju relation name - expiry_notification_time (int): Time difference between now and expiry (in hours). - Used to trigger the CertificateExpiring event. Default: 7 days. + expiry_notification_time (int): Number of hours prior to certificate expiry. + Used to trigger the CertificateExpiring event. + This value is used as a recommendation only, + The actual value is calculated taking into account the provider's recommendation. 
""" super().__init__(charm, relationship_name) if not JujuVersion.from_environ().has_secrets: @@ -1544,9 +1628,25 @@ def get_provider_certificates(self) -> List[ProviderCertificate]: if not certificate: logger.warning("No certificate found in relation data - Skipping") continue + try: + certificate_object = x509.load_pem_x509_certificate(data=certificate.encode()) + except ValueError as e: + logger.error("Could not load certificate - Skipping: %s", e) + continue ca = provider_certificate_dict.get("ca") chain = provider_certificate_dict.get("chain", []) csr = provider_certificate_dict.get("certificate_signing_request") + recommended_expiry_notification_time = provider_certificate_dict.get( + "recommended_expiry_notification_time" + ) + expiry_time = certificate_object.not_valid_after_utc + validity_start_time = certificate_object.not_valid_before_utc + expiry_notification_time = calculate_expiry_notification_time( + validity_start_time=validity_start_time, + expiry_time=expiry_time, + provider_recommended_notification_time=recommended_expiry_notification_time, + requirer_recommended_notification_time=self.expiry_notification_time, + ) if not csr: logger.warning("No CSR found in relation data - Skipping") continue @@ -1559,6 +1659,8 @@ def get_provider_certificates(self) -> List[ProviderCertificate]: ca=ca, chain=chain, revoked=revoked, + expiry_time=expiry_time, + expiry_notification_time=expiry_notification_time, ) provider_certificates.append(provider_certificate) return provider_certificates @@ -1708,13 +1810,9 @@ def get_expiring_certificates(self) -> List[ProviderCertificate]: expiring_certificates: List[ProviderCertificate] = [] for requirer_csr in self.get_certificate_signing_requests(fulfilled_only=True): if cert := self._find_certificate_in_relation_data(requirer_csr.csr): - expiry_time = _get_certificate_expiry_time(cert.certificate) - if not expiry_time: + if not cert.expiry_time or not cert.expiry_notification_time: continue - expiry_notification_time = expiry_time - timedelta( - hours=self.expiry_notification_time - ) - if datetime.now(timezone.utc) > expiry_notification_time: + if datetime.now(timezone.utc) > cert.expiry_notification_time: expiring_certificates.append(cert) return expiring_certificates @@ -1776,6 +1874,9 @@ def _on_relation_changed(self, event: RelationChangedEvent) -> None: if certificate.csr in requirer_csrs: if certificate.revoked: with suppress(SecretNotFoundError): + logger.debug( + "Removing secret with label %s", f"{LIBID}-{certificate.csr}" + ) secret = self.model.get_secret(label=f"{LIBID}-{certificate.csr}") secret.remove_all_revisions() self.on.certificate_invalidated.emit( @@ -1787,16 +1888,22 @@ def _on_relation_changed(self, event: RelationChangedEvent) -> None: ) else: try: + logger.debug( + "Setting secret with label %s", f"{LIBID}-{certificate.csr}" + ) secret = self.model.get_secret(label=f"{LIBID}-{certificate.csr}") secret.set_content({"certificate": certificate.certificate}) secret.set_info( - expire=self._get_next_secret_expiry_time(certificate.certificate), + expire=self._get_next_secret_expiry_time(certificate), ) except SecretNotFoundError: + logger.debug( + "Creating new secret with label %s", f"{LIBID}-{certificate.csr}" + ) secret = self.charm.unit.add_secret( {"certificate": certificate.certificate}, label=f"{LIBID}-{certificate.csr}", - expire=self._get_next_secret_expiry_time(certificate.certificate), + expire=self._get_next_secret_expiry_time(certificate), ) self.on.certificate_available.emit( 
certificate_signing_request=certificate.csr, @@ -1805,7 +1912,7 @@ def _on_relation_changed(self, event: RelationChangedEvent) -> None: chain=certificate.chain, ) - def _get_next_secret_expiry_time(self, certificate: str) -> Optional[datetime]: + def _get_next_secret_expiry_time(self, certificate: ProviderCertificate) -> Optional[datetime]: """Return the expiry time or expiry notification time. Extracts the expiry time from the provided certificate, calculates the @@ -1813,17 +1920,18 @@ def _get_next_secret_expiry_time(self, certificate: str) -> Optional[datetime]: the future. Args: - certificate: x509 certificate + certificate: ProviderCertificate object Returns: Optional[datetime]: None if the certificate expiry time cannot be read, next expiry time otherwise. """ - expiry_time = _get_certificate_expiry_time(certificate) - if not expiry_time: + if not certificate.expiry_time or not certificate.expiry_notification_time: return None - expiry_notification_time = expiry_time - timedelta(hours=self.expiry_notification_time) - return _get_closest_future_time(expiry_notification_time, expiry_time) + return _get_closest_future_time( + certificate.expiry_notification_time, + certificate.expiry_time, + ) def _on_relation_broken(self, event: RelationBrokenEvent) -> None: """Handle Relation Broken Event. @@ -1864,20 +1972,19 @@ def _on_secret_expired(self, event: SecretExpiredEvent) -> None: event.secret.remove_all_revisions() return - expiry_time = _get_certificate_expiry_time(provider_certificate.certificate) - if not expiry_time: + if not provider_certificate.expiry_time: # A secret expired but matching certificate is invalid. Cleaning up event.secret.remove_all_revisions() return - if datetime.now(timezone.utc) < expiry_time: + if datetime.now(timezone.utc) < provider_certificate.expiry_time: logger.warning("Certificate almost expired") self.on.certificate_expiring.emit( certificate=provider_certificate.certificate, - expiry=expiry_time.isoformat(), + expiry=provider_certificate.expiry_time.isoformat(), ) event.secret.set_info( - expire=_get_certificate_expiry_time(provider_certificate.certificate), + expire=provider_certificate.expiry_time, ) else: logger.warning("Certificate is expired") diff --git a/poetry.lock b/poetry.lock index ce90e46fd..c50757079 100644 --- a/poetry.lock +++ b/poetry.lock @@ -39,38 +39,38 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p [[package]] name = "bcrypt" -version = "4.1.2" +version = "4.1.3" description = "Modern password hashing for your software and your servers" optional = false python-versions = ">=3.7" files = [ - {file = "bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e"}, - {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1"}, - {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326"}, - {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c"}, - {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966"}, - {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2"}, - {file = 
"bcrypt-4.1.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c"}, - {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5"}, - {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0"}, - {file = "bcrypt-4.1.2-cp37-abi3-win32.whl", hash = "sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369"}, - {file = "bcrypt-4.1.2-cp37-abi3-win_amd64.whl", hash = "sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551"}, - {file = "bcrypt-4.1.2-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63"}, - {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483"}, - {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc"}, - {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7"}, - {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c"}, - {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a"}, - {file = "bcrypt-4.1.2-cp39-abi3-win32.whl", hash = "sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f"}, - {file = "bcrypt-4.1.2-cp39-abi3-win_amd64.whl", hash = "sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42"}, - {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946"}, - {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d"}, - {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab"}, - {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb"}, - {file = "bcrypt-4.1.2.tar.gz", hash = "sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258"}, + {file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"}, + {file = 
"bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"}, + {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"}, + {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"}, + {file = "bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"}, + {file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"}, + {file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"}, + {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"}, + {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"}, + {file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"}, + {file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"}, + {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"}, + {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"}, + {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"}, + {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"}, + {file = "bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"}, ] [package.extras] @@ -79,33 +79,33 @@ typecheck = ["mypy"] [[package]] name = "black" 
-version = "23.12.1" +version = "24.4.2" description = "The uncompromising code formatter." optional = false python-versions = ">=3.8" files = [ - {file = "black-23.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2"}, - {file = "black-23.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba"}, - {file = "black-23.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a920b569dc6b3472513ba6ddea21f440d4b4c699494d2e972a1753cdc25df7b0"}, - {file = "black-23.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:3fa4be75ef2a6b96ea8d92b1587dd8cb3a35c7e3d51f0738ced0781c3aa3a5a3"}, - {file = "black-23.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8d4df77958a622f9b5a4c96edb4b8c0034f8434032ab11077ec6c56ae9f384ba"}, - {file = "black-23.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:602cfb1196dc692424c70b6507593a2b29aac0547c1be9a1d1365f0d964c353b"}, - {file = "black-23.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c4352800f14be5b4864016882cdba10755bd50805c95f728011bcb47a4afd59"}, - {file = "black-23.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:0808494f2b2df923ffc5723ed3c7b096bd76341f6213989759287611e9837d50"}, - {file = "black-23.12.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:25e57fd232a6d6ff3f4478a6fd0580838e47c93c83eaf1ccc92d4faf27112c4e"}, - {file = "black-23.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2d9e13db441c509a3763a7a3d9a49ccc1b4e974a47be4e08ade2a228876500ec"}, - {file = "black-23.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1bd9c210f8b109b1762ec9fd36592fdd528485aadb3f5849b2740ef17e674e"}, - {file = "black-23.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:ae76c22bde5cbb6bfd211ec343ded2163bba7883c7bc77f6b756a1049436fbb9"}, - {file = "black-23.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1fa88a0f74e50e4487477bc0bb900c6781dbddfdfa32691e780bf854c3b4a47f"}, - {file = "black-23.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a4d6a9668e45ad99d2f8ec70d5c8c04ef4f32f648ef39048d010b0689832ec6d"}, - {file = "black-23.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b18fb2ae6c4bb63eebe5be6bd869ba2f14fd0259bda7d18a46b764d8fb86298a"}, - {file = "black-23.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:c04b6d9d20e9c13f43eee8ea87d44156b8505ca8a3c878773f68b4e4812a421e"}, - {file = "black-23.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e1b38b3135fd4c025c28c55ddfc236b05af657828a8a6abe5deec419a0b7055"}, - {file = "black-23.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4f0031eaa7b921db76decd73636ef3a12c942ed367d8c3841a0739412b260a54"}, - {file = "black-23.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97e56155c6b737854e60a9ab1c598ff2533d57e7506d97af5481141671abf3ea"}, - {file = "black-23.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:dd15245c8b68fe2b6bd0f32c1556509d11bb33aec9b5d0866dd8e2ed3dba09c2"}, - {file = "black-23.12.1-py3-none-any.whl", hash = "sha256:78baad24af0f033958cad29731e27363183e140962595def56423e626f4bee3e"}, - {file = "black-23.12.1.tar.gz", hash = "sha256:4ce3ef14ebe8d9509188014d96af1c456a910d5b5cbf434a09fef7e024b3d0d5"}, + {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"}, + {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"}, + {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"}, + {file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"}, + {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"}, + {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"}, + {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"}, + {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"}, + {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"}, + {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"}, + {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"}, + {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"}, + {file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"}, + {file = "black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"}, + {file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"}, + {file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"}, + {file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"}, + {file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"}, + {file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"}, + {file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"}, + {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"}, + {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"}, ] [package.dependencies] @@ -125,17 +125,17 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.34.49" +version = "1.34.112" description = "The AWS SDK for Python" optional = false -python-versions = ">= 3.8" +python-versions = ">=3.8" files = [ - {file = "boto3-1.34.49-py3-none-any.whl", hash = "sha256:ce8d1de03024f52a1810e8d71ad4dba3a5b9bb48b35567191500e3432a9130b4"}, - {file = "boto3-1.34.49.tar.gz", hash = "sha256:96b9dc85ce8d52619b56ca7b1ac1423eaf0af5ce132904bcc8aa81396eec2abf"}, + {file = "boto3-1.34.112-py3-none-any.whl", hash = 
"sha256:4cf28ce2c19a4e4963f1cb1f9b659a548f840f88af3e2da727b35ceb104f9223"}, + {file = "boto3-1.34.112.tar.gz", hash = "sha256:1092ac6c68acdd33051ed0d2b7cb6f5a4527c5d1535a48cda53f7012accde206"}, ] [package.dependencies] -botocore = ">=1.34.49,<1.35.0" +botocore = ">=1.34.112,<1.35.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -144,32 +144,32 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.34.49" +version = "1.34.112" description = "Low-level, data-driven core of boto 3." optional = false -python-versions = ">= 3.8" +python-versions = ">=3.8" files = [ - {file = "botocore-1.34.49-py3-none-any.whl", hash = "sha256:4ed9d7603a04b5bb5bd5de63b513bc2c8a7e8b1cd0088229c5ceb461161f43b6"}, - {file = "botocore-1.34.49.tar.gz", hash = "sha256:d89410bc60673eaff1699f3f1fdcb0e3a5e1f7a6a048c0d88c3ce5c3549433ec"}, + {file = "botocore-1.34.112-py3-none-any.whl", hash = "sha256:637f568a6c3322fb7e5ee55e0c5367324a15a331e87a497783ac6209253dde30"}, + {file = "botocore-1.34.112.tar.gz", hash = "sha256:053495953910bcf95d336ab1adb13efb70edc5462932eff180560737ad069319"}, ] [package.dependencies] jmespath = ">=0.7.1,<2.0.0" python-dateutil = ">=2.1,<3.0.0" -urllib3 = {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""} +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} [package.extras] -crt = ["awscrt (==0.19.19)"] +crt = ["awscrt (==0.20.9)"] [[package]] name = "cachetools" -version = "5.3.2" +version = "5.3.3" description = "Extensible memoizing collections and decorators" optional = false python-versions = ">=3.7" files = [ - {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"}, - {file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"}, + {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, + {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, ] [[package]] @@ -390,13 +390,13 @@ files = [ [[package]] name = "cosl" -version = "0.0.8" +version = "0.0.11" description = "Utils for COS Lite charms" optional = false python-versions = ">=3.8" files = [ - {file = "cosl-0.0.8-py3-none-any.whl", hash = "sha256:71e4e73abba9029553d11a5c55dd2bae0251fb094556e0757fe1c49f74ad18ac"}, - {file = "cosl-0.0.8.tar.gz", hash = "sha256:b41f795a507d55d12f4c0cc68565543badcbbef96afe4ae8553999efc423d834"}, + {file = "cosl-0.0.11-py3-none-any.whl", hash = "sha256:46d78d6441ba628bae386cd8c10b8144558ab208115522020e7858f97837988d"}, + {file = "cosl-0.0.11.tar.gz", hash = "sha256:15cac6ed20b65e9d33cda3c3da32e299c82f9feea64e393448cd3d3cf2bef32a"}, ] [package.dependencies] @@ -406,63 +406,63 @@ typing-extensions = "*" [[package]] name = "coverage" -version = "7.4.3" +version = "7.5.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8580b827d4746d47294c0e0b92854c85a92c2227927433998f0d3320ae8a71b6"}, - {file = "coverage-7.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718187eeb9849fc6cc23e0d9b092bc2348821c5e1a901c9f8975df0bc785bfd4"}, - {file = "coverage-7.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:767b35c3a246bcb55b8044fd3a43b8cd553dd1f9f2c1eeb87a302b1f8daa0524"}, - {file = 
"coverage-7.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae7f19afe0cce50039e2c782bff379c7e347cba335429678450b8fe81c4ef96d"}, - {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba3a8aaed13770e970b3df46980cb068d1c24af1a1968b7818b69af8c4347efb"}, - {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ee866acc0861caebb4f2ab79f0b94dbfbdbfadc19f82e6e9c93930f74e11d7a0"}, - {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:506edb1dd49e13a2d4cac6a5173317b82a23c9d6e8df63efb4f0380de0fbccbc"}, - {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd6545d97c98a192c5ac995d21c894b581f1fd14cf389be90724d21808b657e2"}, - {file = "coverage-7.4.3-cp310-cp310-win32.whl", hash = "sha256:f6a09b360d67e589236a44f0c39218a8efba2593b6abdccc300a8862cffc2f94"}, - {file = "coverage-7.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:18d90523ce7553dd0b7e23cbb28865db23cddfd683a38fb224115f7826de78d0"}, - {file = "coverage-7.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cbbe5e739d45a52f3200a771c6d2c7acf89eb2524890a4a3aa1a7fa0695d2a47"}, - {file = "coverage-7.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:489763b2d037b164846ebac0cbd368b8a4ca56385c4090807ff9fad817de4113"}, - {file = "coverage-7.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:451f433ad901b3bb00184d83fd83d135fb682d780b38af7944c9faeecb1e0bfe"}, - {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcc66e222cf4c719fe7722a403888b1f5e1682d1679bd780e2b26c18bb648cdc"}, - {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ec74cfef2d985e145baae90d9b1b32f85e1741b04cd967aaf9cfa84c1334f3"}, - {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:abbbd8093c5229c72d4c2926afaee0e6e3140de69d5dcd918b2921f2f0c8baba"}, - {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:35eb581efdacf7b7422af677b92170da4ef34500467381e805944a3201df2079"}, - {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8249b1c7334be8f8c3abcaaa996e1e4927b0e5a23b65f5bf6cfe3180d8ca7840"}, - {file = "coverage-7.4.3-cp311-cp311-win32.whl", hash = "sha256:cf30900aa1ba595312ae41978b95e256e419d8a823af79ce670835409fc02ad3"}, - {file = "coverage-7.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:18c7320695c949de11a351742ee001849912fd57e62a706d83dfc1581897fa2e"}, - {file = "coverage-7.4.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b51bfc348925e92a9bd9b2e48dad13431b57011fd1038f08316e6bf1df107d10"}, - {file = "coverage-7.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d6cdecaedea1ea9e033d8adf6a0ab11107b49571bbb9737175444cea6eb72328"}, - {file = "coverage-7.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b2eccb883368f9e972e216c7b4c7c06cabda925b5f06dde0650281cb7666a30"}, - {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c00cdc8fa4e50e1cc1f941a7f2e3e0f26cb2a1233c9696f26963ff58445bac7"}, - {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b9a4a8dd3dcf4cbd3165737358e4d7dfbd9d59902ad11e3b15eebb6393b0446e"}, - {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:062b0a75d9261e2f9c6d071753f7eef0fc9caf3a2c82d36d76667ba7b6470003"}, - {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ebe7c9e67a2d15fa97b77ea6571ce5e1e1f6b0db71d1d5e96f8d2bf134303c1d"}, - {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c0a120238dd71c68484f02562f6d446d736adcc6ca0993712289b102705a9a3a"}, - {file = "coverage-7.4.3-cp312-cp312-win32.whl", hash = "sha256:37389611ba54fd6d278fde86eb2c013c8e50232e38f5c68235d09d0a3f8aa352"}, - {file = "coverage-7.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:d25b937a5d9ffa857d41be042b4238dd61db888533b53bc76dc082cb5a15e914"}, - {file = "coverage-7.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:28ca2098939eabab044ad68850aac8f8db6bf0b29bc7f2887d05889b17346454"}, - {file = "coverage-7.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:280459f0a03cecbe8800786cdc23067a8fc64c0bd51dc614008d9c36e1659d7e"}, - {file = "coverage-7.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c0cdedd3500e0511eac1517bf560149764b7d8e65cb800d8bf1c63ebf39edd2"}, - {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9babb9466fe1da12417a4aed923e90124a534736de6201794a3aea9d98484e"}, - {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dec9de46a33cf2dd87a5254af095a409ea3bf952d85ad339751e7de6d962cde6"}, - {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:16bae383a9cc5abab9bb05c10a3e5a52e0a788325dc9ba8499e821885928968c"}, - {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2c854ce44e1ee31bda4e318af1dbcfc929026d12c5ed030095ad98197eeeaed0"}, - {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ce8c50520f57ec57aa21a63ea4f325c7b657386b3f02ccaedeccf9ebe27686e1"}, - {file = "coverage-7.4.3-cp38-cp38-win32.whl", hash = "sha256:708a3369dcf055c00ddeeaa2b20f0dd1ce664eeabde6623e516c5228b753654f"}, - {file = "coverage-7.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:1bf25fbca0c8d121a3e92a2a0555c7e5bc981aee5c3fdaf4bb7809f410f696b9"}, - {file = "coverage-7.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b253094dbe1b431d3a4ac2f053b6d7ede2664ac559705a704f621742e034f1f"}, - {file = "coverage-7.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77fbfc5720cceac9c200054b9fab50cb2a7d79660609200ab83f5db96162d20c"}, - {file = "coverage-7.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6679060424faa9c11808598504c3ab472de4531c571ab2befa32f4971835788e"}, - {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af154d617c875b52651dd8dd17a31270c495082f3d55f6128e7629658d63765"}, - {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8640f1fde5e1b8e3439fe482cdc2b0bb6c329f4bb161927c28d2e8879c6029ee"}, - {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:69b9f6f66c0af29642e73a520b6fed25ff9fd69a25975ebe6acb297234eda501"}, - {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0842571634f39016a6c03e9d4aba502be652a6e4455fadb73cd3a3a49173e38f"}, - {file = 
"coverage-7.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a78ed23b08e8ab524551f52953a8a05d61c3a760781762aac49f8de6eede8c45"}, - {file = "coverage-7.4.3-cp39-cp39-win32.whl", hash = "sha256:c0524de3ff096e15fcbfe8f056fdb4ea0bf497d584454f344d59fce069d3e6e9"}, - {file = "coverage-7.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:0209a6369ccce576b43bb227dc8322d8ef9e323d089c6f3f26a597b09cb4d2aa"}, - {file = "coverage-7.4.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:7cbde573904625509a3f37b6fecea974e363460b556a627c60dc2f47e2fffa51"}, - {file = "coverage-7.4.3.tar.gz", hash = "sha256:276f6077a5c61447a48d133ed13e759c09e62aff0dc84274a68dc18660104d52"}, + {file = "coverage-7.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0884920835a033b78d1c73b6d3bbcda8161a900f38a488829a83982925f6c2e"}, + {file = "coverage-7.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:39afcd3d4339329c5f58de48a52f6e4e50f6578dd6099961cf22228feb25f38f"}, + {file = "coverage-7.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b0ceee8147444347da6a66be737c9d78f3353b0681715b668b72e79203e4a"}, + {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a9ca3f2fae0088c3c71d743d85404cec8df9be818a005ea065495bedc33da35"}, + {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd215c0c7d7aab005221608a3c2b46f58c0285a819565887ee0b718c052aa4e"}, + {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4bf0655ab60d754491004a5efd7f9cccefcc1081a74c9ef2da4735d6ee4a6223"}, + {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:61c4bf1ba021817de12b813338c9be9f0ad5b1e781b9b340a6d29fc13e7c1b5e"}, + {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db66fc317a046556a96b453a58eced5024af4582a8dbdc0c23ca4dbc0d5b3146"}, + {file = "coverage-7.5.1-cp310-cp310-win32.whl", hash = "sha256:b016ea6b959d3b9556cb401c55a37547135a587db0115635a443b2ce8f1c7228"}, + {file = "coverage-7.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:df4e745a81c110e7446b1cc8131bf986157770fa405fe90e15e850aaf7619bc8"}, + {file = "coverage-7.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:796a79f63eca8814ca3317a1ea443645c9ff0d18b188de470ed7ccd45ae79428"}, + {file = "coverage-7.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fc84a37bfd98db31beae3c2748811a3fa72bf2007ff7902f68746d9757f3746"}, + {file = "coverage-7.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6175d1a0559986c6ee3f7fccfc4a90ecd12ba0a383dcc2da30c2b9918d67d8a3"}, + {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fc81d5878cd6274ce971e0a3a18a8803c3fe25457165314271cf78e3aae3aa2"}, + {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:556cf1a7cbc8028cb60e1ff0be806be2eded2daf8129b8811c63e2b9a6c43bca"}, + {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9981706d300c18d8b220995ad22627647be11a4276721c10911e0e9fa44c83e8"}, + {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d7fed867ee50edf1a0b4a11e8e5d0895150e572af1cd6d315d557758bfa9c057"}, + {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:ef48e2707fb320c8f139424a596f5b69955a85b178f15af261bab871873bb987"}, + {file = "coverage-7.5.1-cp311-cp311-win32.whl", hash = "sha256:9314d5678dcc665330df5b69c1e726a0e49b27df0461c08ca12674bcc19ef136"}, + {file = "coverage-7.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fa567e99765fe98f4e7d7394ce623e794d7cabb170f2ca2ac5a4174437e90dd"}, + {file = "coverage-7.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b6cf3764c030e5338e7f61f95bd21147963cf6aa16e09d2f74f1fa52013c1206"}, + {file = "coverage-7.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ec92012fefebee89a6b9c79bc39051a6cb3891d562b9270ab10ecfdadbc0c34"}, + {file = "coverage-7.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16db7f26000a07efcf6aea00316f6ac57e7d9a96501e990a36f40c965ec7a95d"}, + {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beccf7b8a10b09c4ae543582c1319c6df47d78fd732f854ac68d518ee1fb97fa"}, + {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8748731ad392d736cc9ccac03c9845b13bb07d020a33423fa5b3a36521ac6e4e"}, + {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7352b9161b33fd0b643ccd1f21f3a3908daaddf414f1c6cb9d3a2fd618bf2572"}, + {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7a588d39e0925f6a2bff87154752481273cdb1736270642aeb3635cb9b4cad07"}, + {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:68f962d9b72ce69ea8621f57551b2fa9c70509af757ee3b8105d4f51b92b41a7"}, + {file = "coverage-7.5.1-cp312-cp312-win32.whl", hash = "sha256:f152cbf5b88aaeb836127d920dd0f5e7edff5a66f10c079157306c4343d86c19"}, + {file = "coverage-7.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:5a5740d1fb60ddf268a3811bcd353de34eb56dc24e8f52a7f05ee513b2d4f596"}, + {file = "coverage-7.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2213def81a50519d7cc56ed643c9e93e0247f5bbe0d1247d15fa520814a7cd7"}, + {file = "coverage-7.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5037f8fcc2a95b1f0e80585bd9d1ec31068a9bcb157d9750a172836e98bc7a90"}, + {file = "coverage-7.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3721c2c9e4c4953a41a26c14f4cef64330392a6d2d675c8b1db3b645e31f0e"}, + {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca498687ca46a62ae590253fba634a1fe9836bc56f626852fb2720f334c9e4e5"}, + {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cdcbc320b14c3e5877ee79e649677cb7d89ef588852e9583e6b24c2e5072661"}, + {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:57e0204b5b745594e5bc14b9b50006da722827f0b8c776949f1135677e88d0b8"}, + {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fe7502616b67b234482c3ce276ff26f39ffe88adca2acf0261df4b8454668b4"}, + {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9e78295f4144f9dacfed4f92935fbe1780021247c2fabf73a819b17f0ccfff8d"}, + {file = "coverage-7.5.1-cp38-cp38-win32.whl", hash = "sha256:1434e088b41594baa71188a17533083eabf5609e8e72f16ce8c186001e6b8c41"}, + {file = "coverage-7.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:0646599e9b139988b63704d704af8e8df7fa4cbc4a1f33df69d97f36cb0a38de"}, + {file = 
"coverage-7.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4cc37def103a2725bc672f84bd939a6fe4522310503207aae4d56351644682f1"}, + {file = "coverage-7.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc0b4d8bfeabd25ea75e94632f5b6e047eef8adaed0c2161ada1e922e7f7cece"}, + {file = "coverage-7.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d0a0f5e06881ecedfe6f3dd2f56dcb057b6dbeb3327fd32d4b12854df36bf26"}, + {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9735317685ba6ec7e3754798c8871c2f49aa5e687cc794a0b1d284b2389d1bd5"}, + {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d21918e9ef11edf36764b93101e2ae8cc82aa5efdc7c5a4e9c6c35a48496d601"}, + {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c3e757949f268364b96ca894b4c342b41dc6f8f8b66c37878aacef5930db61be"}, + {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:79afb6197e2f7f60c4824dd4b2d4c2ec5801ceb6ba9ce5d2c3080e5660d51a4f"}, + {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d1d0d98d95dd18fe29dc66808e1accf59f037d5716f86a501fc0256455219668"}, + {file = "coverage-7.5.1-cp39-cp39-win32.whl", hash = "sha256:1cc0fe9b0b3a8364093c53b0b4c0c2dd4bb23acbec4c9240b5f284095ccf7981"}, + {file = "coverage-7.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:dde0070c40ea8bb3641e811c1cfbf18e265d024deff6de52c5950677a8fb1e0f"}, + {file = "coverage-7.5.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:6537e7c10cc47c595828b8a8be04c72144725c383c4702703ff4e42e44577312"}, + {file = "coverage-7.5.1.tar.gz", hash = "sha256:54de9ef3a9da981f7af93eafde4ede199e0846cd819eb27c88e2b712aae9708c"}, ] [package.dependencies] @@ -473,43 +473,43 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "42.0.5" +version = "42.0.7" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16"}, - {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da"}, - {file = "cryptography-42.0.5-cp37-abi3-win32.whl", hash = "sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74"}, - {file = "cryptography-42.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940"}, - {file = "cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30"}, - {file = "cryptography-42.0.5-cp39-abi3-win32.whl", hash = "sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413"}, - {file = "cryptography-42.0.5-cp39-abi3-win_amd64.whl", hash = 
"sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd"}, - {file = "cryptography-42.0.5.tar.gz", hash = "sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1"}, + {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a987f840718078212fdf4504d0fd4c6effe34a7e4740378e59d47696e8dfb477"}, + {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd13b5e9b543532453de08bcdc3cc7cebec6f9883e886fd20a92f26940fd3e7a"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a79165431551042cc9d1d90e6145d5d0d3ab0f2d66326c201d9b0e7f5bf43604"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a47787a5e3649008a1102d3df55424e86606c9bae6fb77ac59afe06d234605f8"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:02c0eee2d7133bdbbc5e24441258d5d2244beb31da5ed19fbb80315f4bbbff55"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5e44507bf8d14b36b8389b226665d597bc0f18ea035d75b4e53c7b1ea84583cc"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7f8b25fa616d8b846aef64b15c606bb0828dbc35faf90566eb139aa9cff67af2"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:93a3209f6bb2b33e725ed08ee0991b92976dfdcf4e8b38646540674fc7508e13"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e6b8f1881dac458c34778d0a424ae5769de30544fc678eac51c1c8bb2183e9da"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3de9a45d3b2b7d8088c3fbf1ed4395dfeff79d07842217b38df14ef09ce1d8d7"}, + {file = "cryptography-42.0.7-cp37-abi3-win32.whl", hash = "sha256:789caea816c6704f63f6241a519bfa347f72fbd67ba28d04636b7c6b7da94b0b"}, + {file = "cryptography-42.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:8cb8ce7c3347fcf9446f201dc30e2d5a3c898d009126010cbd1f443f28b52678"}, + {file = "cryptography-42.0.7-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:a3a5ac8b56fe37f3125e5b72b61dcde43283e5370827f5233893d461b7360cd4"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:779245e13b9a6638df14641d029add5dc17edbef6ec915688f3acb9e720a5858"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d563795db98b4cd57742a78a288cdbdc9daedac29f2239793071fe114f13785"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:31adb7d06fe4383226c3e963471f6837742889b3c4caa55aac20ad951bc8ffda"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:efd0bf5205240182e0f13bcaea41be4fdf5c22c5129fc7ced4a0282ac86998c9"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a9bc127cdc4ecf87a5ea22a2556cab6c7eda2923f84e4f3cc588e8470ce4e42e"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3577d029bc3f4827dd5bf8bf7710cac13527b470bbf1820a3f394adb38ed7d5f"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2e47577f9b18723fa294b0ea9a17d5e53a227867a0a4904a1a076d1646d45ca1"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1a58839984d9cb34c855197043eaae2c187d930ca6d644612843b4fe8513c886"}, + {file = "cryptography-42.0.7-cp39-abi3-win32.whl", hash = "sha256:e6b79d0adb01aae87e8a44c2b64bc3f3fe59515280e00fb6d57a7267a2583cda"}, + {file = "cryptography-42.0.7-cp39-abi3-win_amd64.whl", hash = "sha256:16268d46086bb8ad5bf0a2b5544d8a9ed87a0e33f5e77dd3c3301e63d941a83b"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2954fccea107026512b15afb4aa664a5640cd0af630e2ee3962f2602693f0c82"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:362e7197754c231797ec45ee081f3088a27a47c6c01eff2ac83f60f85a50fe60"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f698edacf9c9e0371112792558d2f705b5645076cc0aaae02f816a0171770fd"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5482e789294854c28237bba77c4c83be698be740e31a3ae5e879ee5444166582"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e9b2a6309f14c0497f348d08a065d52f3020656f675819fc405fb63bbcd26562"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d8e3098721b84392ee45af2dd554c947c32cc52f862b6a3ae982dbb90f577f14"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c65f96dad14f8528a447414125e1fc8feb2ad5a272b8f68477abbcc1ea7d94b9"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36017400817987670037fbb0324d71489b6ead6231c9604f8fc1f7d008087c68"}, + {file = "cryptography-42.0.7.tar.gz", hash = "sha256:ecbfbc00bf55888edda9868a4cf927205de8499e7fabe6c050322298382953f2"}, ] [package.dependencies] @@ -538,13 +538,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = 
"sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, ] [package.extras] @@ -566,29 +566,29 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth [[package]] name = "flake8" -version = "6.1.0" +version = "7.0.0" description = "the modular source code checker: pep8 pyflakes and co" optional = false python-versions = ">=3.8.1" files = [ - {file = "flake8-6.1.0-py2.py3-none-any.whl", hash = "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"}, - {file = "flake8-6.1.0.tar.gz", hash = "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23"}, + {file = "flake8-7.0.0-py2.py3-none-any.whl", hash = "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"}, + {file = "flake8-7.0.0.tar.gz", hash = "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132"}, ] [package.dependencies] mccabe = ">=0.7.0,<0.8.0" pycodestyle = ">=2.11.0,<2.12.0" -pyflakes = ">=3.1.0,<3.2.0" +pyflakes = ">=3.2.0,<3.3.0" [[package]] name = "flake8-builtins" -version = "2.2.0" +version = "2.5.0" description = "Check for python builtins being used as variables or parameters" optional = false python-versions = ">=3.8" files = [ - {file = "flake8_builtins-2.2.0-py3-none-any.whl", hash = "sha256:7ee5766d9c60e5d579dfda84e65c6d0e6c26005f6f59cb9bf722462d7987a807"}, - {file = "flake8_builtins-2.2.0.tar.gz", hash = "sha256:392d5af3a0720c5a863aa93dc47f48c879081345a143fe9f20d995fe9ff5686a"}, + {file = "flake8_builtins-2.5.0-py3-none-any.whl", hash = "sha256:8cac7c52c6f0708c0902b46b385bc7e368a9068965083796f1431c0d2e6550cf"}, + {file = "flake8_builtins-2.5.0.tar.gz", hash = "sha256:bdaa3dd823e4f5308c5e712d19fa5f69daa52781ea874f5ea9c3637bcf56faa6"}, ] [package.dependencies] @@ -628,13 +628,13 @@ pydocstyle = ">=2.1" [[package]] name = "google-auth" -version = "2.28.1" +version = "2.29.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.28.1.tar.gz", hash = "sha256:34fc3046c257cedcf1622fc4b31fc2be7923d9b4d44973d481125ecc50d83885"}, - {file = "google_auth-2.28.1-py2.py3-none-any.whl", hash = "sha256:25141e2d7a14bfcba945f5e9827f98092716e99482562f15306e5b026e21aa72"}, + {file = "google-auth-2.29.0.tar.gz", hash = "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360"}, + {file = "google_auth-2.29.0-py2.py3-none-any.whl", hash = "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"}, ] [package.dependencies] @@ -651,13 +651,13 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] [[package]] name = "hvac" -version = "2.1.0" +version = "2.2.0" description = "HashiCorp Vault API client" optional = false -python-versions = ">=3.8,<4.0" +python-versions = "<4.0,>=3.8" files = [ - {file = "hvac-2.1.0-py3-none-any.whl", hash = "sha256:73bc91e58c3fc7c6b8107cdaca9cb71fa0a893dfd80ffbc1c14e20f24c0c29d7"}, - {file = "hvac-2.1.0.tar.gz", hash = "sha256:b48bcda11a4ab0a7b6c47232c7ba7c87fda318ae2d4a7662800c465a78742894"}, + {file = "hvac-2.2.0-py3-none-any.whl", hash = "sha256:f287a19940c6fc518c723f8276cc9927f7400734303ee5872ac2e84539466d8d"}, + {file = "hvac-2.2.0.tar.gz", hash = "sha256:e4b0248c5672cb9a6f5974e7c8f5271a09c6c663cbf8ab11733a227f3d2db2c2"}, ] [package.dependencies] @@ -668,13 +668,13 @@ parser = ["pyhcl (>=0.4.4,<0.5.0)"] [[package]] name = "idna" -version = "3.6" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = 
"idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -706,13 +706,13 @@ tomli = {version = "*", markers = "python_version > \"3.6\" and python_version < [[package]] name = "ipython" -version = "8.22.1" +version = "8.24.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.10" files = [ - {file = "ipython-8.22.1-py3-none-any.whl", hash = "sha256:869335e8cded62ffb6fac8928e5287a05433d6462e3ebaac25f4216474dd6bc4"}, - {file = "ipython-8.22.1.tar.gz", hash = "sha256:39c6f9efc079fb19bfb0f17eee903978fe9a290b1b82d68196c641cecb76ea22"}, + {file = "ipython-8.24.0-py3-none-any.whl", hash = "sha256:d7bf2f6c4314984e3e02393213bab8703cf163ede39672ce5918c51fe253a2a3"}, + {file = "ipython-8.24.0.tar.gz", hash = "sha256:010db3f8a728a578bb641fdd06c063b9fb8e96a9464c63aec6310fbcb5e80501"}, ] [package.dependencies] @@ -726,18 +726,20 @@ prompt-toolkit = ">=3.0.41,<3.1.0" pygments = ">=2.4.0" stack-data = "*" traitlets = ">=5.13.0" +typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} [package.extras] -all = ["ipython[black,doc,kernel,nbconvert,nbformat,notebook,parallel,qtconsole,terminal]", "ipython[test,test-extra]"] +all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] black = ["black"] doc = ["docrepr", "exceptiongroup", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "stack-data", "typing-extensions"] kernel = ["ipykernel"] +matplotlib = ["matplotlib"] nbconvert = ["nbconvert"] nbformat = ["nbformat"] notebook = ["ipywidgets", "notebook"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] -test = ["pickleshare", "pytest (<8)", "pytest-asyncio (<0.22)", "testpath"] +test = ["pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] [[package]] @@ -817,13 +819,13 @@ six = ">=1.13,<2.0" [[package]] name = "jsonschema" -version = "4.21.1" +version = "4.22.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"}, - {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"}, + {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"}, + {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"}, ] [package.dependencies] @@ -852,12 +854,12 @@ referencing = ">=0.31.0" [[package]] name = "juju" -version = "3.3.1.1" +version = "3.4.0.0" description = "Python library for Juju" optional = false python-versions = "*" files = [ - {file = "juju-3.3.1.1.tar.gz", hash = "sha256:b30b19051a1c394fa1b1a8c6d38c31d8acdceec2f95b33ffb46fab2d791a29fd"}, + {file = "juju-3.4.0.0.tar.gz", hash = 
"sha256:5b883446ca0977c1255b0876ed5d2eab01cffaf03a8c77cfd768975264abef3d"}, ] [package.dependencies] @@ -989,13 +991,13 @@ files = [ [[package]] name = "matplotlib-inline" -version = "0.1.6" +version = "0.1.7" description = "Inline Matplotlib backend for Jupyter" optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, - {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, + {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, + {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, ] [package.dependencies] @@ -1041,13 +1043,13 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "opensearch-py" -version = "2.4.2" +version = "2.5.0" description = "Python client for OpenSearch" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,<4,>=2.7" files = [ - {file = "opensearch-py-2.4.2.tar.gz", hash = "sha256:564f175af134aa885f4ced6846eb4532e08b414fff0a7976f76b276fe0e69158"}, - {file = "opensearch_py-2.4.2-py2.py3-none-any.whl", hash = "sha256:7867319132133e2974c09f76a54eb1d502b989229be52da583d93ddc743ea111"}, + {file = "opensearch-py-2.5.0.tar.gz", hash = "sha256:0dde4ac7158a717d92a8cd81964cb99705a4b80bcf9258ba195b9a9f23f5226d"}, + {file = "opensearch_py-2.5.0-py2.py3-none-any.whl", hash = "sha256:cf093a40e272b60663f20417fc1264ac724dcf1e03c1a4542a6b44835b1e6c49"}, ] [package.dependencies] @@ -1055,7 +1057,7 @@ certifi = ">=2022.12.07" python-dateutil = "*" requests = ">=2.4.0,<3.0.0" six = "*" -urllib3 = ">=1.26.18" +urllib3 = ">=1.26.18,<2" [package.extras] async = ["aiohttp (>=3,<4)"] @@ -1065,13 +1067,13 @@ kerberos = ["requests-kerberos"] [[package]] name = "ops" -version = "2.12.0" +version = "2.13.0" description = "The Python library behind great charms" optional = false python-versions = ">=3.8" files = [ - {file = "ops-2.12.0-py3-none-any.whl", hash = "sha256:b6f7db8aa2886351d0a2527f0df6c8a34e0d9cf90ddfbb91e734f73259df8ddf"}, - {file = "ops-2.12.0.tar.gz", hash = "sha256:7d88522914728caa13aaf1689637f8b573eaf5d38b7f2b8cf135406ee6ef0fc3"}, + {file = "ops-2.13.0-py3-none-any.whl", hash = "sha256:edebef03841d727a9b8bd9ee3f52c5b94070fd748641a0927b51f6fe3a887365"}, + {file = "ops-2.13.0.tar.gz", hash = "sha256:106deec8c18a6dbf7fa3e6fe6e288784b1da8cb626b5265f6c4b959e10877272"}, ] [package.dependencies] @@ -1083,24 +1085,24 @@ docs = ["canonical-sphinx-extensions", "furo", "linkify-it-py", "myst-parser", " [[package]] name = "overrides" -version = "7.4.0" +version = "7.7.0" description = "A decorator to automatically detect mismatch when overriding a method." 
optional = false python-versions = ">=3.6" files = [ - {file = "overrides-7.4.0-py3-none-any.whl", hash = "sha256:3ad24583f86d6d7a49049695efe9933e67ba62f0c7625d53c59fa832ce4b8b7d"}, - {file = "overrides-7.4.0.tar.gz", hash = "sha256:9502a3cca51f4fac40b5feca985b6703a5c1f6ad815588a7ca9e285b9dca6757"}, + {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, + {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, ] [[package]] name = "packaging" -version = "23.2" +version = "24.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, + {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, ] [[package]] @@ -1140,18 +1142,18 @@ invoke = ["invoke (>=2.0)"] [[package]] name = "parso" -version = "0.8.3" +version = "0.8.4" description = "A Python Parser" optional = false python-versions = ">=3.6" files = [ - {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, - {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, + {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, + {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, ] [package.extras] -qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] -testing = ["docopt", "pytest (<6.0.0)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["docopt", "pytest"] [[package]] name = "pathspec" @@ -1194,28 +1196,29 @@ ptyprocess = ">=0.5" [[package]] name = "platformdirs" -version = "4.2.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +version = "4.2.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, - {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, + {file = "platformdirs-4.2.1-py3-none-any.whl", hash = "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1"}, + {file = "platformdirs-4.2.1.tar.gz", hash = "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] @@ -1249,35 +1252,22 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "3.20.0" -description = "Protocol Buffers" +version = "5.27.0" +description = "" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "protobuf-3.20.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9d0f3aca8ca51c8b5e204ab92bd8afdb2a8e3df46bd0ce0bd39065d79aabcaa4"}, - {file = "protobuf-3.20.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:001c2160c03b6349c04de39cf1a58e342750da3632f6978a1634a3dcca1ec10e"}, - {file = "protobuf-3.20.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5b5860b790498f233cdc8d635a17fc08de62e59d4dcd8cdb6c6c0d38a31edf2b"}, - {file = "protobuf-3.20.0-cp310-cp310-win32.whl", hash = "sha256:0b250c60256c8824219352dc2a228a6b49987e5bf94d3ffcf4c46585efcbd499"}, - {file = "protobuf-3.20.0-cp310-cp310-win_amd64.whl", hash = "sha256:a1eebb6eb0653e594cb86cd8e536b9b083373fca9aba761ade6cd412d46fb2ab"}, - {file = "protobuf-3.20.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:bc14037281db66aa60856cd4ce4541a942040686d290e3f3224dd3978f88f554"}, - {file = "protobuf-3.20.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:47257d932de14a7b6c4ae1b7dbf592388153ee35ec7cae216b87ae6490ed39a3"}, - {file = "protobuf-3.20.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fbcbb068ebe67c4ff6483d2e2aa87079c325f8470b24b098d6bf7d4d21d57a69"}, - {file = "protobuf-3.20.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:542f25a4adf3691a306dcc00bf9a73176554938ec9b98f20f929a044f80acf1b"}, - {file = "protobuf-3.20.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fd7133b885e356fa4920ead8289bb45dc6f185a164e99e10279f33732ed5ce15"}, - {file = "protobuf-3.20.0-cp37-cp37m-win32.whl", hash = "sha256:8d84453422312f8275455d1cb52d850d6a4d7d714b784e41b573c6f5bfc2a029"}, - {file = "protobuf-3.20.0-cp37-cp37m-win_amd64.whl", hash = "sha256:52bae32a147c375522ce09bd6af4d2949aca32a0415bc62df1456b3ad17c6001"}, - {file 
= "protobuf-3.20.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25d2fcd6eef340082718ec9ad2c58d734429f2b1f7335d989523852f2bba220b"}, - {file = "protobuf-3.20.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:88c8be0558bdfc35e68c42ae5bf785eb9390d25915d4863bbc7583d23da77074"}, - {file = "protobuf-3.20.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:38fd9eb74b852e4ee14b16e9670cd401d147ee3f3ec0d4f7652e0c921d6227f8"}, - {file = "protobuf-3.20.0-cp38-cp38-win32.whl", hash = "sha256:7dcd84dc31ebb35ade755e06d1561d1bd3b85e85dbdbf6278011fc97b22810db"}, - {file = "protobuf-3.20.0-cp38-cp38-win_amd64.whl", hash = "sha256:1eb13f5a5a59ca4973bcfa2fc8fff644bd39f2109c3f7a60bd5860cb6a49b679"}, - {file = "protobuf-3.20.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d24c81c2310f0063b8fc1c20c8ed01f3331be9374b4b5c2de846f69e11e21fb"}, - {file = "protobuf-3.20.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:8be43a91ab66fe995e85ccdbdd1046d9f0443d59e060c0840319290de25b7d33"}, - {file = "protobuf-3.20.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7a53d4035427b9dbfbb397f46642754d294f131e93c661d056366f2a31438263"}, - {file = "protobuf-3.20.0-cp39-cp39-win32.whl", hash = "sha256:32bf4a90c207a0b4e70ca6dd09d43de3cb9898f7d5b69c2e9e3b966a7f342820"}, - {file = "protobuf-3.20.0-cp39-cp39-win_amd64.whl", hash = "sha256:6efe066a7135233f97ce51a1aa007d4fb0be28ef093b4f88dac4ad1b3a2b7b6f"}, - {file = "protobuf-3.20.0-py2.py3-none-any.whl", hash = "sha256:4eda68bd9e2a4879385e6b1ea528c976f59cd9728382005cc54c28bcce8db983"}, - {file = "protobuf-3.20.0.tar.gz", hash = "sha256:71b2c3d1cd26ed1ec7c8196834143258b2ad7f444efff26fdc366c6f5e752702"}, + {file = "protobuf-5.27.0-cp310-abi3-win32.whl", hash = "sha256:2f83bf341d925650d550b8932b71763321d782529ac0eaf278f5242f513cc04e"}, + {file = "protobuf-5.27.0-cp310-abi3-win_amd64.whl", hash = "sha256:b276e3f477ea1eebff3c2e1515136cfcff5ac14519c45f9b4aa2f6a87ea627c4"}, + {file = "protobuf-5.27.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:744489f77c29174328d32f8921566fb0f7080a2f064c5137b9d6f4b790f9e0c1"}, + {file = "protobuf-5.27.0-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:f51f33d305e18646f03acfdb343aac15b8115235af98bc9f844bf9446573827b"}, + {file = "protobuf-5.27.0-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:56937f97ae0dcf4e220ff2abb1456c51a334144c9960b23597f044ce99c29c89"}, + {file = "protobuf-5.27.0-cp38-cp38-win32.whl", hash = "sha256:a17f4d664ea868102feaa30a674542255f9f4bf835d943d588440d1f49a3ed15"}, + {file = "protobuf-5.27.0-cp38-cp38-win_amd64.whl", hash = "sha256:aabbbcf794fbb4c692ff14ce06780a66d04758435717107c387f12fb477bf0d8"}, + {file = "protobuf-5.27.0-cp39-cp39-win32.whl", hash = "sha256:587be23f1212da7a14a6c65fd61995f8ef35779d4aea9e36aad81f5f3b80aec5"}, + {file = "protobuf-5.27.0-cp39-cp39-win_amd64.whl", hash = "sha256:7cb65fc8fba680b27cf7a07678084c6e68ee13cab7cace734954c25a43da6d0f"}, + {file = "protobuf-5.27.0-py3-none-any.whl", hash = "sha256:673ad60f1536b394b4fa0bcd3146a4130fcad85bfe3b60eaa86d6a0ace0fa374"}, + {file = "protobuf-5.27.0.tar.gz", hash = "sha256:07f2b9a15255e3cf3f137d884af7972407b556a7a220912b252f26dc3121e6bf"}, ] [[package]] @@ -1307,28 +1297,28 @@ tests = ["pytest"] [[package]] name = "pyasn1" -version = "0.5.1" +version = "0.6.0" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = ">=3.8" files = [ - {file = 
"pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"}, - {file = "pyasn1-0.5.1.tar.gz", hash = "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"}, + {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, + {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, ] [[package]] name = "pyasn1-modules" -version = "0.3.0" +version = "0.4.0" description = "A collection of ASN.1-based protocols modules" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = ">=3.8" files = [ - {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, - {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"}, + {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, + {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, ] [package.dependencies] -pyasn1 = ">=0.4.6,<0.6.0" +pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pycodestyle" @@ -1343,58 +1333,58 @@ files = [ [[package]] name = "pycparser" -version = "2.21" +version = "2.22" description = "C parser in Python" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.8" files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] [[package]] name = "pydantic" -version = "1.10.14" +version = "1.10.15" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.14-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7f4fcec873f90537c382840f330b90f4715eebc2bc9925f04cb92de593eae054"}, - {file = "pydantic-1.10.14-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e3a76f571970fcd3c43ad982daf936ae39b3e90b8a2e96c04113a369869dc87"}, - {file = "pydantic-1.10.14-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d886bd3c3fbeaa963692ef6b643159ccb4b4cefaf7ff1617720cbead04fd1d"}, - {file = "pydantic-1.10.14-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:798a3d05ee3b71967844a1164fd5bdb8c22c6d674f26274e78b9f29d81770c4e"}, - {file = "pydantic-1.10.14-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:23d47a4b57a38e8652bcab15a658fdb13c785b9ce217cc3a729504ab4e1d6bc9"}, - {file = "pydantic-1.10.14-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f9f674b5c3bebc2eba401de64f29948ae1e646ba2735f884d1594c5f675d6f2a"}, - {file = "pydantic-1.10.14-cp310-cp310-win_amd64.whl", hash = "sha256:24a7679fab2e0eeedb5a8924fc4a694b3bcaac7d305aeeac72dd7d4e05ecbebf"}, - {file = "pydantic-1.10.14-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:9d578ac4bf7fdf10ce14caba6f734c178379bd35c486c6deb6f49006e1ba78a7"}, - {file = "pydantic-1.10.14-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fa7790e94c60f809c95602a26d906eba01a0abee9cc24150e4ce2189352deb1b"}, - {file = "pydantic-1.10.14-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad4e10efa5474ed1a611b6d7f0d130f4aafadceb73c11d9e72823e8f508e663"}, - {file = "pydantic-1.10.14-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1245f4f61f467cb3dfeced2b119afef3db386aec3d24a22a1de08c65038b255f"}, - {file = "pydantic-1.10.14-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:21efacc678a11114c765eb52ec0db62edffa89e9a562a94cbf8fa10b5db5c046"}, - {file = "pydantic-1.10.14-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:412ab4a3f6dbd2bf18aefa9f79c7cca23744846b31f1d6555c2ee2b05a2e14ca"}, - {file = "pydantic-1.10.14-cp311-cp311-win_amd64.whl", hash = "sha256:e897c9f35281f7889873a3e6d6b69aa1447ceb024e8495a5f0d02ecd17742a7f"}, - {file = "pydantic-1.10.14-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d604be0f0b44d473e54fdcb12302495fe0467c56509a2f80483476f3ba92b33c"}, - {file = "pydantic-1.10.14-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42c7d17706911199798d4c464b352e640cab4351efe69c2267823d619a937e5"}, - {file = "pydantic-1.10.14-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:596f12a1085e38dbda5cbb874d0973303e34227b400b6414782bf205cc14940c"}, - {file = "pydantic-1.10.14-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bfb113860e9288d0886e3b9e49d9cf4a9d48b441f52ded7d96db7819028514cc"}, - {file = "pydantic-1.10.14-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bc3ed06ab13660b565eed80887fcfbc0070f0aa0691fbb351657041d3e874efe"}, - {file = "pydantic-1.10.14-cp37-cp37m-win_amd64.whl", hash = "sha256:ad8c2bc677ae5f6dbd3cf92f2c7dc613507eafe8f71719727cbc0a7dec9a8c01"}, - {file = "pydantic-1.10.14-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c37c28449752bb1f47975d22ef2882d70513c546f8f37201e0fec3a97b816eee"}, - {file = "pydantic-1.10.14-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49a46a0994dd551ec051986806122767cf144b9702e31d47f6d493c336462597"}, - {file = "pydantic-1.10.14-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53e3819bd20a42470d6dd0fe7fc1c121c92247bca104ce608e609b59bc7a77ee"}, - {file = "pydantic-1.10.14-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0fbb503bbbbab0c588ed3cd21975a1d0d4163b87e360fec17a792f7d8c4ff29f"}, - {file = "pydantic-1.10.14-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:336709883c15c050b9c55a63d6c7ff09be883dbc17805d2b063395dd9d9d0022"}, - {file = "pydantic-1.10.14-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4ae57b4d8e3312d486e2498d42aed3ece7b51848336964e43abbf9671584e67f"}, - {file = "pydantic-1.10.14-cp38-cp38-win_amd64.whl", hash = "sha256:dba49d52500c35cfec0b28aa8b3ea5c37c9df183ffc7210b10ff2a415c125c4a"}, - {file = "pydantic-1.10.14-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c66609e138c31cba607d8e2a7b6a5dc38979a06c900815495b2d90ce6ded35b4"}, - {file = "pydantic-1.10.14-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d986e115e0b39604b9eee3507987368ff8148222da213cd38c359f6f57b3b347"}, - {file = "pydantic-1.10.14-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:646b2b12df4295b4c3148850c85bff29ef6d0d9621a8d091e98094871a62e5c7"}, - {file = 
"pydantic-1.10.14-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282613a5969c47c83a8710cc8bfd1e70c9223feb76566f74683af889faadc0ea"}, - {file = "pydantic-1.10.14-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:466669501d08ad8eb3c4fecd991c5e793c4e0bbd62299d05111d4f827cded64f"}, - {file = "pydantic-1.10.14-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:13e86a19dca96373dcf3190fcb8797d40a6f12f154a244a8d1e8e03b8f280593"}, - {file = "pydantic-1.10.14-cp39-cp39-win_amd64.whl", hash = "sha256:08b6ec0917c30861e3fe71a93be1648a2aa4f62f866142ba21670b24444d7fd8"}, - {file = "pydantic-1.10.14-py3-none-any.whl", hash = "sha256:8ee853cd12ac2ddbf0ecbac1c289f95882b2d4482258048079d13be700aa114c"}, - {file = "pydantic-1.10.14.tar.gz", hash = "sha256:46f17b832fe27de7850896f3afee50ea682220dd218f7e9c88d436788419dca6"}, + {file = "pydantic-1.10.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22ed12ee588b1df028a2aa5d66f07bf8f8b4c8579c2e96d5a9c1f96b77f3bb55"}, + {file = "pydantic-1.10.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75279d3cac98186b6ebc2597b06bcbc7244744f6b0b44a23e4ef01e5683cc0d2"}, + {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50f1666a9940d3d68683c9d96e39640f709d7a72ff8702987dab1761036206bb"}, + {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82790d4753ee5d00739d6cb5cf56bceb186d9d6ce134aca3ba7befb1eedbc2c8"}, + {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:d207d5b87f6cbefbdb1198154292faee8017d7495a54ae58db06762004500d00"}, + {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e49db944fad339b2ccb80128ffd3f8af076f9f287197a480bf1e4ca053a866f0"}, + {file = "pydantic-1.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:d3b5c4cbd0c9cb61bbbb19ce335e1f8ab87a811f6d589ed52b0254cf585d709c"}, + {file = "pydantic-1.10.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c3d5731a120752248844676bf92f25a12f6e45425e63ce22e0849297a093b5b0"}, + {file = "pydantic-1.10.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c365ad9c394f9eeffcb30a82f4246c0006417f03a7c0f8315d6211f25f7cb654"}, + {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3287e1614393119c67bd4404f46e33ae3be3ed4cd10360b48d0a4459f420c6a3"}, + {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be51dd2c8596b25fe43c0a4a59c2bee4f18d88efb8031188f9e7ddc6b469cf44"}, + {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6a51a1dd4aa7b3f1317f65493a182d3cff708385327c1c82c81e4a9d6d65b2e4"}, + {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4e316e54b5775d1eb59187f9290aeb38acf620e10f7fd2f776d97bb788199e53"}, + {file = "pydantic-1.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:0d142fa1b8f2f0ae11ddd5e3e317dcac060b951d605fda26ca9b234b92214986"}, + {file = "pydantic-1.10.15-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7ea210336b891f5ea334f8fc9f8f862b87acd5d4a0cbc9e3e208e7aa1775dabf"}, + {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3453685ccd7140715e05f2193d64030101eaad26076fad4e246c1cc97e1bb30d"}, + {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:9bea1f03b8d4e8e86702c918ccfd5d947ac268f0f0cc6ed71782e4b09353b26f"}, + {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:005655cabc29081de8243126e036f2065bd7ea5b9dff95fde6d2c642d39755de"}, + {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:af9850d98fc21e5bc24ea9e35dd80a29faf6462c608728a110c0a30b595e58b7"}, + {file = "pydantic-1.10.15-cp37-cp37m-win_amd64.whl", hash = "sha256:d31ee5b14a82c9afe2bd26aaa405293d4237d0591527d9129ce36e58f19f95c1"}, + {file = "pydantic-1.10.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5e09c19df304b8123938dc3c53d3d3be6ec74b9d7d0d80f4f4b5432ae16c2022"}, + {file = "pydantic-1.10.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7ac9237cd62947db00a0d16acf2f3e00d1ae9d3bd602b9c415f93e7a9fc10528"}, + {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:584f2d4c98ffec420e02305cf675857bae03c9d617fcfdc34946b1160213a948"}, + {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbc6989fad0c030bd70a0b6f626f98a862224bc2b1e36bfc531ea2facc0a340c"}, + {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d573082c6ef99336f2cb5b667b781d2f776d4af311574fb53d908517ba523c22"}, + {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6bd7030c9abc80134087d8b6e7aa957e43d35714daa116aced57269a445b8f7b"}, + {file = "pydantic-1.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:3350f527bb04138f8aff932dc828f154847fbdc7a1a44c240fbfff1b57f49a12"}, + {file = "pydantic-1.10.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:51d405b42f1b86703555797270e4970a9f9bd7953f3990142e69d1037f9d9e51"}, + {file = "pydantic-1.10.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a980a77c52723b0dc56640ced396b73a024d4b74f02bcb2d21dbbac1debbe9d0"}, + {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67f1a1fb467d3f49e1708a3f632b11c69fccb4e748a325d5a491ddc7b5d22383"}, + {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:676ed48f2c5bbad835f1a8ed8a6d44c1cd5a21121116d2ac40bd1cd3619746ed"}, + {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:92229f73400b80c13afcd050687f4d7e88de9234d74b27e6728aa689abcf58cc"}, + {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2746189100c646682eff0bce95efa7d2e203420d8e1c613dc0c6b4c1d9c1fde4"}, + {file = "pydantic-1.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:394f08750bd8eaad714718812e7fab615f873b3cdd0b9d84e76e51ef3b50b6b7"}, + {file = "pydantic-1.10.15-py3-none-any.whl", hash = "sha256:28e552a060ba2740d0d2aabe35162652c1459a0b9069fe0db7f4ee0e18e74d58"}, + {file = "pydantic-1.10.15.tar.gz", hash = "sha256:ca832e124eda231a60a041da4f013e3ff24949d94a01154b137fc2f2a43c3ffb"}, ] [package.dependencies] @@ -1423,13 +1413,13 @@ toml = ["tomli (>=1.2.3)"] [[package]] name = "pyflakes" -version = "3.1.0" +version = "3.2.0" description = "passive checker of Python programs" optional = false python-versions = ">=3.8" files = [ - {file = "pyflakes-3.1.0-py2.py3-none-any.whl", hash = "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774"}, - {file = "pyflakes-3.1.0.tar.gz", hash = "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"}, + {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = 
"sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"}, + {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"}, ] [[package]] @@ -1490,17 +1480,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyproject-flake8" -version = "6.1.0" +version = "7.0.0" description = "pyproject-flake8 (`pflake8`), a monkey patching wrapper to connect flake8 with pyproject.toml configuration" optional = false python-versions = ">=3.8.1" files = [ - {file = "pyproject_flake8-6.1.0-py3-none-any.whl", hash = "sha256:86ea5559263c098e1aa4f866776aa2cf45362fd91a576b9fd8fbbbb55db12c4e"}, - {file = "pyproject_flake8-6.1.0.tar.gz", hash = "sha256:6da8e5a264395e0148bc11844c6fb50546f1fac83ac9210f7328664135f9e70f"}, + {file = "pyproject_flake8-7.0.0-py3-none-any.whl", hash = "sha256:611e91b49916e6d0685f88423ad4baff490888278a258975403c0dee6eb6072e"}, + {file = "pyproject_flake8-7.0.0.tar.gz", hash = "sha256:5b953592336bc04d86e8942fdca1014256044a3445c8b6ca9467d08636749158"}, ] [package.dependencies] -flake8 = "6.1.0" +flake8 = "7.0.0" tomli = {version = "*", markers = "python_version < \"3.11\""} [[package]] @@ -1519,13 +1509,13 @@ pytz = "*" [[package]] name = "pytest" -version = "7.4.4" +version = "8.2.1" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, - {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, + {file = "pytest-8.2.1-py3-none-any.whl", hash = "sha256:faccc5d332b8c3719f40283d0d44aa5cf101cec36f88cde9ed8f2bc0538612b1"}, + {file = "pytest-8.2.1.tar.gz", hash = "sha256:5046e5b46d8e4cac199c373041f26be56fdb81eb4e67dc11d4e10811fc3408fd"}, ] [package.dependencies] @@ -1533,21 +1523,21 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} +pluggy = ">=1.5,<2.0" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-asyncio" -version = "0.21.1" +version = "0.21.2" description = "Pytest support for asyncio" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"}, - {file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"}, + {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"}, + {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"}, ] [package.dependencies] @@ -1569,8 +1559,8 @@ develop = false [package.source] type = "git" url = "https://github.com/canonical/data-platform-workflows" -reference = "v13.1.1" -resolved_reference = 
"52f3d97ebb97f4f37ec9678af850ecfb97fcf71a" +reference = "v13.1.2" +resolved_reference = "f86cfdfbc92c929928c0722e7542867db0b092cd" subdirectory = "python/pytest_plugins/github_secrets" [[package]] @@ -1589,19 +1579,19 @@ pytest = "*" [package.source] type = "git" url = "https://github.com/canonical/data-platform-workflows" -reference = "v13.1.1" -resolved_reference = "52f3d97ebb97f4f37ec9678af850ecfb97fcf71a" +reference = "v13.1.2" +resolved_reference = "f86cfdfbc92c929928c0722e7542867db0b092cd" subdirectory = "python/pytest_plugins/microceph" [[package]] name = "pytest-operator" -version = "0.32.0" +version = "0.35.0" description = "Fixtures for Operators" optional = false python-versions = "*" files = [ - {file = "pytest-operator-0.32.0.tar.gz", hash = "sha256:9e7b3b1384118110654f86bb6aaf772d29c6f38aec05492707ad09beff7b645b"}, - {file = "pytest_operator-0.32.0-py3-none-any.whl", hash = "sha256:a03efd6e3aea5f5c7395ef64c45d6d1719fde61f8593804dc5c8ffff561ecfd4"}, + {file = "pytest-operator-0.35.0.tar.gz", hash = "sha256:ed963dc013fc576e218081e95197926b7c98116c1fb5ab234269cf72e0746d5b"}, + {file = "pytest_operator-0.35.0-py3-none-any.whl", hash = "sha256:026715faba7a0d725ca386fe05a45cfc73746293d8d755be6d2a67ca252267f5"}, ] [package.dependencies] @@ -1627,8 +1617,8 @@ pyyaml = "*" [package.source] type = "git" url = "https://github.com/canonical/data-platform-workflows" -reference = "v13.1.1" -resolved_reference = "52f3d97ebb97f4f37ec9678af850ecfb97fcf71a" +reference = "v13.1.2" +resolved_reference = "f86cfdfbc92c929928c0722e7542867db0b092cd" subdirectory = "python/pytest_plugins/pytest_operator_cache" [[package]] @@ -1646,19 +1636,19 @@ pytest = "*" [package.source] type = "git" url = "https://github.com/canonical/data-platform-workflows" -reference = "v13.1.1" -resolved_reference = "52f3d97ebb97f4f37ec9678af850ecfb97fcf71a" +reference = "v13.1.2" +resolved_reference = "f86cfdfbc92c929928c0722e7542867db0b092cd" subdirectory = "python/pytest_plugins/pytest_operator_groups" [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -1687,7 +1677,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = 
"sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1695,16 +1684,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1721,7 +1702,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = 
"sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1729,7 +1709,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -1737,13 +1716,13 @@ files = [ [[package]] name = "referencing" -version = "0.33.0" +version = "0.35.0" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" files = [ - {file = "referencing-0.33.0-py3-none-any.whl", hash = "sha256:39240f2ecc770258f28b642dd47fd74bc8b02484de54e1882b74b35ebd779bd5"}, - {file = "referencing-0.33.0.tar.gz", hash = "sha256:c775fedf74bc0f9189c2a3be1c12fd03e8c23f4d371dce795df44e06c5b412f7"}, + {file = "referencing-0.35.0-py3-none-any.whl", hash = "sha256:8080727b30e364e5783152903672df9b6b091c926a146a759080b62ca3126cd6"}, + {file = "referencing-0.35.0.tar.gz", hash = "sha256:191e936b0c696d0af17ad7430a3dc68e88bc11be6514f4757dc890f04ab05889"}, ] [package.dependencies] @@ -1752,13 +1731,13 @@ rpds-py = ">=0.7.0" [[package]] name = "requests" -version = "2.31.0" +version = "2.32.2" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, + {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, ] [package.dependencies] @@ -1773,13 +1752,13 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "requests-oauthlib" -version = "1.3.1" +version = "2.0.0" description = "OAuthlib authentication support for Requests." 
optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.4" files = [ - {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"}, - {file = "requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5"}, + {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, + {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, ] [package.dependencies] @@ -1913,20 +1892,20 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruamel-yaml" -version = "0.17.35" +version = "0.18.6" description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" optional = false -python-versions = ">=3" +python-versions = ">=3.7" files = [ - {file = "ruamel.yaml-0.17.35-py3-none-any.whl", hash = "sha256:b105e3e6fc15b41fdb201ba1b95162ae566a4ef792b9f884c46b4ccc5513a87a"}, - {file = "ruamel.yaml-0.17.35.tar.gz", hash = "sha256:801046a9caacb1b43acc118969b49b96b65e8847f29029563b29ac61d02db61b"}, + {file = "ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636"}, + {file = "ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"}, ] [package.dependencies] "ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.13\""} [package.extras] -docs = ["ryd"] +docs = ["mercurial (>5.7)", "ryd"] jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] [[package]] @@ -1990,13 +1969,13 @@ files = [ [[package]] name = "s3transfer" -version = "0.10.0" +version = "0.10.1" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">= 3.8" files = [ - {file = "s3transfer-0.10.0-py3-none-any.whl", hash = "sha256:3cdb40f5cfa6966e812209d0994f2a4709b561c88e90cf00c2696d2df4e56b2e"}, - {file = "s3transfer-0.10.0.tar.gz", hash = "sha256:d0c8bbf672d5eebbe4e57945e23b972d963f07d82f661cabf678a5c88831595b"}, + {file = "s3transfer-0.10.1-py3-none-any.whl", hash = "sha256:ceb252b11bcf87080fb7850a224fb6e05c8a776bab8f2b64b7f25b969464839d"}, + {file = "s3transfer-0.10.1.tar.gz", hash = "sha256:5683916b4c724f799e600f41dd9e10a9ff19871bf87623cc8f491cb4f5fa0a19"}, ] [package.dependencies] @@ -2007,42 +1986,43 @@ crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] [[package]] name = "setuptools" -version = "69.1.1" +version = "69.5.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.1.1-py3-none-any.whl", hash = "sha256:02fa291a0471b3a18b2b2481ed902af520c69e8ae0919c13da936542754b4c56"}, - {file = "setuptools-69.1.1.tar.gz", hash = "sha256:5c0806c7d9af348e6dd3777b4f4dbb42c7ad85b190104837488eab9a7c945cf8"}, + {file = "setuptools-69.5.1-py3-none-any.whl", hash = "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32"}, + {file = "setuptools-69.5.1.tar.gz", hash = "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", 
"sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "shellcheck-py" -version = "0.9.0.6" +version = "0.10.0.1" description = "Python wrapper around invoking shellcheck (https://www.shellcheck.net/)" optional = false python-versions = ">=3.8" files = [ - {file = "shellcheck_py-0.9.0.6-py2.py3-none-macosx_11_0_x86_64.whl", hash = "sha256:38d48a4e2279f5deac374574e7625cd53b7f615301f36b1b1fffd22105dc066d"}, - {file = "shellcheck_py-0.9.0.6-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:730235c4f92657884f8b343d5426e4dc28e9a6ba9ad54d469cd038e340ea5be0"}, - {file = "shellcheck_py-0.9.0.6-py2.py3-none-win_amd64.whl", hash = "sha256:d1d0c285e2c094813659e0920559a2892da598c1176da59cb4eb9e2f505e5ee8"}, - {file = "shellcheck_py-0.9.0.6.tar.gz", hash = "sha256:f83a0ee1e9762f787ab52e8a906e553b9583586c44e3f9730b6e635f296a69e8"}, + {file = "shellcheck_py-0.10.0.1-py2.py3-none-macosx_11_0_x86_64.whl", hash = "sha256:48f08965cafbb3363b265c4ef40628ffced19cb6fc7c4bb5ce72d32cbcfb4bb9"}, + {file = "shellcheck_py-0.10.0.1-py2.py3-none-macosx_14_0_arm64.whl", hash = "sha256:8f3bf12ee6d0845dd5ac1a7bac8c4b1fec0379e115950986883c9488af40ada7"}, + {file = "shellcheck_py-0.10.0.1-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1c266f7f54cd286057c592ead3095f93d123acdcabf048879a7d8900c3aac7b"}, + {file = "shellcheck_py-0.10.0.1-py2.py3-none-win_amd64.whl", hash = "sha256:be73a16931c05f79643ff74b6519d1e1203b394583ab8c68a48a8e7f257d1090"}, + {file = "shellcheck_py-0.10.0.1.tar.gz", hash = "sha256:390826b340b8c19173922b0da5ef7b66ef34d4d087dc48aad3e01f7e77e164d9"}, ] [[package]] name = "shortuuid" -version = "1.0.11" +version = "1.0.13" description = "A generator library for concise, unambiguous and URL-safe UUIDs." 
optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "shortuuid-1.0.11-py3-none-any.whl", hash = "sha256:27ea8f28b1bd0bf8f15057a3ece57275d2059d2b0bb02854f02189962c13b6aa"}, - {file = "shortuuid-1.0.11.tar.gz", hash = "sha256:fc75f2615914815a8e4cb1501b3a513745cb66ef0fd5fc6fb9f8c3fa3481f789"}, + {file = "shortuuid-1.0.13-py3-none-any.whl", hash = "sha256:a482a497300b49b4953e15108a7913244e1bb0d41f9d332f5e9925dba33a3c5a"}, + {file = "shortuuid-1.0.13.tar.gz", hash = "sha256:3bb9cf07f606260584b1df46399c0b87dd84773e7b25912b7e391e30797c5e72"}, ] [[package]] @@ -2088,17 +2068,18 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] [[package]] name = "tenacity" -version = "8.2.3" +version = "8.3.0" description = "Retry code until it succeeds" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, - {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, + {file = "tenacity-8.3.0-py3-none-any.whl", hash = "sha256:3649f6443dbc0d9b01b9d8020a9c4ec7a1ff5f6f3c6c8a036ef371f573fe9185"}, + {file = "tenacity-8.3.0.tar.gz", hash = "sha256:953d4e6ad24357bceffbc9707bc74349aca9d245f68eb65419cf0c249a1949a2"}, ] [package.extras] -doc = ["reno", "sphinx", "tornado (>=4.5)"] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tomli" @@ -2124,28 +2105,28 @@ files = [ [[package]] name = "traitlets" -version = "5.14.1" +version = "5.14.3" description = "Traitlets Python configuration system" optional = false python-versions = ">=3.8" files = [ - {file = "traitlets-5.14.1-py3-none-any.whl", hash = "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74"}, - {file = "traitlets-5.14.1.tar.gz", hash = "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e"}, + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, ] [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] [[package]] name = "typing-extensions" -version = "4.9.0" +version = "4.11.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, - {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, + {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, + {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, ] [[package]] @@ -2192,17 +2173,17 @@ files = [ [[package]] name = "websocket-client" -version = "1.7.0" +version = "1.8.0" description = "WebSocket client for Python with low level API options" optional = false python-versions = ">=3.8" 
files = [ - {file = "websocket-client-1.7.0.tar.gz", hash = "sha256:10e511ea3a8c744631d3bd77e61eb17ed09304c413ad42cf6ddfa4c7787e8fe6"}, - {file = "websocket_client-1.7.0-py3-none-any.whl", hash = "sha256:f4c3d22fec12a2461427a29957ff07d35098ee2d976d3ba244e688b8b4057588"}, + {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, + {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, ] [package.extras] -docs = ["Sphinx (>=6.0)", "sphinx-rtd-theme (>=1.1.0)"] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] optional = ["python-socks", "wsaccel"] test = ["websockets"] @@ -2290,4 +2271,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "0884e699f8c06fc88c22169d9348a6573f12eb7870a6ebf72a260de47c5ac6c9" +content-hash = "a09f89456b47ef14e1d9da1d5f87ac855dd6aacaaa3e2bbd0f4f7b1dc530192c" diff --git a/pyproject.toml b/pyproject.toml index 68d558240..3ac373e53 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,79 +10,78 @@ authors = [] [tool.poetry.dependencies] python = "^3.10" -ops = "^2.12.0" -tenacity = "^8.2.2" +ops = "^2.13.0" +tenacity = "^8.2.3" boto3 = "^1.28.22" -jinja2 = "^3.1.2" -overrides = "7.4.0" -requests = "2.31.0" +overrides = "^7.7.0" +requests = "2.32.2" # Official name: ruamel.yaml, but due to Poetry GH#109 - replace dots with dashs -ruamel-yaml = "0.17.35" -shortuuid = "1.0.11" +ruamel-yaml = "0.18.6" +shortuuid = "1.0.13" jproperties = "2.1.1" pydantic = "^1.10, <2" -cryptography = "^42.0.2" +cryptography = "^42.0.7" jsonschema = "^4.21.1" poetry-core = "^1.9.0" [tool.poetry.group.charm-libs.dependencies] # data_platform_libs/v0/data_interfaces.py -ops = ">=2.0.0" +ops = "^2.13.0" # data_platform_libs/v0/upgrade.py # grafana_agent/v0/cos_agent.py requires pydantic <2 pydantic = "^1.10, <2" # tls_certificates_interface/v1/tls_certificates.py -cryptography = "^42.0.2" +cryptography = "^42.0.5" jsonschema = "^4.21.1" # grafana_agent/v0/cos_agent.py -cosl = ">=0.0.7" -bcrypt = ">=4.0.1" +cosl = "^0.0.11" +bcrypt = "^4.1.3" [tool.poetry.group.format] optional = true [tool.poetry.group.format.dependencies] -black = "^23.7.0" -isort = "^5.12.0" +black = "^24.4.2" +isort = "^5.13.2" [tool.poetry.group.lint] optional = true [tool.poetry.group.lint.dependencies] -black = "^23.7.0" -isort = "^5.12.0" -flake8 = "^6.0.0" +black = "^24.4.2" +isort = "^5.13.2" +flake8 = "^7.0.0" flake8-docstrings = "^1.7.0" flake8-copyright = "^0.2.4" -flake8-builtins = "^2.1.0" -pyproject-flake8 = "^6.0.0.post1" +flake8-builtins = "^2.5.0" +pyproject-flake8 = "^7.0.0" pep8-naming = "^0.13.3" -codespell = "^2.2.5" -shellcheck-py = "^0.9.0.5" +codespell = "^2.2.6" +shellcheck-py = "^0.10.0.1" [tool.poetry.group.unit.dependencies] -pytest = "^7.4.0" -pytest-asyncio = "<0.23" -coverage = {extras = ["toml"], version = "^7.4.1"} +pytest = "^8.2.1" +pytest-asyncio = "^0.21.2" +coverage = {extras = ["toml"], version = "^7.5.1"} parameterized = "^0.9.0" [tool.poetry.group.integration.dependencies] -boto3 = "^1.28.23" -pytest = "^7.4.0" -pytest-github-secrets = {git = "https://github.com/canonical/data-platform-workflows", tag = "v13.1.1", subdirectory = "python/pytest_plugins/github_secrets"} -pytest-asyncio = "<0.23" -pytest-operator = "^0.32.0" -pytest-operator-cache = {git = "https://github.com/canonical/data-platform-workflows", tag = "v13.1.1", subdirectory = 
"python/pytest_plugins/pytest_operator_cache"} -pytest-operator-groups = {git = "https://github.com/canonical/data-platform-workflows", tag = "v13.1.1", subdirectory = "python/pytest_plugins/pytest_operator_groups"} -pytest-microceph = {git = "https://github.com/canonical/data-platform-workflows", tag = "v13.1.1", subdirectory = "python/pytest_plugins/microceph"} -juju = "^3.2.2" -ops = "^2.5.0" -tenacity = "^8.2.2" +boto3 = "^1.34.112" +pytest = "^8.2.1" +pytest-github-secrets = {git = "https://github.com/canonical/data-platform-workflows", tag = "v13.1.2", subdirectory = "python/pytest_plugins/github_secrets"} +pytest-asyncio = "^0.21.2" +pytest-operator = "^0.35.0" +pytest-operator-cache = {git = "https://github.com/canonical/data-platform-workflows", tag = "v13.1.2", subdirectory = "python/pytest_plugins/pytest_operator_cache"} +pytest-operator-groups = {git = "https://github.com/canonical/data-platform-workflows", tag = "v13.1.2", subdirectory = "python/pytest_plugins/pytest_operator_groups"} +pytest-microceph = {git = "https://github.com/canonical/data-platform-workflows", tag = "v13.1.2", subdirectory = "python/pytest_plugins/microceph"} +juju = "^3.4.0.0" +ops = "^2.13.0" +tenacity = "^8.3.0" pyyaml = "^6.0.1" -urllib3 = "^1.26.16" -protobuf = "3.20.0" -opensearch-py = "^2.4.2" +urllib3 = "^1.26.18" +protobuf = "^5.27.0" +opensearch-py = "^2.5.0" [tool.coverage.run] branch = true diff --git a/src/charm.py b/src/charm.py index 67e2e71a2..cb25697f7 100755 --- a/src/charm.py +++ b/src/charm.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Charmed Machine Operator for OpenSearch.""" diff --git a/src/lifecycle.py b/src/lifecycle.py index c8ec9da46..c13791cdf 100644 --- a/src/lifecycle.py +++ b/src/lifecycle.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Charm lifecycle diff --git a/src/machine_upgrade.py b/src/machine_upgrade.py index 06218bdef..30e6cbdb6 100644 --- a/src/machine_upgrade.py +++ b/src/machine_upgrade.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """In-place upgrades on machines @@ -179,7 +179,9 @@ def authorized(self) -> bool: or state is not upgrade.UnitState.HEALTHY ): # Waiting for higher number units to upgrade + logger.debug(f"Upgrade not authorized. Waiting for {unit.name=} to upgrade") return False + logger.debug(f"Upgrade not authorized. Waiting for {unit.name=} to upgrade") return False def upgrade_unit(self, *, snap: OpenSearchSnap) -> None: diff --git a/src/opensearch.py b/src/opensearch.py index d4ade92dc..95bfd1b22 100644 --- a/src/opensearch.py +++ b/src/opensearch.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """In this class we manage opensearch distributions specific to the VM charm. diff --git a/src/status_exception.py b/src/status_exception.py index f3cc6dd7c..e7b94ea83 100644 --- a/src/status_exception.py +++ b/src/status_exception.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Exception with ops status""" diff --git a/src/upgrade.py b/src/upgrade.py index a42521fc7..2143d5b48 100644 --- a/src/upgrade.py +++ b/src/upgrade.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. 
# See LICENSE file for licensing details. """In-place upgrades @@ -17,12 +17,16 @@ import ops import poetry.core.constraints.version as poetry_version +from charms.opensearch.v0.helper_cluster import ClusterTopology from charms.opensearch.v0.opensearch_distro import OpenSearchDistribution +from charms.opensearch.v0.opensearch_exceptions import OpenSearchHttpError +from charms.opensearch.v0.opensearch_health import HealthColors import status_exception logger = logging.getLogger(__name__) + PEER_RELATION_ENDPOINT_NAME = "upgrade-version-a" PRECHECK_ACTION_NAME = "pre-upgrade-check" RESUME_ACTION_NAME = "resume-upgrade" @@ -66,6 +70,7 @@ def __init__(self, charm_: ops.CharmBase) -> None: if not relations: raise PeerRelationNotReady assert len(relations) == 1 + self._charm = charm_ self._peer_relation = relations[0] self._unit: ops.Unit = charm_.unit self._unit_databag = self._peer_relation.data[self._unit] @@ -262,7 +267,29 @@ def pre_upgrade_check(self) -> None: See https://chat.canonical.com/canonical/pl/cmf6uhm1rp8b7k8gkjkdsj4mya """ logger.debug("Running pre-upgrade checks") - # TODO: implement checks - # e.g. - # if health != green: - # raise PrecheckFailed("Cluster is not healthy") + + try: + health = self._charm.health.get( + local_app_only=False, + wait_for_green_first=True, + ) + if health != HealthColors.GREEN: + raise PrecheckFailed(f"Cluster health is {health} instead of green") + + online_nodes = ClusterTopology.nodes( + self._charm.opensearch, + True, + hosts=self._charm.alt_hosts, + ) + if ( + not self._charm.is_every_unit_marked_as_started() + or len([node for node in online_nodes if node.app_name == self._charm.app.name]) + < self._charm.app.planned_units() + ): + raise PrecheckFailed("Not all units are online for the current app.") + + if not self._charm.backup.is_idle_or_not_set(): + raise PrecheckFailed("Backup or restore is in progress") + + except OpenSearchHttpError: + raise PrecheckFailed("Cluster is unreachable") diff --git a/src/utils.py b/src/utils.py index bcaa8d5de..d4655e10f 100644 --- a/src/utils.py +++ b/src/utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Utility functions.""" diff --git a/tests/helpers.py b/tests/helpers.py index 111ebac7b..e1a4cdc99 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import shutil from datetime import datetime, timedelta diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index db3bfe1a6..e3979c0f6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -1,2 +1,2 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. diff --git a/tests/integration/ha/__init__.py b/tests/integration/ha/__init__.py index db3bfe1a6..e3979c0f6 100644 --- a/tests/integration/ha/__init__.py +++ b/tests/integration/ha/__init__.py @@ -1,2 +1,2 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. diff --git a/tests/integration/ha/conftest.py b/tests/integration/ha/conftest.py index 93a2511f6..073e9d308 100644 --- a/tests/integration/ha/conftest.py +++ b/tests/integration/ha/conftest.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
import logging diff --git a/tests/integration/ha/continuous_writes.py b/tests/integration/ha/continuous_writes.py index d8153b076..6e9d2f23f 100644 --- a/tests/integration/ha/continuous_writes.py +++ b/tests/integration/ha/continuous_writes.py @@ -1,11 +1,11 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import asyncio import logging import os import time -from multiprocessing import Event, Process, Queue +from multiprocessing import Event, Process, Queue, log_to_stderr from types import SimpleNamespace from typing import Optional @@ -48,7 +48,7 @@ def __init__(self, ops_test: OpsTest, app: str, initial_count: int = 0): wait=wait_fixed(wait=5) + wait_random(0, 5), stop=stop_after_attempt(5), ) - async def start(self, repl_on_all_nodes: bool = False) -> None: + async def start(self, repl_on_all_nodes: bool = False, is_bulk: bool = True) -> None: """Run continuous writes in the background.""" if not self._is_stopped: await self.clear() @@ -58,7 +58,7 @@ async def start(self, repl_on_all_nodes: bool = False) -> None: await self._create_fully_replicated_index() # create process - self._create_process() + self._create_process(is_bulk=is_bulk) # put data (hosts, password) in the process queue await self.update() @@ -87,7 +87,7 @@ async def clear(self) -> None: client = await self._client() try: - client.indices.delete(index=ContinuousWrites.INDEX_NAME) + client.indices.delete(index=ContinuousWrites.INDEX_NAME, ignore_unavailable=True) finally: client.close() @@ -137,6 +137,7 @@ async def _create_fully_replicated_index(self): body={ "settings": {"index": {"number_of_shards": 2, "auto_expand_replicas": "1-all"}} }, + wait_for_active_shards="all", ) finally: client.close() @@ -169,25 +170,30 @@ async def stop(self) -> SimpleNamespace: return result - def _create_process(self): + def _create_process(self, is_bulk: bool = True): self._is_stopped = False self._event = Event() self._queue = Queue() self._process = Process( target=ContinuousWrites._run_async, name="continuous_writes", - args=(self._event, self._queue, self._initial_count, True), + args=(self._event, self._queue, self._initial_count, is_bulk), ) def _stop_process(self): + if self._is_stopped or not self._process.is_alive(): + self._is_stopped = True + return + self._event.set() self._process.join() self._queue.close() + self._process.terminate() self._is_stopped = True async def _secrets(self) -> str: """Fetch secrets and return the password.""" - secrets = await get_secrets(self._ops_test) + secrets = await get_secrets(self._ops_test, app=self._app) with open(ContinuousWrites.CERT_PATH, "w") as chain: chain.write(secrets["ca-chain"]) @@ -211,6 +217,8 @@ async def _run( # noqa: C901 event: Event, data_queue: Queue, starting_number: int, is_bulk: bool ) -> None: """Continuous writing.""" + proc_logger = log_to_stderr() + proc_logger.setLevel(logging.INFO) def _client(_data) -> OpenSearch: return opensearch_client( @@ -219,6 +227,10 @@ def _client(_data) -> OpenSearch: write_value = starting_number + proc_logger.info( + f"Starting continuous writes from {write_value} with is_bulk={is_bulk}..." 
+ ) + data = data_queue.get(True) client = _client(data) @@ -235,8 +247,9 @@ def _client(_data) -> OpenSearch: ContinuousWrites._index(client, write_value) # todo: remove when we get bigger runners (to reduce data transfer time) - time.sleep(0.75) + time.sleep(1) except BulkIndexError: + proc_logger.info(f"Bulk failed for {write_value}") continue except (TransportError, ConnectionRefusedError): client.close() @@ -245,6 +258,7 @@ def _client(_data) -> OpenSearch: except (TransportError, ConnectionRefusedError): pass + proc_logger.info(f"Transport or Conn Refused error for {write_value}") continue finally: # process termination requested @@ -256,7 +270,7 @@ def _client(_data) -> OpenSearch: # write last expected written value on disk with open(ContinuousWrites.LAST_WRITTEN_VAL_PATH, "w") as f: if is_bulk: - write_value = (100 * write_value) + 99 + write_value += 99 f.write(str(write_value)) os.fsync(f) @@ -269,7 +283,7 @@ def _bulk(client: OpenSearch, write_value: int) -> None: """Bulk Index group of docs.""" data = [] for i in range(100): - val = (100 * write_value) + i + val = write_value + i data.append( { "_index": ContinuousWrites.INDEX_NAME, diff --git a/tests/integration/ha/helpers.py b/tests/integration/ha/helpers.py index 2e57798cb..ffe847da9 100644 --- a/tests/integration/ha/helpers.py +++ b/tests/integration/ha/helpers.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import asyncio @@ -22,6 +22,7 @@ ) from ..helpers import ( + APP_NAME, get_application_unit_ids, get_application_unit_ids_hostnames, get_application_unit_ids_ips, @@ -45,12 +46,13 @@ class Shard: """Class for holding a shard.""" - def __init__(self, index: str, num: int, is_prim: bool, node_id: str, unit_id: int): + def __init__(self, index: str, num: int, is_prim: bool, node_id: str, unit_id: int, app: str): self.index = index self.num = num self.is_prim = is_prim self.node_id = node_id self.unit_id = unit_id + self.app = app async def app_name(ops_test: OpsTest) -> Optional[str]: @@ -60,12 +62,20 @@ async def app_name(ops_test: OpsTest) -> Optional[str]: application name "opensearch". Note: if multiple clusters are running OpenSearch this will return the one first found. 
""" - status = await ops_test.model.get_status() - for app in ops_test.model.applications: - if "opensearch" in status["applications"][app]["charm"]: - return app + apps = json.loads( + subprocess.check_output( + f"juju status --model {ops_test.model.info.name} --format=json".split() + ) + )["applications"] + + opensearch_apps = { + name: desc for name, desc in apps.items() if desc["charm-name"] == "opensearch" + } + for name, desc in opensearch_apps.items(): + if name == "opensearch-main": + return name - return None + return list(opensearch_apps.keys())[0] if opensearch_apps else None @retry( @@ -146,14 +156,15 @@ async def get_shards_by_index(ops_test: OpsTest, unit_ip: str, index_name: str) result = [] for shards_collection in response["shards"]: for shard in shards_collection: - unit_id = int(nodes[shard["node"]]["name"].split("-")[1]) + node_name_split = nodes[shard["node"]]["name"].split("-") result.append( Shard( index=index_name, num=shard["shard"], is_prim=shard["primary"], node_id=shard["node"], - unit_id=unit_id, + unit_id=int(node_name_split[-1]), + app="-".join(node_name_split[:-1]), ) ) @@ -191,12 +202,13 @@ async def get_number_of_shards_by_node(ops_test: OpsTest, unit_ip: str) -> Dict[ wait=wait_fixed(wait=15) + wait_random(0, 5), stop=stop_after_attempt(25), ) -async def all_nodes(ops_test: OpsTest, unit_ip: str) -> List[Node]: +async def all_nodes(ops_test: OpsTest, unit_ip: str, app: str = APP_NAME) -> List[Node]: """Fetch all cluster nodes.""" response = await http_request( ops_test, "GET", f"https://{unit_ip}:9200/_nodes", + app=app, ) nodes = response.get("nodes", {}) @@ -226,7 +238,7 @@ async def assert_continuous_writes_increasing( async def assert_continuous_writes_consistency( - ops_test: OpsTest, c_writes: ContinuousWrites, app: str + ops_test: OpsTest, c_writes: ContinuousWrites, apps: List[str] ) -> None: """Continuous writes checks.""" result = await c_writes.stop() @@ -234,24 +246,26 @@ async def assert_continuous_writes_consistency( assert result.max_stored_id == result.count - 1 assert result.max_stored_id == result.last_expected_id - # investigate the data in each shard, primaries and their respective replicas - units_ips = await get_application_unit_ids_ips(ops_test, app) - shards = await get_shards_by_index( - ops_test, list(units_ips.values())[0], ContinuousWrites.INDEX_NAME - ) + unit_ip = await get_leader_unit_ip(ops_test, apps[0]) + # fetch unit ips by unit id by application + apps_units_ips = {app: await get_application_unit_ids_ips(ops_test, app) for app in apps} + + # investigate the data in each shard, primaries and their respective replicas + shards = await get_shards_by_index(ops_test, unit_ip, ContinuousWrites.INDEX_NAME) shards_by_id = {} for shard in shards: shards_by_id.setdefault(shard.num, []).append(shard) - # count data on each shard. For the continuous writes index, we have 2 primary shards - # and replica shards of each on all the nodes. In other words: prim1 and its replicas - # will have a different "num" than prim2 and its replicas. + # count data on each shard. For the **balanced** continuous writes index, we have 2 + # primary shards and replica shards of each on all the nodes. In other words: prim1 and + # its replicas will have a different "num" than prim2 and its replicas. 
count_from_shards = 0 for shard_num, shards_list in shards_by_id.items(): count_by_shard = [ await c_writes.count( - units_ips[shard.unit_id], preference=f"_shards:{shard_num}|_only_local" + unit_ip=apps_units_ips[shard.app][shard.unit_id], + preference=f"_shards:{shard_num}|_only_local", ) for shard in shards_list ] @@ -461,13 +475,15 @@ async def print_logs(ops_test: OpsTest, app: str, unit_id: int, msg: str) -> str return msg -async def wait_for_backup_system_to_settle(ops_test: OpsTest, leader_id: int, unit_ip: str): +async def wait_for_backup_system_to_settle( + ops_test: OpsTest, leader_id: int, unit_ip: str, app: str = APP_NAME +): """Waits the backup to finish and move to the finished state or throws a RetryException.""" for attempt in Retrying(stop=stop_after_attempt(8), wait=wait_fixed(15)): with attempt: # First, check if current backups are finished action = await run_action( - ops_test, leader_id, "list-backups", params={"output": "json"} + ops_test, leader_id, "list-backups", params={"output": "json"}, app=app ) # Expected format: # namespace(status='completed', response={'return-code': 0, 'backups': '{"1": ...}'}) @@ -501,7 +517,9 @@ async def delete_backup(ops_test: OpsTest, backup_id: int) -> None: ) -async def start_and_check_continuous_writes(ops_test: OpsTest, unit_ip: str, app: str) -> bool: +async def assert_start_and_check_continuous_writes( + ops_test: OpsTest, unit_ip: str, app: str +) -> None: """Start continuous writes and check that documents are increasing after some time. Given we are restoring an index, we need to make sure ContinuousWrites restart at @@ -515,53 +533,58 @@ async def start_and_check_continuous_writes(ops_test: OpsTest, unit_ip: str, app ) writer = ContinuousWrites(ops_test, app, initial_count=initial_count) await writer.start() - time.sleep(60) + time.sleep(10) # Ensure we have writes happening and the index is consistent at the end await assert_continuous_writes_increasing(writer) - await assert_continuous_writes_consistency(ops_test, writer, app) + await assert_continuous_writes_consistency(ops_test, writer, [app]) # Clear the writer manually, as we are not using the conftest c_writes_runner to do so await writer.clear() -async def create_backup(ops_test: OpsTest, leader_id: int, unit_ip: str) -> str: +async def create_backup( + ops_test: OpsTest, leader_id: int, unit_ip: str, app: str = APP_NAME +) -> str: """Runs the backup of the cluster.""" - action = await run_action(ops_test, leader_id, "create-backup") + action = await run_action(ops_test, leader_id, "create-backup", app=app) logger.debug(f"create-backup output: {action}") - await wait_for_backup_system_to_settle(ops_test, leader_id, unit_ip) + await wait_for_backup_system_to_settle(ops_test, leader_id, unit_ip, app=app) assert action.status == "completed" assert action.response["status"] == "Backup is running." 
return action.response["backup-id"] -async def restore(ops_test: OpsTest, backup_id: str, unit_ip: str, leader_id: int) -> bool: +async def restore( + ops_test: OpsTest, backup_id: str, unit_ip: str, leader_id: int, app: str = APP_NAME +) -> bool: """Restores a backup.""" - action = await run_action(ops_test, leader_id, "restore", params={"backup-id": backup_id}) + action = await run_action( + ops_test, leader_id, "restore", params={"backup-id": backup_id}, app=app + ) logger.debug(f"restore output: {action}") - await wait_for_backup_system_to_settle(ops_test, leader_id, unit_ip) + await wait_for_backup_system_to_settle(ops_test, leader_id, unit_ip, app=app) return action.status == "completed" -async def list_backups(ops_test: OpsTest, leader_id: int) -> Dict[str, str]: - action = await run_action(ops_test, leader_id, "list-backups", params={"output": "json"}) +async def list_backups(ops_test: OpsTest, leader_id: int, app: str = APP_NAME) -> Dict[str, str]: + action = await run_action( + ops_test, leader_id, "list-backups", params={"output": "json"}, app=app + ) assert action.status == "completed" return json.loads(action.response["backups"]) async def assert_restore_indices_and_compare_consistency( - ops_test: OpsTest, app: str, leader_id: int, unit_ip: str, backup_id: int + ops_test: OpsTest, app: str, leader_id: int, unit_ip: str, backup_id: str ) -> None: - """Ensures that continuous writes index has at least the value below. - - assert new_count >= * (1 - loss) documents. - """ + """Ensures that continuous writes index has at least the value below.""" original_count = await index_docs_count(ops_test, app, unit_ip, ContinuousWrites.INDEX_NAME) # As stated on: https://discuss.elastic.co/t/how-to-parse-snapshot-dat-file/218888, # the only way to discover the documents in a backup is to recover it and check # on opensearch. # The logic below will run over each backup id, restore it and ensure continuous writes # index loss is within the "loss" parameter. - assert await restore(ops_test, backup_id, unit_ip, leader_id) + assert await restore(ops_test, backup_id, unit_ip, leader_id, app=app) new_count = await index_docs_count(ops_test, app, unit_ip, ContinuousWrites.INDEX_NAME) logger.info( f"Testing restore for {ContinuousWrites.INDEX_NAME} - " @@ -569,4 +592,4 @@ async def assert_restore_indices_and_compare_consistency( ) # We expect that new_count has a loss of documents and the numbers are different. # Check if we have data but not all of it. - assert new_count > 0 and new_count < original_count + assert 0 < new_count < original_count diff --git a/tests/integration/ha/helpers_data.py b/tests/integration/ha/helpers_data.py index 254b6fe24..7118a3c47 100644 --- a/tests/integration/ha/helpers_data.py +++ b/tests/integration/ha/helpers_data.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Helper functions for data related tests, such as indexing, searching etc..""" diff --git a/tests/integration/ha/test_backups.py b/tests/integration/ha/test_backups.py index 84fc203c8..03197fcbc 100644 --- a/tests/integration/ha/test_backups.py +++ b/tests/integration/ha/test_backups.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Tests for the OpenSearch charm with backups and restores. 
@@ -25,7 +25,11 @@ import boto3 import pytest -from charms.opensearch.v0.constants_charm import OPENSEARCH_BACKUP_ID_FORMAT +from charms.opensearch.v0.constants_charm import ( + OPENSEARCH_BACKUP_ID_FORMAT, + BackupSetupFailed, + S3RelMissing, +) from charms.opensearch.v0.opensearch_backups import S3_REPOSITORY from pytest_operator.plugin import OpsTest @@ -47,15 +51,56 @@ assert_continuous_writes_consistency, assert_continuous_writes_increasing, assert_restore_indices_and_compare_consistency, + assert_start_and_check_continuous_writes, create_backup, list_backups, restore, - start_and_check_continuous_writes, ) from .helpers_data import index_docs_count logger = logging.getLogger(__name__) +DEPLOY_CLOUD_GROUP_MARKS = [ + ( + pytest.param( + cloud_name, + deploy_type, + id=f"{cloud_name}-{deploy_type}", + marks=pytest.mark.group(f"{cloud_name}-{deploy_type}"), + ) + ) + for cloud_name in ["microceph", "aws"] + for deploy_type in ["large", "small"] +] + + +DEPLOY_SMALL_ONLY_CLOUD_GROUP_MARKS = [ + ( + pytest.param( + cloud_name, + deploy_type, + id=f"{cloud_name}-{deploy_type}", + marks=pytest.mark.group(f"{cloud_name}-{deploy_type}"), + ) + ) + for cloud_name in ["microceph", "aws"] + for deploy_type in ["small"] +] + + +DEPLOY_LARGE_ONLY_CLOUD_GROUP_MARKS = [ + ( + pytest.param( + cloud_name, + deploy_type, + id=f"{cloud_name}-{deploy_type}", + marks=pytest.mark.group(f"{cloud_name}-{deploy_type}"), + ) + ) + for cloud_name in ["microceph", "aws"] + for deploy_type in ["large"] +] + S3_INTEGRATOR = "s3-integrator" S3_INTEGRATOR_CHANNEL = "latest/edge" TIMEOUT = 10 * 60 @@ -185,21 +230,12 @@ async def _configure_s3( ) -S3_INTEGRATOR = "s3-integrator" -S3_INTEGRATOR_CHANNEL = "latest/edge" -TIMEOUT = 10 * 60 - - -@pytest.mark.parametrize( - "cloud_name", - [ - (pytest.param("microceph", marks=pytest.mark.group("microceph"))), - (pytest.param("aws", marks=pytest.mark.group("aws"))), - ], -) +@pytest.mark.parametrize("cloud_name,deploy_type", DEPLOY_SMALL_ONLY_CLOUD_GROUP_MARKS) @pytest.mark.abort_on_fail @pytest.mark.skip_if_deployed -async def test_build_and_deploy(ops_test: OpsTest, cloud_name: Dict[str, Dict[str, str]]) -> None: +async def test_small_deployment_build_and_deploy( + ops_test: OpsTest, cloud_name: str, deploy_type: str +) -> None: """Build and deploy an HA cluster of OpenSearch and corresponding S3 integration.""" if await app_name(ops_test): return @@ -228,13 +264,177 @@ async def test_build_and_deploy(ops_test: OpsTest, cloud_name: Dict[str, Dict[st await ops_test.model.integrate(APP_NAME, S3_INTEGRATOR) -@pytest.mark.parametrize( - "cloud_name", - [ - (pytest.param("microceph", marks=pytest.mark.group("microceph"))), - (pytest.param("aws", marks=pytest.mark.group("aws"))), - ], -) +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) +@pytest.mark.parametrize("cloud_name,deploy_type", DEPLOY_LARGE_ONLY_CLOUD_GROUP_MARKS) +@pytest.mark.abort_on_fail +@pytest.mark.skip_if_deployed +async def test_large_deployment_build_and_deploy( + ops_test: OpsTest, cloud_name: str, deploy_type: str +) -> None: + """Build and deploy a large deployment for OpenSearch. + + The following apps will be deployed: + * main: the main orchestrator + * failover: the failover orchestrator + * opensearch (or APP_NAME): the data.hot node + + The data node is selected to adopt the "APP_NAME" value because it is the node which + ContinuousWrites will later target its writes to. + """ + await ops_test.model.set_config(MODEL_CONFIG) + # Deploy TLS Certificates operator. 
+ tls_config = {"ca-common-name": "CN_CA"} + + my_charm = await ops_test.build_charm(".") + + main_orchestrator_conf = { + "cluster_name": "backup-test", + "init_hold": False, + "roles": "cluster_manager", + } + failover_orchestrator_conf = { + "cluster_name": "backup-test", + "init_hold": True, + "roles": "cluster_manager", + } + data_hot_conf = {"cluster_name": "backup-test", "init_hold": True, "roles": "data.hot"} + + await asyncio.gather( + ops_test.model.deploy(TLS_CERTIFICATES_APP_NAME, channel="stable", config=tls_config), + ops_test.model.deploy(S3_INTEGRATOR, channel=S3_INTEGRATOR_CHANNEL), + ops_test.model.deploy( + my_charm, + application_name="main", + num_units=1, + series=SERIES, + config=main_orchestrator_conf, + ), + ops_test.model.deploy( + my_charm, + application_name="failover", + num_units=2, + series=SERIES, + config=failover_orchestrator_conf, + ), + ops_test.model.deploy( + my_charm, application_name=APP_NAME, num_units=1, series=SERIES, config=data_hot_conf + ), + ) + + # Large deployment setup + await ops_test.model.integrate("main:peer-cluster-orchestrator", "failover:peer-cluster") + await ops_test.model.integrate("main:peer-cluster-orchestrator", f"{APP_NAME}:peer-cluster") + await ops_test.model.integrate( + "failover:peer-cluster-orchestrator", f"{APP_NAME}:peer-cluster" + ) + + # TLS setup + await ops_test.model.integrate("main", TLS_CERTIFICATES_APP_NAME) + await ops_test.model.integrate("failover", TLS_CERTIFICATES_APP_NAME) + await ops_test.model.integrate(APP_NAME, TLS_CERTIFICATES_APP_NAME) + + # Charms except s3-integrator should be active + await wait_until( + ops_test, + apps=[TLS_CERTIFICATES_APP_NAME, "main", "failover", APP_NAME], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={ + TLS_CERTIFICATES_APP_NAME: 1, + "main": 1, + "failover": 2, + APP_NAME: 1, + }, + idle_period=IDLE_PERIOD, + timeout=3600, + ) + + # Credentials not set yet, this will move the opensearch to blocked state + # Credentials are set per test scenario + await ops_test.model.integrate("main", S3_INTEGRATOR) + + +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) +@pytest.mark.parametrize("cloud_name,deploy_type", DEPLOY_LARGE_ONLY_CLOUD_GROUP_MARKS) +@pytest.mark.abort_on_fail +async def test_large_setups_relations_with_misconfiguration( + ops_test: OpsTest, + cloud_name: str, + deploy_type: str, +) -> None: + """Tests the different blocked messages expected in large deployments.""" + config = { + "endpoint": "http://localhost", + "bucket": "error", + "path": "/", + "region": "default", + } + credentials = { + "access-key": "error", + "secret-key": "error", + } + + # Not using _configure_s3 as this method will cause opensearch to block + await ops_test.model.applications[S3_INTEGRATOR].set_config(config) + await run_action( + ops_test, + 0, + "sync-s3-credentials", + params=credentials, + app=S3_INTEGRATOR, + ) + await ops_test.model.wait_for_idle( + apps=[S3_INTEGRATOR], + status="active", + timeout=TIMEOUT, + ) + await wait_until( + ops_test, + apps=["main"], + apps_statuses=["blocked"], + apps_full_statuses={"main": {"blocked": [BackupSetupFailed]}}, + idle_period=IDLE_PERIOD, + ) + + # Now, relate failover cluster to s3-integrator and review the status + await ops_test.model.integrate("failover:s3-credentials", S3_INTEGRATOR) + await ops_test.model.integrate(f"{APP_NAME}:s3-credentials", S3_INTEGRATOR) + await wait_until( + ops_test, + apps=["main", "failover", APP_NAME], + apps_statuses=["blocked"], + 
units_statuses=["blocked"], + apps_full_statuses={ + "main": {"blocked": [BackupSetupFailed]}, + "failover": {"blocked": [S3RelMissing]}, + APP_NAME: {"blocked": [S3RelMissing]}, + }, + idle_period=IDLE_PERIOD, + ) + + # Reverting should return it to normal + await ops_test.model.applications[APP_NAME].destroy_relation( + f"{APP_NAME}:s3-credentials", S3_INTEGRATOR + ) + await ops_test.model.applications["failover"].destroy_relation( + "failover:s3-credentials", S3_INTEGRATOR + ) + await wait_until( + ops_test, + apps=["main"], + apps_statuses=["blocked"], + apps_full_statuses={"main": {"blocked": [BackupSetupFailed]}}, + idle_period=IDLE_PERIOD, + ) + await wait_until( + ops_test, + apps=["failover", APP_NAME], + apps_statuses=["active"], + idle_period=IDLE_PERIOD, + ) + + +@pytest.mark.parametrize("cloud_name,deploy_type", DEPLOY_CLOUD_GROUP_MARKS) @pytest.mark.abort_on_fail async def test_create_backup_and_restore( ops_test: OpsTest, @@ -243,11 +443,13 @@ async def test_create_backup_and_restore( cloud_configs: Dict[str, Dict[str, str]], cloud_credentials: Dict[str, Dict[str, str]], cloud_name: str, + deploy_type: str, ) -> None: """Runs the backup process whilst writing to the cluster into 'noisy-index'.""" - app = (await app_name(ops_test)) or APP_NAME - leader_id = await get_leader_unit_id(ops_test) - unit_ip = await get_leader_unit_ip(ops_test) + app = (await app_name(ops_test) or APP_NAME) if deploy_type == "small" else "main" + apps = [app] if deploy_type == "small" else [app, APP_NAME] + leader_id = await get_leader_unit_id(ops_test, app=app) + unit_ip = await get_leader_unit_ip(ops_test, app=app) config = cloud_configs[cloud_name] logger.info(f"Syncing credentials for {cloud_name}") @@ -260,6 +462,7 @@ async def test_create_backup_and_restore( ops_test, leader_id, unit_ip=unit_ip, + app=app, ), OPENSEARCH_BACKUP_ID_FORMAT, ) @@ -267,7 +470,7 @@ async def test_create_backup_and_restore( ) # continuous writes checks await assert_continuous_writes_increasing(c_writes) - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, apps) await assert_restore_indices_and_compare_consistency( ops_test, app, leader_id, unit_ip, backup_id ) @@ -280,13 +483,7 @@ async def test_create_backup_and_restore( ) -@pytest.mark.parametrize( - "cloud_name", - [ - (pytest.param("microceph", marks=pytest.mark.group("microceph"))), - (pytest.param("aws", marks=pytest.mark.group("aws"))), - ], -) +@pytest.mark.parametrize("cloud_name,deploy_type", DEPLOY_CLOUD_GROUP_MARKS) @pytest.mark.abort_on_fail async def test_remove_and_readd_s3_relation( ops_test: OpsTest, @@ -295,11 +492,14 @@ async def test_remove_and_readd_s3_relation( cloud_configs: Dict[str, Dict[str, str]], cloud_credentials: Dict[str, Dict[str, str]], cloud_name: str, + deploy_type: str, ) -> None: """Removes and re-adds the s3-credentials relation to test backup and restore.""" - app: str = (await app_name(ops_test)) or APP_NAME - leader_id: str = await get_leader_unit_id(ops_test) - unit_ip: str = await get_leader_unit_ip(ops_test) + app = (await app_name(ops_test) or APP_NAME) if deploy_type == "small" else "main" + apps = [app] if deploy_type == "small" else [app, APP_NAME] + + leader_id: int = await get_leader_unit_id(ops_test, app=app) + unit_ip: str = await get_leader_unit_ip(ops_test, app=app) config: Dict[str, str] = cloud_configs[cloud_name] logger.info("Remove s3-credentials relation") @@ -315,7 +515,7 @@ async def test_remove_and_readd_s3_relation( ) 
logger.info("Re-add s3-credentials relation") - await ops_test.model.integrate(APP_NAME, S3_INTEGRATOR) + await ops_test.model.integrate(app, S3_INTEGRATOR) await ops_test.model.wait_for_idle( apps=[app], status="active", @@ -333,6 +533,7 @@ async def test_remove_and_readd_s3_relation( ops_test, leader_id, unit_ip=unit_ip, + app=app, ), OPENSEARCH_BACKUP_ID_FORMAT, ) @@ -341,7 +542,7 @@ async def test_remove_and_readd_s3_relation( # continuous writes checks await assert_continuous_writes_increasing(c_writes) - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, apps) await assert_restore_indices_and_compare_consistency( ops_test, app, leader_id, unit_ip, backup_id ) @@ -354,19 +555,14 @@ async def test_remove_and_readd_s3_relation( ) -@pytest.mark.parametrize( - "cloud_name", - [ - (pytest.param("microceph", marks=pytest.mark.group("microceph"))), - (pytest.param("aws", marks=pytest.mark.group("aws"))), - ], -) +@pytest.mark.parametrize("cloud_name,deploy_type", DEPLOY_SMALL_ONLY_CLOUD_GROUP_MARKS) @pytest.mark.abort_on_fail async def test_restore_to_new_cluster( ops_test: OpsTest, cloud_configs: Dict[str, Dict[str, str]], cloud_credentials: Dict[str, Dict[str, str]], cloud_name: str, + deploy_type: str, force_clear_cwrites_index, ) -> None: """Deletes the entire OpenSearch cluster and redeploys from scratch. @@ -376,9 +572,9 @@ async def test_restore_to_new_cluster( 1) At each backup restored, check our track of doc count vs. current index count 2) Try to write to that new index. """ - if app := await app_name(ops_test): - return + app = (await app_name(ops_test) or APP_NAME) if deploy_type == "small" else "main" logging.info("Destroying the application") + # Now, remove the applications await asyncio.gather( ops_test.model.remove_application(S3_INTEGRATOR, block_until_done=True), ops_test.model.remove_application(app, block_until_done=True), @@ -398,24 +594,24 @@ async def test_restore_to_new_cluster( ) # Relate it to OpenSearch to set up TLS. 
- await ops_test.model.integrate(APP_NAME, TLS_CERTIFICATES_APP_NAME) + await ops_test.model.integrate(app, TLS_CERTIFICATES_APP_NAME) await ops_test.model.wait_for_idle( - apps=[TLS_CERTIFICATES_APP_NAME, APP_NAME], + apps=[TLS_CERTIFICATES_APP_NAME, app], status="active", timeout=1400, idle_period=IDLE_PERIOD, ) # Credentials not set yet, this will move the opensearch to blocked state # Credentials are set per test scenario - await ops_test.model.integrate(APP_NAME, S3_INTEGRATOR) + await ops_test.model.integrate(app, S3_INTEGRATOR) - leader_id = await get_leader_unit_id(ops_test) - unit_ip = await get_leader_unit_ip(ops_test) + leader_id = await get_leader_unit_id(ops_test, app=app) + unit_ip = await get_leader_unit_ip(ops_test, app=app) config: Dict[str, str] = cloud_configs[cloud_name] logger.info(f"Syncing credentials for {cloud_name}") await _configure_s3(ops_test, config, cloud_credentials[cloud_name], app) - backups = await list_backups(ops_test, leader_id) + backups = await list_backups(ops_test, leader_id, app=app) global cwrites_backup_doc_count # We are expecting 2x backups available @@ -423,14 +619,14 @@ async def test_restore_to_new_cluster( assert len(cwrites_backup_doc_count) == 2 count = 0 for backup_id in backups.keys(): - assert await restore(ops_test, backup_id, unit_ip, leader_id) + assert await restore(ops_test, backup_id, unit_ip, leader_id, app=app) count = await index_docs_count(ops_test, app, unit_ip, ContinuousWrites.INDEX_NAME) # Ensure we have the same doc count as we had on the original cluster assert count == cwrites_backup_doc_count[backup_id] # restart the continuous writes and check the cluster is still accessible post restore - assert await start_and_check_continuous_writes(ops_test, unit_ip, app) + await assert_start_and_check_continuous_writes(ops_test, unit_ip, app) # Now, try a backup & restore with continuous writes logger.info("Final stage of DR test: try a backup & restore with continuous writes") @@ -449,6 +645,7 @@ async def test_restore_to_new_cluster( ops_test, leader_id, unit_ip=unit_ip, + app=app, ), OPENSEARCH_BACKUP_ID_FORMAT, ) @@ -457,7 +654,7 @@ async def test_restore_to_new_cluster( # continuous writes checks await assert_continuous_writes_increasing(writer) - await assert_continuous_writes_consistency(ops_test, writer, app) + await assert_continuous_writes_consistency(ops_test, writer, [app]) # This assert assures we have taken a new backup, after the last restore from the original # cluster. That means the index is writable. await assert_restore_indices_and_compare_consistency( @@ -518,7 +715,8 @@ async def test_repo_missing_message(ops_test: OpsTest) -> None: We use the message format to monitor the cluster status. We need to know if this message pattern changed between releases of OpenSearch. 
""" - unit_ip = await get_leader_unit_ip(ops_test) + app: str = (await app_name(ops_test)) or APP_NAME + unit_ip = await get_leader_unit_ip(ops_test, app=app) resp = await http_request( ops_test, "GET", f"https://{unit_ip}:9200/_snapshot/{S3_REPOSITORY}", json_resp=True ) @@ -531,8 +729,8 @@ async def test_repo_missing_message(ops_test: OpsTest) -> None: @pytest.mark.abort_on_fail async def test_wrong_s3_credentials(ops_test: OpsTest) -> None: """Check the repo is misconfigured.""" - unit_ip = await get_leader_unit_ip(ops_test) app = (await app_name(ops_test)) or APP_NAME + unit_ip = await get_leader_unit_ip(ops_test, app=app) config = { "endpoint": "http://localhost", @@ -586,9 +784,9 @@ async def test_change_config_and_backup_restore( force_clear_cwrites_index, ) -> None: """Run for each cloud and update the cluster config.""" - unit_ip: str = await get_leader_unit_ip(ops_test) app: str = (await app_name(ops_test)) or APP_NAME - leader_id: str = await get_leader_unit_id(ops_test) + unit_ip: str = await get_leader_unit_ip(ops_test, app=app) + leader_id: int = await get_leader_unit_id(ops_test, app=app) initial_count: int = 0 for cloud_name in cloud_configs.keys(): @@ -625,7 +823,7 @@ async def test_change_config_and_backup_restore( # continuous writes checks await assert_continuous_writes_increasing(writer) - await assert_continuous_writes_consistency(ops_test, writer, app) + await assert_continuous_writes_consistency(ops_test, writer, [app]) await assert_restore_indices_and_compare_consistency( ops_test, app, leader_id, unit_ip, backup_id ) diff --git a/tests/integration/ha/test_ha.py b/tests/integration/ha/test_ha.py index 2fcb9277e..4c268d6e5 100644 --- a/tests/integration/ha/test_ha.py +++ b/tests/integration/ha/test_ha.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
import asyncio @@ -113,7 +113,7 @@ async def test_replication_across_members( await delete_index(ops_test, app, leader_unit_ip, index_name) # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -175,7 +175,7 @@ async def test_kill_db_process_node_with_primary_shard( ) # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -227,7 +227,7 @@ async def test_kill_db_process_node_with_elected_cm( ) # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -312,7 +312,7 @@ async def test_freeze_db_process_node_with_primary_shard( ) # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -386,7 +386,7 @@ async def test_freeze_db_process_node_with_elected_cm( ) # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -437,7 +437,7 @@ async def test_restart_db_process_node_with_elected_cm( ops_test, leader_unit_ip, get_application_unit_names(ops_test, app=app) ) - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -498,7 +498,7 @@ async def test_restart_db_process_node_with_primary_shard( ops_test, leader_unit_ip, get_application_unit_names(ops_test, app=app) ) - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -550,7 +550,7 @@ async def test_full_cluster_crash( assert health_resp["status"] == "green", f"Cluster {health_resp['status']} - expected green." # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -603,4 +603,4 @@ async def test_full_cluster_restart( assert health_resp["status"] == "green", f"Cluster {health_resp['status']} - expected green." # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) diff --git a/tests/integration/ha/test_ha_multi_clusters.py b/tests/integration/ha/test_ha_multi_clusters.py index 67c3616d7..bbd093068 100644 --- a/tests/integration/ha/test_ha_multi_clusters.py +++ b/tests/integration/ha/test_ha_multi_clusters.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
import asyncio @@ -109,4 +109,4 @@ async def test_multi_clusters_db_isolation( await ops_test.model.remove_application(SECOND_APP_NAME) # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) diff --git a/tests/integration/ha/test_ha_networking.py b/tests/integration/ha/test_ha_networking.py index 4b43b9d8b..3f14e3f1c 100644 --- a/tests/integration/ha/test_ha_networking.py +++ b/tests/integration/ha/test_ha_networking.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import asyncio @@ -170,7 +170,7 @@ async def test_full_network_cut_with_ip_change_node_with_elected_cm( ), "Unit did NOT join the rest of the cluster." # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -288,7 +288,7 @@ async def test_full_network_cut_with_ip_change_node_with_primary_shard( ), "Unit did NOT join the rest of the cluster." # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -376,7 +376,7 @@ async def test_full_network_cut_without_ip_change_node_with_elected_cm( ), "Unit did NOT join the rest of the cluster." # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) @pytest.mark.group(1) @@ -479,4 +479,4 @@ async def test_full_network_cut_without_ip_change_node_with_primary_shard( ), "Unit did NOT join the rest of the cluster." # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) diff --git a/tests/integration/ha/test_horizontal_scaling.py b/tests/integration/ha/test_horizontal_scaling.py index 973db96af..784b6878f 100644 --- a/tests/integration/ha/test_horizontal_scaling.py +++ b/tests/integration/ha/test_horizontal_scaling.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
import asyncio @@ -40,6 +40,7 @@ logger = logging.getLogger(__name__) +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) @pytest.mark.group(1) @pytest.mark.abort_on_fail @pytest.mark.skip_if_deployed @@ -67,6 +68,7 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: assert len(ops_test.model.applications[APP_NAME].units) == 1 +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) @pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_horizontal_scale_up( @@ -112,9 +114,10 @@ async def test_horizontal_scale_up( ) # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) @pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_safe_scale_down_shards_realloc( @@ -222,104 +225,10 @@ async def test_safe_scale_down_shards_realloc( await delete_dummy_indexes(ops_test, app, leader_unit_ip) # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) - - -@pytest.mark.group(1) -@pytest.mark.abort_on_fail -async def test_safe_scale_down_roles_reassigning( - ops_test: OpsTest, c_writes: ContinuousWrites, c_writes_runner -) -> None: - """Tests the shutdown of a node with a role requiring the re-balance of the cluster roles. - - The goal of this test is to make sure that roles are automatically recalculated after - a scale-up/down event. For this end, we want to start testing with an even number of units. - """ - app = (await app_name(ops_test)) or APP_NAME - init_units_count = len(ops_test.model.applications[app].units) - - # scale up by 1/2 units depending on the parity of current units: to trigger roles reassignment - if init_units_count % 2 == 1: - # this will NOT trigger any role reassignment, but will ensure the next call will - await ops_test.model.applications[app].add_unit(count=1) - await wait_until( - ops_test, - apps=[app], - apps_statuses=["active"], - units_statuses=["active"], - wait_for_exact_units=init_units_count + 1, - idle_period=IDLE_PERIOD, - ) - init_units_count += 1 - - # going from an even to odd number of units, this should trigger a role reassignment - await ops_test.model.applications[app].add_unit(count=1) - await wait_until( - ops_test, - apps=[app], - apps_statuses=["active"], - units_statuses=["active"], - wait_for_exact_units=init_units_count + 1, - idle_period=IDLE_PERIOD, - ) - - leader_unit_ip = await get_leader_unit_ip(ops_test, app=app) - - # fetch all nodes - nodes = await all_nodes(ops_test, leader_unit_ip) - num_units = len(ops_test.model.applications[app].units) - assert ClusterTopology.nodes_count_by_role(nodes)["cluster_manager"] == num_units - - # pick a cluster manager node to remove - unit_id_to_stop = [ - node.name.split("-")[1] - for node in nodes - if node.ip != leader_unit_ip and node.is_cm_eligible() - ][0] - - # scale-down: remove a cm unit - await ops_test.model.applications[app].destroy_unit(f"{app}/{unit_id_to_stop}") - await wait_until( - ops_test, - apps=[app], - apps_statuses=["active"], - units_statuses=["active"], - wait_for_exact_units=init_units_count, - idle_period=IDLE_PERIOD, - ) - - # we expect to have a "cm" node, reconfigured to be "data only" to keep the quorum - new_nodes = await all_nodes(ops_test, leader_unit_ip) - num_units = len(ops_test.model.applications[app].units) - assert ClusterTopology.nodes_count_by_role(new_nodes)["cluster_manager"] == 
num_units - 1 - assert ClusterTopology.nodes_count_by_role(new_nodes)["data"] == num_units - - # scale-down: remove another cm unit - unit_id_to_stop = [ - node.name.split("-")[1] - for node in new_nodes - if node.ip != leader_unit_ip and node.is_cm_eligible() - ][0] - await ops_test.model.applications[app].destroy_unit(f"{app}/{unit_id_to_stop}") - await wait_until( - ops_test, - apps=[app], - apps_statuses=["active"], - units_statuses=["active"], - wait_for_exact_units=num_units - 1, - idle_period=IDLE_PERIOD, - ) - - # fetch nodes, we expect to have all nodes "cluster_manager" to keep the quorum - new_nodes = await all_nodes(ops_test, leader_unit_ip) - num_units = len(ops_test.model.applications[app].units) - assert ClusterTopology.nodes_count_by_role(new_nodes)["cluster_manager"] == num_units - assert ClusterTopology.nodes_count_by_role(new_nodes)["data"] == num_units - - # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) @pytest.mark.group(1) async def test_safe_scale_down_remove_leaders( ops_test: OpsTest, c_writes: ContinuousWrites, c_writes_runner @@ -336,17 +245,22 @@ async def test_safe_scale_down_remove_leaders( app = (await app_name(ops_test)) or APP_NAME init_units_count = len(ops_test.model.applications[app].units) - # scale up by 2 units - await ops_test.model.applications[app].add_unit(count=3) - await wait_until( - ops_test, - apps=[app], - apps_statuses=["active"], - units_statuses=["active"], - wait_for_exact_units=init_units_count + 3, - idle_period=IDLE_PERIOD, - timeout=1800, - ) + if init_units_count < 5: + # scale up by 5 - init units + added_units = 5 - init_units_count + await ops_test.model.applications[app].add_unit(count=added_units) + + await wait_until( + ops_test, + apps=[app], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units=init_units_count + added_units, + idle_period=IDLE_PERIOD, + timeout=1800, + ) + + init_units_count += added_units # scale down: remove the juju leader leader_unit_id = await get_leader_unit_id(ops_test, app=app) @@ -357,22 +271,12 @@ async def test_safe_scale_down_remove_leaders( apps=[app], apps_statuses=["active"], units_statuses=["active"], - wait_for_exact_units=init_units_count + 2, + wait_for_exact_units=init_units_count - 1, idle_period=IDLE_PERIOD, timeout=1800, ) - # make sure the duties supposed to be done by the departing leader are done - # we expect to have 3 cm-eligible+data (one of which will be elected) and - # 1 data-only nodes as per the roles-reassigning logic leader_unit_ip = await get_leader_unit_ip(ops_test, app=app) - nodes = await all_nodes(ops_test, leader_unit_ip) - assert ( - ClusterTopology.nodes_count_by_role(nodes)["cluster_manager"] == init_units_count + 2 - if init_units_count % 2 != 0 - else init_units_count + 1 - ) - assert ClusterTopology.nodes_count_by_role(nodes)["data"] == init_units_count + 2 # scale-down: remove the current elected CM first_elected_cm_unit_id = await get_elected_cm_unit_id(ops_test, leader_unit_ip) @@ -383,7 +287,7 @@ async def test_safe_scale_down_remove_leaders( apps=[app], apps_statuses=["active"], units_statuses=["active"], - wait_for_exact_units=init_units_count + 1, + wait_for_exact_units=init_units_count - 2, idle_period=IDLE_PERIOD, timeout=1800, ) @@ -402,9 +306,15 @@ async def test_safe_scale_down_remove_leaders( shards = await 
get_shards_by_index(ops_test, leader_unit_ip, ContinuousWrites.INDEX_NAME) unit_with_primary_shard = [shard.unit_id for shard in shards if shard.is_prim][0] await ops_test.model.applications[app].destroy_unit(f"{app}/{unit_with_primary_shard}") - - # sleep for a couple of minutes for the model to stabilise - time.sleep(IDLE_PERIOD + 60) + await wait_until( + ops_test, + apps=[app], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units=init_units_count - 3, + idle_period=IDLE_PERIOD, + timeout=1800, + ) writes = await c_writes.count() @@ -425,4 +335,4 @@ async def test_safe_scale_down_remove_leaders( assert new_writes > writes # continuous writes checks - await assert_continuous_writes_consistency(ops_test, c_writes, app) + await assert_continuous_writes_consistency(ops_test, c_writes, [app]) diff --git a/tests/integration/ha/test_large_deployments_relations.py b/tests/integration/ha/test_large_deployments_relations.py new file mode 100644 index 000000000..116d400fa --- /dev/null +++ b/tests/integration/ha/test_large_deployments_relations.py @@ -0,0 +1,237 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +import asyncio +import logging +import time + +import pytest +from charms.opensearch.v0.constants_charm import PClusterNoRelation, TLSRelationMissing +from pytest_operator.plugin import OpsTest + +from ..helpers import MODEL_CONFIG, SERIES, get_leader_unit_ip +from ..helpers_deployments import wait_until +from ..tls.test_tls import TLS_CERTIFICATES_APP_NAME +from .continuous_writes import ContinuousWrites +from .helpers import all_nodes +from .test_horizontal_scaling import IDLE_PERIOD + +logger = logging.getLogger(__name__) + +REL_ORCHESTRATOR = "peer-cluster-orchestrator" +REL_PEER = "peer-cluster" + +MAIN_APP = "opensearch-main" +FAILOVER_APP = "opensearch-failover" +DATA_APP = "opensearch-data" +INVALID_APP = "opensearch-invalid" + +CLUSTER_NAME = "log-app" +INVALID_CLUSTER_NAME = "timeseries" + +APP_UNITS = {MAIN_APP: 3, FAILOVER_APP: 3, DATA_APP: 2, INVALID_APP: 1} + + +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +@pytest.mark.skip_if_deployed +async def test_build_and_deploy(ops_test: OpsTest) -> None: + """Build and deploy one unit of OpenSearch.""" + # it is possible for users to provide their own cluster for HA testing. + # Hence, check if there is a pre-existing cluster. + my_charm = await ops_test.build_charm(".") + await ops_test.model.set_config(MODEL_CONFIG) + + # Deploy TLS Certificates operator. 
+ config = {"ca-common-name": "CN_CA"} + await asyncio.gather( + ops_test.model.deploy(TLS_CERTIFICATES_APP_NAME, channel="stable", config=config), + ops_test.model.deploy( + my_charm, + application_name=MAIN_APP, + num_units=3, + series=SERIES, + config={"cluster_name": CLUSTER_NAME}, + ), + ops_test.model.deploy( + my_charm, + application_name=FAILOVER_APP, + num_units=3, + series=SERIES, + config={"cluster_name": CLUSTER_NAME, "init_hold": True}, + ), + ops_test.model.deploy( + my_charm, + application_name=DATA_APP, + num_units=2, + series=SERIES, + config={"cluster_name": CLUSTER_NAME, "init_hold": True, "roles": "data.hot,ml"}, + ), + ops_test.model.deploy( + my_charm, + application_name=INVALID_APP, + num_units=1, + series=SERIES, + config={"cluster_name": INVALID_CLUSTER_NAME, "init_hold": True, "roles": "data.cold"}, + ), + ) + + # wait until the TLS operator is ready + await wait_until( + ops_test, + apps=[TLS_CERTIFICATES_APP_NAME], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={TLS_CERTIFICATES_APP_NAME: 1}, + idle_period=IDLE_PERIOD, + ) + + # confirm all apps are blocked because NO TLS relation established + await wait_until( + ops_test, + apps=list(APP_UNITS.keys()), + apps_full_statuses={ + MAIN_APP: {"blocked": [TLSRelationMissing]}, + FAILOVER_APP: {"blocked": [PClusterNoRelation]}, + DATA_APP: {"blocked": [PClusterNoRelation]}, + INVALID_APP: {"blocked": [PClusterNoRelation]}, + }, + units_full_statuses={ + MAIN_APP: {"units": {"blocked": [TLSRelationMissing]}}, + FAILOVER_APP: {"units": {"active": []}}, + DATA_APP: {"units": {"active": []}}, + INVALID_APP: {"units": {"active": []}}, + }, + wait_for_exact_units={app: units for app, units in APP_UNITS.items()}, + idle_period=IDLE_PERIOD, + ) + + +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_invalid_conditions(ops_test: OpsTest) -> None: + """Check invalid conditions under different states.""" + # integrate an app with the main-orchestrator when TLS is not related to the provider + await ops_test.model.integrate(f"{FAILOVER_APP}:{REL_PEER}", f"{MAIN_APP}:{REL_ORCHESTRATOR}") + await wait_until( + ops_test, + apps=[MAIN_APP, FAILOVER_APP], + apps_full_statuses={ + MAIN_APP: {"blocked": [TLSRelationMissing]}, + FAILOVER_APP: { + "waiting": ["TLS not fully configured in related 'main-orchestrator'."] + }, + }, + units_full_statuses={ + MAIN_APP: {"units": {"blocked": [TLSRelationMissing]}}, + FAILOVER_APP: {"units": {"active": []}}, + }, + wait_for_exact_units={ + MAIN_APP: APP_UNITS[MAIN_APP], + FAILOVER_APP: APP_UNITS[FAILOVER_APP], + }, + idle_period=IDLE_PERIOD, + ) + + # integrate TLS to all applications + for app in [MAIN_APP, FAILOVER_APP, DATA_APP, INVALID_APP]: + await ops_test.model.integrate(app, TLS_CERTIFICATES_APP_NAME) + + await wait_until( + ops_test, + apps=[MAIN_APP, FAILOVER_APP, DATA_APP, INVALID_APP], + apps_full_statuses={ + MAIN_APP: {"active": []}, + FAILOVER_APP: {"active": []}, + DATA_APP: {"blocked": [PClusterNoRelation]}, + INVALID_APP: {"blocked": [PClusterNoRelation]}, + }, + units_statuses=["active"], + wait_for_exact_units={app: units for app, units in APP_UNITS.items()}, + idle_period=IDLE_PERIOD, + ) + + c_writes = ContinuousWrites(ops_test, app=MAIN_APP) + await c_writes.start() + time.sleep(120) + await c_writes.stop() + + # fetch nodes, we should have 6 nodes (main + failover)-orchestrators + leader_unit_ip = await get_leader_unit_ip(ops_test, app=MAIN_APP) + nodes 
= await all_nodes(ops_test, leader_unit_ip, app=MAIN_APP)
+    assert len(nodes) == 6, f"Wrong node count. Expecting 6 online nodes, found: {len(nodes)}."
+
+    # integrate cluster with different name
+    await ops_test.model.integrate(f"{INVALID_APP}:{REL_PEER}", f"{MAIN_APP}:{REL_ORCHESTRATOR}")
+    await wait_until(
+        ops_test,
+        apps=[MAIN_APP, INVALID_APP],
+        apps_full_statuses={
+            MAIN_APP: {"active": []},
+            INVALID_APP: {
+                "blocked": ["Cannot relate 2 clusters with different 'cluster_name' values."]
+            },
+        },
+        units_statuses=["active"],
+        wait_for_exact_units={MAIN_APP: APP_UNITS[MAIN_APP], INVALID_APP: APP_UNITS[INVALID_APP]},
+        idle_period=IDLE_PERIOD,
+    )
+
+    # delete the invalid app name
+    await ops_test.model.remove_application(
+        INVALID_APP, block_until_done=True, force=True, destroy_storage=True, no_wait=True
+    )
+
+
+@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"])
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_large_deployment_fully_formed(
+    ops_test: OpsTest, c_writes: ContinuousWrites, c_writes_runner
+) -> None:
+    """Test that under optimal conditions all the nodes form the same big cluster."""
+    await ops_test.model.integrate(f"{DATA_APP}:{REL_PEER}", f"{MAIN_APP}:{REL_ORCHESTRATOR}")
+    await ops_test.model.integrate(f"{DATA_APP}:{REL_PEER}", f"{FAILOVER_APP}:{REL_ORCHESTRATOR}")
+
+    await wait_until(
+        ops_test,
+        apps=[MAIN_APP, FAILOVER_APP, DATA_APP],
+        apps_statuses=["active"],
+        units_statuses=["active"],
+        wait_for_exact_units={
+            app: units for app, units in APP_UNITS.items() if app != INVALID_APP
+        },
+        idle_period=IDLE_PERIOD,
+    )
+
+    # fetch nodes, we should have 8 nodes (main + failover orchestrators + data)
+    leader_unit_ip = await get_leader_unit_ip(ops_test, app=MAIN_APP)
+    nodes = await all_nodes(ops_test, leader_unit_ip, app=MAIN_APP)
+    assert len(nodes) == 8, f"Wrong node count. Expecting 8 online nodes, found: {len(nodes)}."
+
+    # check the roles
+    auto_gen_roles = ["cluster_manager", "coordinating_only", "data", "ingest", "ml"]
+    data_roles = ["data", "ml"]
+    for app, node_count in [(MAIN_APP, 3), (FAILOVER_APP, 3), (DATA_APP, 2)]:
+        current_app_nodes = [node for node in nodes if node.app_name == app]
+        assert (
+            len(current_app_nodes) == node_count
+        ), f"Wrong count for {app}:{len(current_app_nodes)} - expected:{node_count}"
+
+        roles = current_app_nodes[0].roles
+        temperature = current_app_nodes[0].temperature
+        if app in [MAIN_APP, FAILOVER_APP]:
+            assert sorted(roles) == sorted(
+                auto_gen_roles
+            ), f"Wrong roles for {app}:{roles} - expected:{auto_gen_roles}"
+            assert temperature is None, f"Wrong temperature for {app}:{roles} - expected:None"
+        else:
+            assert sorted(roles) == sorted(
+                data_roles
+            ), f"Wrong roles for {app}:{roles} - expected:{data_roles}"
+            assert (
+                temperature == "hot"
+            ), f"Wrong temperature for {app}:{temperature} - expected:hot"
diff --git a/tests/integration/ha/test_large_deployments.py b/tests/integration/ha/test_roles_managements.py
similarity index 72%
rename from tests/integration/ha/test_large_deployments.py
rename to tests/integration/ha/test_roles_managements.py
index 1ad9af0a7..d6c6efa90 100644
--- a/tests/integration/ha/test_large_deployments.py
+++ b/tests/integration/ha/test_roles_managements.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2023 Canonical Ltd.
+# Copyright 2024 Canonical Ltd.
 # See LICENSE file for licensing details.
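The renamed module below exercises the `roles` charm config end to end. As an illustration only (the exact values used by the tests may differ), pinning and clearing that option from a test looks like this, with the empty string falling back to the auto-generated role set asserted further down:

    # Illustrative snippet; `app` is the application under test.
    await ops_test.model.applications[app].set_config({"roles": "cluster_manager,data"})
    await ops_test.model.applications[app].set_config({"roles": ""})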
import asyncio @@ -15,8 +15,8 @@ SERIES, check_cluster_formation_successful, cluster_health, + get_application_unit_ids, get_application_unit_names, - get_application_unit_status, get_leader_unit_ip, ) from ..helpers_deployments import wait_until @@ -109,17 +109,12 @@ async def test_set_roles_manually( assert sorted(node.roles) == ["cluster_manager", "data"], "roles unchanged" assert node.temperature == "cold", "Temperature unchanged." - # scale up cluster by 1 unit, this should break the quorum and put the charm in a blocked state + # scale up cluster by 1 unit, this should give the new node the same roles await ops_test.model.applications[app].add_unit(count=1) + # TODO: this should have to go once we full trust that quorum is automatically established await wait_until( ops_test, apps=[app], - apps_full_statuses={ - app: { - "blocked": [PClusterWrongNodesCountForQuorum], - "active": [], - }, - }, units_full_statuses={ app: { "units": { @@ -131,10 +126,43 @@ async def test_set_roles_manually( wait_for_exact_units=len(nodes) + 1, idle_period=IDLE_PERIOD, ) - # new_unit_id = max( - # [int(unit.name.split("/")[1]) for unit in ops_test.model.applications[app].units] - # ) + new_nodes = await all_nodes(ops_test, leader_unit_ip) + assert len(new_nodes) == len(nodes) + 1 + + # remove new unit + last_unit_id = sorted(get_application_unit_ids(ops_test, app))[-1] + await ops_test.model.applications[app].destroy_unit(f"{app}/{last_unit_id}") + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_switch_back_to_auto_generated_roles( + ops_test: OpsTest, c_writes: ContinuousWrites, c_writes_runner +) -> None: + """Check roles changes in all nodes.""" + app = (await app_name(ops_test)) or APP_NAME + + leader_unit_ip = await get_leader_unit_ip(ops_test, app=app) + nodes = await all_nodes(ops_test, leader_unit_ip) + + await ops_test.model.applications[app].set_config({"roles": ""}) + await wait_until( + ops_test, + apps=[app], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units=len(nodes), + idle_period=IDLE_PERIOD, + ) - app_unit_status = await get_application_unit_status(ops_test, app=app) - assert any(unit.value == "active" for unit in app_unit_status.values()) - # assert app_unit_status[new_unit_id].message == PClusterWrongNodesCountForQuorum + # check that nodes' roles have indeed changed + nodes = await all_nodes(ops_test, leader_unit_ip) + for node in nodes: + assert sorted(node.roles) == [ + "cluster_manager", + "coordinating_only", + "data", + "ingest", + "ml", + ] + assert node.temperature is None, "Node temperature was erroneously set." diff --git a/tests/integration/ha/test_storage.py b/tests/integration/ha/test_storage.py index 6af195f66..114ee1b59 100644 --- a/tests/integration/ha/test_storage.py +++ b/tests/integration/ha/test_storage.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import asyncio @@ -20,7 +20,6 @@ @pytest.mark.group(1) @pytest.mark.abort_on_fail -@pytest.mark.skip_if_deployed async def test_build_and_deploy(ops_test: OpsTest) -> None: """Build and deploy one unit of OpenSearch.""" # it is possible for users to provide their own cluster for HA testing. 
@@ -114,7 +113,7 @@ async def test_storage_reuse_after_scale_down( @pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_storage_reuse_in_new_cluster_after_app_removal( - ops_test: OpsTest, c_writes: ContinuousWrites, c_balanced_writes_runner + ops_test: OpsTest, c_writes: ContinuousWrites, c_writes_runner ): """Check storage is reused and data accessible after removing app and deploying new cluster.""" app = (await app_name(ops_test)) or APP_NAME diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 8014c58c7..1ed9e3f6c 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import json import logging @@ -50,6 +50,12 @@ logger = logging.getLogger(__name__) +def model_conf_with_short_update_schedule(): + model_conf = MODEL_CONFIG.copy() + model_conf["update-status-hook-interval"] = "2m" + return model_conf + + async def app_name(ops_test: OpsTest) -> Optional[str]: """Returns the name of the cluster running OpenSearch. diff --git a/tests/integration/helpers_deployments.py b/tests/integration/helpers_deployments.py index 1684d477d..ab9c0fa3f 100644 --- a/tests/integration/helpers_deployments.py +++ b/tests/integration/helpers_deployments.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import json import logging diff --git a/tests/integration/plugins/__init__.py b/tests/integration/plugins/__init__.py index db3bfe1a6..e3979c0f6 100644 --- a/tests/integration/plugins/__init__.py +++ b/tests/integration/plugins/__init__.py @@ -1,2 +1,2 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. diff --git a/tests/integration/plugins/helpers.py b/tests/integration/plugins/helpers.py index d6ab3bb15..6ecc6a54e 100644 --- a/tests/integration/plugins/helpers.py +++ b/tests/integration/plugins/helpers.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Helper functions related to testing the different plugins.""" diff --git a/tests/integration/plugins/test_plugins.py b/tests/integration/plugins/test_plugins.py index e77a1969a..183704cc1 100644 --- a/tests/integration/plugins/test_plugins.py +++ b/tests/integration/plugins/test_plugins.py @@ -1,9 +1,10 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import asyncio import json +import logging import pytest from pytest_operator.plugin import OpsTest @@ -25,6 +26,7 @@ http_request, run_action, ) +from ..helpers_deployments import wait_until from ..plugins.helpers import ( create_index_and_bulk_insert, generate_bulk_training_data, @@ -40,6 +42,9 @@ COS_RELATION_NAME = "cos-agent" +logger = logging.getLogger(__name__) + + @pytest.mark.group(1) @pytest.mark.abort_on_fail @pytest.mark.skip_if_deployed @@ -69,9 +74,12 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: # Relate it to OpenSearch to set up TLS. 
await ops_test.model.integrate(APP_NAME, TLS_CERTIFICATES_APP_NAME) - await ops_test.model.wait_for_idle( + await wait_until( + ops_test, apps=[TLS_CERTIFICATES_APP_NAME, APP_NAME], - status="active", + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={TLS_CERTIFICATES_APP_NAME: 1, APP_NAME: 3}, timeout=3400, idle_period=IDLE_PERIOD, ) @@ -96,10 +104,12 @@ async def test_prometheus_exporter_enabled_by_default(ops_test): async def test_prometheus_exporter_cos_relation(ops_test): await ops_test.model.deploy(COS_APP_NAME, channel="edge"), await ops_test.model.integrate(APP_NAME, COS_APP_NAME) - await ops_test.model.wait_for_idle( + await wait_until( + ops_test, apps=[APP_NAME], - status="active", - timeout=1400, + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units=3, idle_period=IDLE_PERIOD, ) @@ -175,18 +185,44 @@ async def test_knn_enabled_disabled(ops_test): assert config["plugin_opensearch_knn"]["default"] is True assert config["plugin_opensearch_knn"]["value"] is True - await ops_test.model.applications[APP_NAME].set_config({"plugin_opensearch_knn": "False"}) - await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", idle_period=15) - - config = await ops_test.model.applications[APP_NAME].get_config() - assert config["plugin_opensearch_knn"]["value"] is False + async with ops_test.fast_forward(): + await ops_test.model.applications[APP_NAME].set_config({"plugin_opensearch_knn": "False"}) + await wait_until( + ops_test, + apps=[APP_NAME], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={APP_NAME: 3}, + timeout=3600, + idle_period=IDLE_PERIOD, + ) - await ops_test.model.applications[APP_NAME].set_config({"plugin_opensearch_knn": "True"}) - await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", idle_period=15) + config = await ops_test.model.applications[APP_NAME].get_config() + assert config["plugin_opensearch_knn"]["value"] is False + + await ops_test.model.applications[APP_NAME].set_config({"plugin_opensearch_knn": "True"}) + await wait_until( + ops_test, + apps=[APP_NAME], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={APP_NAME: 3}, + timeout=3600, + idle_period=IDLE_PERIOD, + ) - config = await ops_test.model.applications[APP_NAME].get_config() - assert config["plugin_opensearch_knn"]["value"] is True - await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", idle_period=45) + config = await ops_test.model.applications[APP_NAME].get_config() + assert config["plugin_opensearch_knn"]["value"] is True + + await wait_until( + ops_test, + apps=[APP_NAME], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={APP_NAME: 3}, + timeout=3600, + idle_period=IDLE_PERIOD, + ) @pytest.mark.group(1) @@ -336,17 +372,31 @@ async def test_knn_training_search(ops_test: OpsTest) -> None: # Set the config to false, then to true for knn_enabled in [False, True]: + logger.info(f"KNN test starting with {knn_enabled}") + # get current timestamp, to compare with restarts later ts = await get_application_unit_ids_start_time(ops_test, APP_NAME) await ops_test.model.applications[APP_NAME].set_config( {"plugin_opensearch_knn": str(knn_enabled)} ) - await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", idle_period=60) + + await wait_until( + ops_test, + apps=[APP_NAME], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={APP_NAME: 3}, + timeout=3600, + idle_period=IDLE_PERIOD, + ) + # Now 
use it to compare with the restart assert await is_each_unit_restarted(ops_test, APP_NAME, ts) assert await check_cluster_formation_successful( ops_test, leader_unit_ip, get_application_unit_names(ops_test, app=APP_NAME) ), "Restart happened but cluster did not start correctly" + logger.info("Restart finished and was successful") + query = { "size": 2, "query": {"knn": {"target-field": {"vector": payload_list[0], "k": 2}}}, diff --git a/tests/integration/relations/__init__.py b/tests/integration/relations/__init__.py index db3bfe1a6..e3979c0f6 100644 --- a/tests/integration/relations/__init__.py +++ b/tests/integration/relations/__init__.py @@ -1,2 +1,2 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. diff --git a/tests/integration/relations/conftest.py b/tests/integration/relations/conftest.py index 6d6bbf6bf..2dec298af 100644 --- a/tests/integration/relations/conftest.py +++ b/tests/integration/relations/conftest.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import shutil diff --git a/tests/integration/relations/helpers.py b/tests/integration/relations/helpers.py index 661568a93..14b46dc4a 100644 --- a/tests/integration/relations/helpers.py +++ b/tests/integration/relations/helpers.py @@ -1,15 +1,22 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. - -import asyncio import logging import socket from typing import Optional import yaml from pytest_operator.plugin import OpsTest -from tenacity import RetryError, Retrying, stop_after_delay, wait_fixed +from tenacity import ( + RetryError, + Retrying, + retry, + stop_after_attempt, + stop_after_delay, + wait_fixed, +) + +from ..helpers import run_action async def get_application_relation_data( @@ -122,17 +129,7 @@ def new_relation_joined(ops_test: OpsTest, endpoint_one: str, endpoint_two: str) return False -async def run_action( - ops_test, action_name: str, unit_name: str, timeout: int = 30, **action_kwargs -): - """Runs the given action on the given unit.""" - client_unit = ops_test.model.units.get(unit_name) - action = await client_unit.run_action(action_name, **action_kwargs) - result = await asyncio.wait_for(action.wait(), timeout) - logging.info(f"request results: {result.results}") - return result.results - - +@retry(wait=wait_fixed(wait=15), stop=stop_after_attempt(15)) async def run_request( ops_test, unit_name: str, @@ -144,17 +141,27 @@ async def run_request( ): # python can't have variable names with a hyphen, and Juju can't have action variables with an # underscore, so this is a compromise. 
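The library bump below replaces the `SecretGroup` enum with lightweight `str`-based groups exposed through a `SECRET_GROUPS` aggregate (see the following hunks). A small usage sketch, based only on the code shown in this diff:

    # Illustration of the new str-based secret groups.
    assert SECRET_GROUPS.USER == "user"
    assert SECRET_GROUPS.get_group("tls") == SECRET_GROUPS.TLS
    assert SECRET_GROUPS.get_group("unknown") is None
    assert set(SECRET_GROUPS.groups()) == {"user", "tls", "extra"}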
- kwargs = {"relation-id": relation_id, "relation-name": relation_name} + params = { + "relation-id": relation_id, + "relation-name": relation_name, + "method": method, + "endpoint": endpoint, + } if payload: - kwargs["payload"] = payload - return await run_action( + params["payload"] = payload + result = await run_action( ops_test, + unit_id=int(unit_name.split("/")[-1]), action_name="run-request", - unit_name=unit_name, - method=method, - endpoint=endpoint, - **kwargs, + params=params, + app="/".join(unit_name.split("/")[:-1]), ) + logging.info(f"request results: {result}") + + if result.status != "completed": + raise Exception(result.response) + + return result.response def ip_to_url(ip_str: str) -> str: diff --git a/tests/integration/relations/opensearch_provider/application-charm/actions.yaml b/tests/integration/relations/opensearch_provider/application-charm/actions.yaml index 9b5c36882..b981595c0 100644 --- a/tests/integration/relations/opensearch_provider/application-charm/actions.yaml +++ b/tests/integration/relations/opensearch_provider/application-charm/actions.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. run-request: diff --git a/tests/integration/relations/opensearch_provider/application-charm/charmcraft.yaml b/tests/integration/relations/opensearch_provider/application-charm/charmcraft.yaml index 7e33b57ee..1235c6e9a 100644 --- a/tests/integration/relations/opensearch_provider/application-charm/charmcraft.yaml +++ b/tests/integration/relations/opensearch_provider/application-charm/charmcraft.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. type: charm diff --git a/tests/integration/relations/opensearch_provider/application-charm/lib/charms/data_platform_libs/v0/data_interfaces.py b/tests/integration/relations/opensearch_provider/application-charm/lib/charms/data_platform_libs/v0/data_interfaces.py index 5b5a27718..f9cadd04a 100644 --- a/tests/integration/relations/opensearch_provider/application-charm/lib/charms/data_platform_libs/v0/data_interfaces.py +++ b/tests/integration/relations/opensearch_provider/application-charm/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -295,12 +295,23 @@ def _on_topic_requested(self, event: TopicRequestedEvent): import json import logging from abc import ABC, abstractmethod -from collections import namedtuple +from collections import UserDict, namedtuple from datetime import datetime from enum import Enum -from typing import Callable, Dict, List, Optional, Set, Tuple, Union +from typing import ( + Callable, + Dict, + ItemsView, + KeysView, + List, + Optional, + Set, + Tuple, + Union, + ValuesView, +) -from ops import JujuVersion, Secret, SecretInfo, SecretNotFoundError +from ops import JujuVersion, Model, Secret, SecretInfo, SecretNotFoundError from ops.charm import ( CharmBase, CharmEvents, @@ -320,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 27 +LIBPATCH = 35 PYDEPS = ["ops>=2.0.0"] @@ -337,21 +348,46 @@ def _on_topic_requested(self, event: TopicRequestedEvent): PROV_SECRET_PREFIX = "secret-" REQ_SECRET_FIELDS = "requested-secrets" +GROUP_MAPPING_FIELD = "secret_group_mapping" +GROUP_SEPARATOR = "@" + + +class SecretGroup(str): + """Secret groups specific type.""" + + +class SecretGroupsAggregate(str): + """Secret groups with option to extend with additional constants.""" + + def __init__(self): + self.USER = SecretGroup("user") + self.TLS = SecretGroup("tls") + self.EXTRA = SecretGroup("extra") + + def __setattr__(self, name, value): + """Setting internal constants.""" + if name in self.__dict__: + raise RuntimeError("Can't set constant!") + else: + super().__setattr__(name, SecretGroup(value)) + def groups(self) -> list: + """Return the list of stored SecretGroups.""" + return list(self.__dict__.values()) -class SecretGroup(Enum): - """Secret groups as constants.""" + def get_group(self, group: str) -> Optional[SecretGroup]: + """If the input str translates to a group name, return that.""" + return SecretGroup(group) if group in self.groups() else None - USER = "user" - TLS = "tls" - EXTRA = "extra" + +SECRET_GROUPS = SecretGroupsAggregate() class DataInterfacesError(Exception): """Common ancestor for DataInterfaces related exceptions.""" -class SecretError(Exception): +class SecretError(DataInterfacesError): """Common ancestor for Secrets related exceptions.""" @@ -367,6 +403,10 @@ class SecretsIllegalUpdateError(SecretError): """Secrets aren't yet available for Juju version used.""" +class IllegalOperationError(DataInterfacesError): + """To be used when an operation is not allowed to be performed.""" + + def get_encoded_dict( relation: Relation, member: Union[Unit, Application], field: str ) -> Optional[Dict[str, str]]: @@ -397,7 +437,7 @@ def set_encoded_field( relation.data[member].update({field: json.dumps(value)}) -def diff(event: RelationChangedEvent, bucket: Union[Unit, Application]) -> Diff: +def diff(event: RelationChangedEvent, bucket: Optional[Union[Unit, Application]]) -> Diff: """Retrieves the diff of the data in the relation changed databag. Args: @@ -409,6 +449,9 @@ def diff(event: RelationChangedEvent, bucket: Union[Unit, Application]) -> Diff: keys from the event relation databag. """ # Retrieve the old data from the data key in the application relation databag. 
+ if not bucket: + return Diff([], [], []) + old_data = get_encoded_dict(event.relation, bucket, "data") if not old_data: @@ -450,6 +493,7 @@ def wrapper(self, *args, **kwargs): return return f(self, *args, **kwargs) + wrapper.leader_only = True return wrapper @@ -464,6 +508,34 @@ def wrapper(self, *args, **kwargs): return wrapper +def dynamic_secrets_only(f): + """Decorator to ensure that certain operations would be only executed when NO static secrets are defined.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields: + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." + ) + return f(self, *args, **kwargs) + + return wrapper + + +def either_static_or_dynamic_secrets(f): + """Decorator to ensure that static and dynamic secrets won't be used in parallel.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields and set(self.current_secret_fields) - set( + self.static_secret_fields + ): + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." + ) + return f(self, *args, **kwargs) + + return wrapper + + class Scope(Enum): """Peer relations scope.""" @@ -471,6 +543,11 @@ class Scope(Enum): UNIT = "unit" +################################################################################ +# Secrets internal caching +################################################################################ + + class CachedSecret: """Locally cache a secret. @@ -479,27 +556,37 @@ class CachedSecret: def __init__( self, - charm: CharmBase, + model: Model, component: Union[Application, Unit], label: str, secret_uri: Optional[str] = None, + legacy_labels: List[str] = [], ): self._secret_meta = None self._secret_content = {} self._secret_uri = secret_uri self.label = label - self.charm = charm + self._model = model self.component = component + self.legacy_labels = legacy_labels + self.current_label = None - def add_secret(self, content: Dict[str, str], relation: Relation) -> Secret: + def add_secret( + self, + content: Dict[str, str], + relation: Optional[Relation] = None, + label: Optional[str] = None, + ) -> Secret: """Create a new secret.""" if self._secret_uri: raise SecretAlreadyExistsError( "Secret is already defined with uri %s", self._secret_uri ) - secret = self.component.add_secret(content, label=self.label) - if relation.app != self.charm.app: + label = self.label if not label else label + + secret = self.component.add_secret(content, label=label) + if relation and relation.app != self._model.app: # If it's not a peer relation, grant is to be applied secret.grant(relation) self._secret_uri = secret.id @@ -512,13 +599,20 @@ def meta(self) -> Optional[Secret]: if not self._secret_meta: if not (self._secret_uri or self.label): return - try: - self._secret_meta = self.charm.model.get_secret(label=self.label) - except SecretNotFoundError: - if self._secret_uri: - self._secret_meta = self.charm.model.get_secret( - id=self._secret_uri, label=self.label - ) + + for label in [self.label] + self.legacy_labels: + try: + self._secret_meta = self._model.get_secret(label=label) + except SecretNotFoundError: + pass + else: + if label != self.label: + self.current_label = label + break + + # If still not found, to be checked by URI, to be labelled with the proposed label + if not self._secret_meta and self._secret_uri: + self._secret_meta = self._model.get_secret(id=self._secret_uri, label=self.label) return self._secret_meta def get_content(self) -> Dict[str, str]: @@ -542,35 
+636,69 @@ def get_content(self) -> Dict[str, str]: self._secret_content = self.meta.get_content() return self._secret_content + def _move_to_new_label_if_needed(self): + """Helper function to re-create the secret with a different label.""" + if not self.current_label or not (self.meta and self._secret_meta): + return + + # Create a new secret with the new label + old_meta = self._secret_meta + content = self._secret_meta.get_content() + + # I wish we could just check if we are the owners of the secret... + try: + self._secret_meta = self.add_secret(content, label=self.label) + except ModelError as err: + if "this unit is not the leader" not in str(err): + raise + old_meta.remove_all_revisions() + def set_content(self, content: Dict[str, str]) -> None: """Setting cached secret content.""" if not self.meta: return if content: + self._move_to_new_label_if_needed() self.meta.set_content(content) self._secret_content = content else: self.meta.remove_all_revisions() def get_info(self) -> Optional[SecretInfo]: - """Wrapper to apply the corresponding call on the Secret obj within CachedSecret if any.""" + """Wrapper function to apply the corresponding call on the Secret object within CachedSecret if any.""" if self.meta: return self.meta.get_info() + def remove(self) -> None: + """Remove secret.""" + if not self.meta: + raise SecretsUnavailableError("Non-existent secret was attempted to be removed.") + try: + self.meta.remove_all_revisions() + except SecretNotFoundError: + pass + self._secret_content = {} + self._secret_meta = None + self._secret_uri = None + class SecretCache: """A data structure storing CachedSecret objects.""" - def __init__(self, charm: CharmBase, component: Union[Application, Unit]): - self.charm = charm + def __init__(self, model: Model, component: Union[Application, Unit]): + self._model = model self.component = component self._secrets: Dict[str, CachedSecret] = {} - def get(self, label: str, uri: Optional[str] = None) -> Optional[CachedSecret]: + def get( + self, label: str, uri: Optional[str] = None, legacy_labels: List[str] = [] + ) -> Optional[CachedSecret]: """Getting a secret from Juju Secret store or cache.""" if not self._secrets.get(label): - secret = CachedSecret(self.charm, self.component, label, uri) + secret = CachedSecret( + self._model, self.component, label, uri, legacy_labels=legacy_labels + ) if secret.meta: self._secrets[label] = secret return self._secrets.get(label) @@ -580,49 +708,172 @@ def add(self, label: str, content: Dict[str, str], relation: Relation) -> Cached if self._secrets.get(label): raise SecretAlreadyExistsError(f"Secret {label} already exists") - secret = CachedSecret(self.charm, self.component, label) + secret = CachedSecret(self._model, self.component, label) secret.add_secret(content, relation) self._secrets[label] = secret return self._secrets[label] + def remove(self, label: str) -> None: + """Remove a secret from the cache.""" + if secret := self.get(label): + try: + secret.remove() + self._secrets.pop(label) + except (SecretsUnavailableError, KeyError): + pass + else: + return + logging.debug("Non-existing Juju Secret was attempted to be removed %s", label) + + +################################################################################ +# Relation Data base/abstract ancestors (i.e. 
parent classes) +################################################################################ + + +# Base Data + -# Base DataRelation +class DataDict(UserDict): + """Python Standard Library 'dict' - like representation of Relation Data.""" + def __init__(self, relation_data: "Data", relation_id: int): + self.relation_data = relation_data + self.relation_id = relation_id -class DataRelation(Object, ABC): + @property + def data(self) -> Dict[str, str]: + """Return the full content of the Abstract Relation Data dictionary.""" + result = self.relation_data.fetch_my_relation_data([self.relation_id]) + try: + result_remote = self.relation_data.fetch_relation_data([self.relation_id]) + except NotImplementedError: + result_remote = {self.relation_id: {}} + if result: + result_remote[self.relation_id].update(result[self.relation_id]) + return result_remote.get(self.relation_id, {}) + + def __setitem__(self, key: str, item: str) -> None: + """Set an item of the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, {key: item}) + + def __getitem__(self, key: str) -> str: + """Get an item of the Abstract Relation Data dictionary.""" + result = None + + # Avoiding "leader_only" error when cross-charm non-leader unit, not to report useless error + if ( + not hasattr(self.relation_data.fetch_my_relation_field, "leader_only") + or self.relation_data.component != self.relation_data.local_app + or self.relation_data.local_unit.is_leader() + ): + result = self.relation_data.fetch_my_relation_field(self.relation_id, key) + + if not result: + try: + result = self.relation_data.fetch_relation_field(self.relation_id, key) + except NotImplementedError: + pass + + if not result: + raise KeyError + return result + + def __eq__(self, d: dict) -> bool: + """Equality.""" + return self.data == d + + def __repr__(self) -> str: + """String representation Abstract Relation Data dictionary.""" + return repr(self.data) + + def __len__(self) -> int: + """Length of the Abstract Relation Data dictionary.""" + return len(self.data) + + def __delitem__(self, key: str) -> None: + """Delete an item of the Abstract Relation Data dictionary.""" + self.relation_data.delete_relation_data(self.relation_id, [key]) + + def has_key(self, key: str) -> bool: + """Does the key exist in the Abstract Relation Data dictionary?""" + return key in self.data + + def update(self, items: Dict[str, str]): + """Update the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, items) + + def keys(self) -> KeysView[str]: + """Keys of the Abstract Relation Data dictionary.""" + return self.data.keys() + + def values(self) -> ValuesView[str]: + """Values of the Abstract Relation Data dictionary.""" + return self.data.values() + + def items(self) -> ItemsView[str, str]: + """Items of the Abstract Relation Data dictionary.""" + return self.data.items() + + def pop(self, item: str) -> str: + """Pop an item of the Abstract Relation Data dictionary.""" + result = self.relation_data.fetch_my_relation_field(self.relation_id, item) + if not result: + raise KeyError(f"Item {item} doesn't exist.") + self.relation_data.delete_relation_data(self.relation_id, [item]) + return result + + def __contains__(self, item: str) -> bool: + """Does the Abstract Relation Data dictionary contain item?""" + return item in self.data.values() + + def __iter__(self): + """Iterate through the Abstract Relation Data dictionary.""" + return iter(self.data) + + def get(self, key: str, default: 
Optional[str] = None) -> Optional[str]: + """Safely get an item of the Abstract Relation Data dictionary.""" + try: + if result := self[key]: + return result + except KeyError: + return default + + +class Data(ABC): """Base relation data manipulation (abstract) class.""" SCOPE = Scope.APP # Local map to associate mappings with secrets potentially as a group SECRET_LABEL_MAP = { - "username": SecretGroup.USER, - "password": SecretGroup.USER, - "uris": SecretGroup.USER, - "tls": SecretGroup.TLS, - "tls-ca": SecretGroup.TLS, + "username": SECRET_GROUPS.USER, + "password": SECRET_GROUPS.USER, + "uris": SECRET_GROUPS.USER, + "tls": SECRET_GROUPS.TLS, + "tls-ca": SECRET_GROUPS.TLS, } - def __init__(self, charm: CharmBase, relation_name: str) -> None: - super().__init__(charm, relation_name) - self.charm = charm - self.local_app = self.charm.model.app - self.local_unit = self.charm.unit + def __init__( + self, + model: Model, + relation_name: str, + ) -> None: + self._model = model + self.local_app = self._model.app + self.local_unit = self._model.unit self.relation_name = relation_name - self.framework.observe( - charm.on[relation_name].relation_changed, - self._on_relation_changed_event, - ) self._jujuversion = None self.component = self.local_app if self.SCOPE == Scope.APP else self.local_unit - self.secrets = SecretCache(self.charm, self.component) + self.secrets = SecretCache(self._model, self.component) + self.data_component = None @property def relations(self) -> List[Relation]: """The list of Relation instances associated with this relation_name.""" return [ relation - for relation in self.charm.model.relations[self.relation_name] + for relation in self._model.relations[self.relation_name] if self._is_relation_active(relation) ] @@ -633,12 +884,12 @@ def secrets_enabled(self): self._jujuversion = JujuVersion.from_environ() return self._jujuversion.has_secrets - # Mandatory overrides for internal/helper methods + @property + def secret_label_map(self): + """Exposing secret-label map via a property -- could be overridden in descendants!""" + return self.SECRET_LABEL_MAP - @abstractmethod - def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: - """Event emitted when the relation data has changed.""" - raise NotImplementedError + # Mandatory overrides for internal/helper methods @abstractmethod def _get_relation_secret( @@ -658,17 +909,17 @@ def _fetch_specific_relation_data( def _fetch_my_specific_relation_data( self, relation: Relation, fields: Optional[List[str]] ) -> Dict[str, str]: - """Fetch data available from the relation for owner/this_app.""" + """Fetch data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app.""" raise NotImplementedError @abstractmethod def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: - """Update data available from the relation for owner/this_app.""" + """Update data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app.""" raise NotImplementedError @abstractmethod def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: - """Delete data available from the relation for owner/this_app.""" + """Delete data available (directly or indirectly -- i.e.
secrets) from the relation for owner/this_app.""" raise NotImplementedError # Internal helper methods @@ -692,11 +943,11 @@ def _generate_secret_label( relation_name: str, relation_id: int, group_mapping: SecretGroup ) -> str: """Generate unique group_mappings for secrets within a relation context.""" - return f"{relation_name}.{relation_id}.{group_mapping.value}.secret" + return f"{relation_name}.{relation_id}.{group_mapping}.secret" def _generate_secret_field_name(self, group_mapping: SecretGroup) -> str: """Generate unique group_mappings for secrets within a relation context.""" - return f"{PROV_SECRET_PREFIX}{group_mapping.value}" + return f"{PROV_SECRET_PREFIX}{group_mapping}" def _relation_from_secret_label(self, secret_label: str) -> Optional[Relation]: """Retrieve the relation that belongs to a secret label.""" @@ -721,8 +972,7 @@ def _relation_from_secret_label(self, secret_label: str) -> Optional[Relation]: except ModelError: return - @classmethod - def _group_secret_fields(cls, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + def _group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: """Helper function to arrange secret mappings under their group. NOTE: All unrecognized items end up in the 'extra' secret bucket. @@ -730,44 +980,42 @@ def _group_secret_fields(cls, secret_fields: List[str]) -> Dict[SecretGroup, Lis """ secret_fieldnames_grouped = {} for key in secret_fields: - if group := cls.SECRET_LABEL_MAP.get(key): + if group := self.secret_label_map.get(key): secret_fieldnames_grouped.setdefault(group, []).append(key) else: - secret_fieldnames_grouped.setdefault(SecretGroup.EXTRA, []).append(key) + secret_fieldnames_grouped.setdefault(SECRET_GROUPS.EXTRA, []).append(key) return secret_fieldnames_grouped def _get_group_secret_contents( self, relation: Relation, group: SecretGroup, - secret_fields: Optional[Union[Set[str], List[str]]] = None, + secret_fields: Union[Set[str], List[str]] = [], ) -> Dict[str, str]: """Helper function to retrieve collective, requested contents of a secret.""" - if not secret_fields: - secret_fields = [] - if (secret := self._get_relation_secret(relation.id, group)) and ( secret_data := secret.get_content() ): - return {k: v for k, v in secret_data.items() if k in secret_fields} + return { + k: v for k, v in secret_data.items() if not secret_fields or k in secret_fields + } return {} - @classmethod def _content_for_secret_group( - cls, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup ) -> Dict[str, str]: - """Select : pairs from input, that belong to this particular Secret group.""" - if group_mapping == SecretGroup.EXTRA: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: return { k: v for k, v in content.items() - if k in secret_fields and k not in cls.SECRET_LABEL_MAP.keys() + if k in secret_fields and k not in self.secret_label_map.keys() } return { k: v for k, v in content.items() - if k in secret_fields and cls.SECRET_LABEL_MAP.get(k) == group_mapping + if k in secret_fields and self.secret_label_map.get(k) == group_mapping } @juju_secrets_only @@ -779,11 +1027,8 @@ def _get_relation_secret_data( if secret: return secret.get_content() - # Core operations on Relation Fields manipulations (regardless whether the - # field is in the databag or in a secret) - - # Internal functions to be called directly from transparent 
public interface - # functions (+closely related helpers) + # Core operations on Relation Fields manipulations (regardless whether the field is in the databag or in a secret) + # Internal functions to be called directly from transparent public interface functions (+closely related helpers) def _process_secret_fields( self, @@ -794,17 +1039,15 @@ def _process_secret_fields( *args, **kwargs, ) -> Tuple[Dict[str, str], Set[str]]: - """Isolate target secret fields, and execute requested operation by Secret Group.""" + """Isolate target secret fields of manipulation, and execute requested operation by Secret Group.""" result = {} # If the relation started on a databag, we just stay on the databag - # (Rolling upgrades may result in a relation starting on databag, getting - # secrets enabled on-the-fly) - # self.local_app is sufficient to check (ignored if Requires, never has - # secrets -- works if Provides) + # (Rolling upgrades may result in a relation starting on databag, getting secrets enabled on-the-fly) + # self.local_app is sufficient to check (ignored if Requires, never has secrets -- works if Provider) fallback_to_databag = ( req_secret_fields - and self.local_unit.is_leader() + and (self.local_unit == self._model.unit and self.local_unit.is_leader()) and set(req_secret_fields) & set(relation.data[self.component]) ) @@ -818,8 +1061,7 @@ def _process_secret_fields( for group in secret_fieldnames_grouped: # operation() should return nothing when all goes well if group_result := operation(relation, group, secret_fields, *args, **kwargs): - # If "meaningful" data was returned, we take it. (Some 'operation'-s - # only return success/failure.) + # If "meaningful" data was returned, we take it. (Some 'operation'-s only return success/failure.) if isinstance(group_result, dict): result.update(group_result) else: @@ -835,7 +1077,7 @@ def _fetch_relation_data_without_secrets( Since the Provider's databag is the only one holding secrest, we can apply a simplified workflow to read the Require's side's databag. - This is used typically when the Provides side wants to read the Requires side's data, + This is used typically when the Provider side wants to read the Requires side's data, or when the Requires side may want to read its own data. """ if component not in relation.data or not relation.data[component]: @@ -859,32 +1101,27 @@ def _fetch_relation_data_with_secrets( This function has internal logic to resolve if a requested field may be "hidden" within a Relation Secret, or directly available as a databag field. Typically - used to read the Provides side's databag (eigher by the Requires side, or by - Provides side itself). + used to read the Provider side's databag (eigher by the Requires side, or by + Provider side itself). """ result = {} normal_fields = [] if not fields: - if component not in relation.data or not relation.data[component]: + if component not in relation.data: return {} all_fields = list(relation.data[component].keys()) normal_fields = [field for field in all_fields if not self._is_secret_field(field)] - - # There must have been secrets there - if all_fields != normal_fields and req_secret_fields: - # So we assemble the full fields list (without 'secret-' fields) - fields = normal_fields + req_secret_fields + fields = normal_fields + req_secret_fields if req_secret_fields else normal_fields if fields: result, normal_fields = self._process_secret_fields( relation, req_secret_fields, fields, self._get_group_secret_contents ) - # Processing "normal" fields. 
May include leftover from what we couldn't - # retrieve as a secret. - # (Typically when Juju3 Requires meets Juju2 Provides) + # Processing "normal" fields. May include leftover from what we couldn't retrieve as a secret. + # (Typically when Juju3 Requires meets Juju2 Provider) if normal_fields: result.update( self._fetch_relation_data_without_secrets(component, relation, list(normal_fields)) @@ -912,7 +1149,7 @@ def _delete_relation_data_without_secrets( try: relation.data[component].pop(field) except KeyError: - logger.error( + logger.debug( "Non-existing field '%s' was attempted to be removed from the databag (relation ID: %s)", str(field), str(relation.id), @@ -922,9 +1159,13 @@ def _delete_relation_data_without_secrets( # Public interface methods # Handling Relation Fields seamlessly, regardless if in databag or a Juju Secret + def as_dict(self, relation_id: int) -> UserDict: + """Dict behavior representation of the Abstract Data.""" + return DataDict(self, relation_id) + def get_relation(self, relation_name, relation_id) -> Relation: """Safe way of retrieving a relation.""" - relation = self.charm.model.get_relation(relation_name, relation_id) + relation = self._model.get_relation(relation_name, relation_id) if not relation: raise DataInterfacesError( @@ -1030,14 +1271,22 @@ def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: return self._delete_relation_data(relation, fields) -# Base DataProvides and DataRequires +class EventHandlers(Object): + """Requires-side of the relation.""" + def __init__(self, charm: CharmBase, relation_data: Data, unique_key: str = ""): + """Manager of base client relations.""" + if not unique_key: + unique_key = relation_data.relation_name + super().__init__(charm, unique_key) -class DataProvides(DataRelation): - """Base provides-side of the data products relation.""" + self.charm = charm + self.relation_data = relation_data - def __init__(self, charm: CharmBase, relation_name: str) -> None: - super().__init__(charm, relation_name) + self.framework.observe( + charm.on[self.relation_data.relation_name].relation_changed, + self._on_relation_changed_event, + ) def _diff(self, event: RelationChangedEvent) -> Diff: """Retrieves the diff of the data in the relation changed databag. @@ -1049,7 +1298,27 @@ def _diff(self, event: RelationChangedEvent) -> Diff: a Diff instance containing the added, deleted and changed keys from the event relation databag. 
""" - return diff(event, self.local_app) + return diff(event, self.relation_data.data_component) + + @abstractmethod + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + +# Base ProviderData and RequiresData + + +class ProviderData(Data): + """Base provides-side of the data products relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + ) -> None: + super().__init__(model, relation_name) + self.data_component = self.local_app # Private methods handling secrets @@ -1136,15 +1405,13 @@ def _delete_relation_secret( try: new_content.pop(field) except KeyError: - logging.error( + logging.debug( "Non-existing secret was attempted to be removed %s, %s", str(relation.id), str(field), ) return False - secret.set_content(new_content) - # Remove secret from the relation if it's fully gone if not new_content: field = self._generate_secret_field_name(group) @@ -1152,6 +1419,10 @@ def _delete_relation_secret( relation.data[self.component].pop(field) except KeyError: pass + label = self._generate_secret_label(self.relation_name, relation.id, group) + self.secrets.remove(label) + else: + secret.set_content(new_content) # Return the content that was removed return True @@ -1170,7 +1441,7 @@ def _get_relation_secret( if secret := self.secrets.get(label): return secret - relation = self.charm.model.get_relation(relation_name, relation_id) + relation = self._model.get_relation(relation_name, relation_id) if not relation: return @@ -1181,10 +1452,9 @@ def _get_relation_secret( def _fetch_specific_relation_data( self, relation: Relation, fields: Optional[List[str]] ) -> Dict[str, str]: - """Fetching relation data for Provides. + """Fetching relation data for Provider. 
- NOTE: Since all secret fields are in the Provides side of the databag, - we don't need to worry about that + NOTE: Since all secret fields are in the Provider side of the databag, we don't need to worry about that """ if not relation.app: return {} @@ -1269,36 +1539,29 @@ def set_tls_ca(self, relation_id: int, tls_ca: str) -> None: # Public functions -- inherited - fetch_my_relation_data = leader_only(DataRelation.fetch_my_relation_data) - fetch_my_relation_field = leader_only(DataRelation.fetch_my_relation_field) + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) -class DataRequires(DataRelation): - """Requires-side of the relation.""" +class RequirerData(Data): + """Requirer-side of the relation.""" SECRET_FIELDS = ["username", "password", "tls", "tls-ca", "uris"] def __init__( self, - charm, + model, relation_name: str, extra_user_roles: Optional[str] = None, additional_secret_fields: Optional[List[str]] = [], ): """Manager of base client relations.""" - super().__init__(charm, relation_name) + super().__init__(model, relation_name) self.extra_user_roles = extra_user_roles self._secret_fields = list(self.SECRET_FIELDS) if additional_secret_fields: self._secret_fields += additional_secret_fields - - self.framework.observe( - self.charm.on[relation_name].relation_created, self._on_relation_created_event - ) - self.framework.observe( - charm.on.secret_changed, - self._on_secret_changed_event, - ) + self.data_component = self.local_unit @property def secret_fields(self) -> Optional[List[str]]: @@ -1306,18 +1569,6 @@ def secret_fields(self) -> Optional[List[str]]: if self.secrets_enabled: return self._secret_fields - def _diff(self, event: RelationChangedEvent) -> Diff: - """Retrieves the diff of the data in the relation changed databag. - - Args: - event: relation changed event. - - Returns: - a Diff instance containing the added, deleted and changed - keys from the event relation databag. - """ - return diff(event, self.local_unit) - # Internal helper functions def _register_secret_to_relation( @@ -1330,13 +1581,13 @@ def _register_secret_to_relation( then will be "stuck" on the Secret object, whenever it may appear (i.e. as an event attribute, or fetched manually) on future occasions. - This will allow us to uniquely identify the secret on Provides side (typically on + This will allow us to uniquely identify the secret on Provider side (typically on 'secret-changed' events), and map it to the corresponding relation. """ label = self._generate_secret_label(relation_name, relation_id, group) # Fetchin the Secret's meta information ensuring that it's locally getting registered with - CachedSecret(self.charm, self.component, label, secret_id).meta + CachedSecret(self._model, self.component, label, secret_id).meta def _register_secrets_to_relation(self, relation: Relation, params_name_list: List[str]): """Make sure that secrets of the provided list are locally 'registered' from the databag. 
@@ -1346,7 +1597,7 @@ def _register_secrets_to_relation(self, relation: Relation, params_name_list: Li if not relation.app: return - for group in SecretGroup: + for group in SECRET_GROUPS.groups(): secret_field = self._generate_secret_field_name(group) if secret_field in params_name_list: if secret_uri := relation.data[relation.app].get(secret_field): @@ -1396,23 +1647,6 @@ def is_resource_created(self, relation_id: Optional[int] = None) -> bool: else False ) - # Event handlers - - def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: - """Event emitted when the relation is created.""" - if not self.local_unit.is_leader(): - return - - if self.secret_fields: - set_encoded_field( - event.relation, self.charm.app, REQ_SECRET_FIELDS, self.secret_fields - ) - - @abstractmethod - def _on_secret_changed_event(self, event: RelationChangedEvent) -> None: - """Event emitted when the relation data has changed.""" - raise NotImplementedError - # Mandatory internal overrides @juju_secrets_only @@ -1429,7 +1663,7 @@ def _get_relation_secret( def _fetch_specific_relation_data( self, relation, fields: Optional[List[str]] = None ) -> Dict[str, str]: - """Fetching Requires data -- that may include secrets.""" + """Fetching Requirer data -- that may include secrets.""" if not relation.app: return {} return self._fetch_relation_data_with_secrets( @@ -1467,35 +1701,92 @@ def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: # Public functions -- inherited - fetch_my_relation_data = leader_only(DataRelation.fetch_my_relation_data) - fetch_my_relation_field = leader_only(DataRelation.fetch_my_relation_field) + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) -# Base DataPeer +class RequirerEventHandlers(EventHandlers): + """Requires-side of the relation.""" + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) -class DataPeer(DataRequires, DataProvides): - """Represents peer relations.""" + self.framework.observe( + self.charm.on[relation_data.relation_name].relation_created, + self._on_relation_created_event, + ) + self.framework.observe( + charm.on.secret_changed, + self._on_secret_changed_event, + ) + + # Event handlers + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the relation is created.""" + if not self.relation_data.local_unit.is_leader(): + return + + if self.relation_data.secret_fields: # pyright: ignore [reportAttributeAccessIssue] + set_encoded_field( + event.relation, + self.relation_data.component, + REQ_SECRET_FIELDS, + self.relation_data.secret_fields, # pyright: ignore [reportAttributeAccessIssue] + ) + + @abstractmethod + def _on_secret_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + +################################################################################ +# Peer Relation Data +################################################################################ - SECRET_FIELDS = ["operator-password"] + +class DataPeerData(RequirerData, ProviderData): + """Represents peer relations data.""" + + SECRET_FIELDS = [] SECRET_FIELD_NAME = "internal_secret" SECRET_LABEL_MAP = {} def __init__( self, - charm, + model, relation_name: str, extra_user_roles: Optional[str] = None, 
additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, secret_field_name: Optional[str] = None, deleted_label: Optional[str] = None, ): """Manager of base client relations.""" - DataRequires.__init__( - self, charm, relation_name, extra_user_roles, additional_secret_fields + RequirerData.__init__( + self, + model, + relation_name, + extra_user_roles, + additional_secret_fields, ) self.secret_field_name = secret_field_name if secret_field_name else self.SECRET_FIELD_NAME self.deleted_label = deleted_label + self._secret_label_map = {} + # Secrets that are being dynamically added within the scope of this event handler run + self._new_secrets = [] + self._additional_secret_group_mapping = additional_secret_group_mapping + + for group, fields in additional_secret_group_mapping.items(): + if group not in SECRET_GROUPS.groups(): + setattr(SECRET_GROUPS, group, group) + for field in fields: + secret_group = SECRET_GROUPS.get_group(group) + internal_field = self._field_to_internal_name(field, secret_group) + self._secret_label_map.setdefault(group, []).append(internal_field) + self._secret_fields.append(internal_field) @property def scope(self) -> Optional[Scope]: @@ -1505,71 +1796,157 @@ def scope(self) -> Optional[Scope]: if isinstance(self.component, Unit): return Scope.UNIT - def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: - """Event emitted when the relation has changed.""" - pass + @property + def secret_label_map(self) -> Dict[str, str]: + """Property storing secret mappings.""" + return self._secret_label_map - def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: - """Event emitted when the secret has changed.""" - pass + @property + def static_secret_fields(self) -> List[str]: + """Re-definition of the property in a way that dynamically extended list is retrieved.""" + return self._secret_fields - def _generate_secret_label( - self, relation_name: str, relation_id: int, group_mapping: SecretGroup - ) -> str: - members = [self.charm.app.name] - if self.scope: - members.append(self.scope.value) - return f"{'.'.join(members)}" + @property + def secret_fields(self) -> List[str]: + """Re-definition of the property in a way that dynamically extended list is retrieved.""" + return ( + self.static_secret_fields if self.static_secret_fields else self.current_secret_fields + ) - def _generate_secret_field_name(self, group_mapping: SecretGroup = SecretGroup.EXTRA) -> str: - """Generate unique group_mappings for secrets within a relation context.""" - return f"{self.secret_field_name}" + @property + def current_secret_fields(self) -> List[str]: + """Helper method to get all currently existing secret fields (added statically or dynamically).""" + if not self.secrets_enabled: + return [] - @juju_secrets_only - def _get_relation_secret( + if len(self._model.relations[self.relation_name]) > 1: + raise ValueError(f"More than one peer relation on {self.relation_name}") + + relation = self._model.relations[self.relation_name][0] + fields = [] + + ignores = [SECRET_GROUPS.get_group("user"), SECRET_GROUPS.get_group("tls")] + for group in SECRET_GROUPS.groups(): + if group in ignores: + continue + if content := self._get_group_secret_contents(relation, group): + fields += list(content.keys()) + return list(set(fields) | set(self._new_secrets)) + + @dynamic_secrets_only + def set_secret( self, relation_id: int, - group_mapping: SecretGroup = SecretGroup.EXTRA, - relation_name: Optional[str] = None, - ) -> 
Optional[CachedSecret]: - """Retrieve a Juju Secret specifically for peer relations. + field: str, + value: str, + group_mapping: Optional[SecretGroup] = None, + ) -> None: + """Public interface method to add a Relation Data field specifically as a Juju Secret. - In case this code may be executed within a rolling upgrade, and we may need to - migrate secrets from the databag to labels, we make sure to stick the correct - label on the secret, and clean up the local databag. + Args: + relation_id: ID of the relation + field: The secret field that is to be added + value: The string value of the secret + group_mapping: The name of the "secret group", in case the field is to be added to an existing secret """ - if not relation_name: - relation_name = self.relation_name + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + self._new_secrets.append(full_field) + if self._no_group_with_databag(field, full_field): + self.update_relation_data(relation_id, {full_field: value}) + + # Unlike for set_secret(), there's no harm using this operation with static secrets + # The restricion is only added to keep the concept clear + @dynamic_secrets_only + def get_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to fetch secrets only.""" + full_field = self._field_to_internal_name(field, group_mapping) + if ( + self.secrets_enabled + and full_field not in self.current_secret_fields + and field not in self.current_secret_fields + ): + return + if self._no_group_with_databag(field, full_field): + return self.fetch_my_relation_field(relation_id, full_field) - relation = self.charm.model.get_relation(relation_name, relation_id) - if not relation: + @dynamic_secrets_only + def delete_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to delete secrets only.""" + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + logger.warning(f"Secret {field} from group {group_mapping} was not found") return + if self._no_group_with_databag(field, full_field): + self.delete_relation_data(relation_id, [full_field]) - label = self._generate_secret_label(relation_name, relation_id, group_mapping) - secret_uri = relation.data[self.component].get(self._generate_secret_field_name(), None) + # Helpers - # Fetching the secret with fallback to URI (in case label is not yet known) - # Label would we "stuck" on the secret in case it is found - secret = self.secrets.get(label, secret_uri) + @staticmethod + def _field_to_internal_name(field: str, group: Optional[SecretGroup]) -> str: + if not group or group == SECRET_GROUPS.EXTRA: + return field + return f"{field}{GROUP_SEPARATOR}{group}" - # Either app scope secret with leader executing, or unit scope secret - leader_or_unit_scope = self.component != self.local_app or self.local_unit.is_leader() - if secret_uri and secret and leader_or_unit_scope: - # Databag reference to the secret URI can be removed, now that it's labelled - relation.data[self.component].pop(self._generate_secret_field_name(), None) - return secret + @staticmethod + def _internal_name_to_field(name: str) -> Tuple[str, SecretGroup]: + parts = name.split(GROUP_SEPARATOR) + if not len(parts) > 1: + return (parts[0], SECRET_GROUPS.EXTRA) + secret_group = 
SECRET_GROUPS.get_group(parts[1]) + if not secret_group: + raise ValueError(f"Invalid secret field {name}") + return (parts[0], secret_group) + + def _group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. - def _get_group_secret_contents( - self, - relation: Relation, - group: SecretGroup, - secret_fields: Optional[Union[Set[str], List[str]]] = None, + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! + """ + secret_fieldnames_grouped = {} + for key in secret_fields: + field, group = self._internal_name_to_field(key) + secret_fieldnames_grouped.setdefault(group, []).append(field) + return secret_fieldnames_grouped + + def _content_for_secret_group( + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup ) -> Dict[str, str]: - """Helper function to retrieve collective, requested contents of a secret.""" - result = super()._get_group_secret_contents(relation, group, secret_fields) - if not self.deleted_label: - return result - return {key: result[key] for key in result if result[key] != self.deleted_label} + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: + return {k: v for k, v in content.items() if k in self.secret_fields} + return { + self._internal_name_to_field(k)[0]: v + for k, v in content.items() + if k in self.secret_fields + } + + # Backwards compatibility + + def _check_deleted_label(self, relation, fields) -> None: + """Helper function for legacy behavior.""" + current_data = self.fetch_my_relation_data([relation.id], fields) + if current_data is not None: + # Check if the secret we wanna delete actually exists + # Given the "deleted label", here we can't rely on the default mechanism (i.e. 'key not found') + if non_existent := (set(fields) & set(self.secret_fields)) - set( + current_data.get(relation.id, []) + ): + logger.debug( + "Non-existing secret %s was attempted to be removed.", + ", ".join(non_existent), + ) def _remove_secret_from_databag(self, relation, fields: List[str]) -> None: """For Rolling Upgrades -- when moving from databag to secrets usage. @@ -1585,24 +1962,126 @@ def _remove_secret_from_databag(self, relation, fields: List[str]) -> None: if self._fetch_relation_data_without_secrets(self.component, relation, [field]): self._delete_relation_data_without_secrets(self.component, relation, [field]) - def _fetch_specific_relation_data( - self, relation: Relation, fields: Optional[List[str]] + def _remove_secret_field_name_from_databag(self, relation) -> None: + """Making sure that the old databag URI is gone. + + This action should not be executed more than once. + """ + # Nothing to do if 'internal-secret' is not in the databag + if not (relation.data[self.component].get(self._generate_secret_field_name())): + return + + # Making sure that the secret receives its label + # (This should have happened by the time we get here, rather an extra security measure.) 
+ secret = self._get_relation_secret(relation.id) + + # Either app scope secret with leader executing, or unit scope secret + leader_or_unit_scope = self.component != self.local_app or self.local_unit.is_leader() + if secret and leader_or_unit_scope: + # Databag reference to the secret URI can be removed, now that it's labelled + relation.data[self.component].pop(self._generate_secret_field_name(), None) + + def _previous_labels(self) -> List[str]: + """Generator for legacy secret label names, for backwards compatibility.""" + result = [] + members = [self._model.app.name] + if self.scope: + members.append(self.scope.value) + result.append(f"{'.'.join(members)}") + return result + + def _no_group_with_databag(self, field: str, full_field: str) -> bool: + """Check that no secret group is attempted to be used together with databag.""" + if not self.secrets_enabled and full_field != field: + logger.error( + f"Can't access {full_field}: no secrets available (i.e. no secret groups either)." + ) + return False + return True + + # Event handlers + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + pass + + def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: + """Event emitted when the secret has changed.""" + pass + + # Overrides of Relation Data handling functions + + def _generate_secret_label( + self, relation_name: str, relation_id: int, group_mapping: SecretGroup + ) -> str: + members = [relation_name, self._model.app.name] + if self.scope: + members.append(self.scope.value) + if group_mapping != SECRET_GROUPS.EXTRA: + members.append(group_mapping) + return f"{'.'.join(members)}" + + def _generate_secret_field_name(self, group_mapping: SecretGroup = SECRET_GROUPS.EXTRA) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{self.secret_field_name}" + + @juju_secrets_only + def _get_relation_secret( + self, + relation_id: int, + group_mapping: SecretGroup = SECRET_GROUPS.EXTRA, + relation_name: Optional[str] = None, + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret specifically for peer relations. + + In case this code may be executed within a rolling upgrade, and we may need to + migrate secrets from the databag to labels, we make sure to stick the correct + label on the secret, and clean up the local databag. + """ + if not relation_name: + relation_name = self.relation_name + + relation = self._model.get_relation(relation_name, relation_id) + if not relation: + return + + label = self._generate_secret_label(relation_name, relation_id, group_mapping) + secret_uri = relation.data[self.component].get(self._generate_secret_field_name(), None) + + # URI or legacy label is only to be applied when moving single legacy secret to a (new) label + if group_mapping == SECRET_GROUPS.EXTRA: + # Fetching the secret with fallback to URI (in case label is not yet known) + # Label would be "stuck" on the secret in case it is found + return self.secrets.get(label, secret_uri, legacy_labels=self._previous_labels()) + return self.secrets.get(label) + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Union[Set[str], List[str]] = [], ) -> Dict[str, str]: - """Fetch data available (directily or indirectly -- i.e.
secrets) from the relation.""" - return self._fetch_relation_data_with_secrets( - self.component, self.secret_fields, relation, fields - ) + """Helper function to retrieve collective, requested contents of a secret.""" + secret_fields = [self._internal_name_to_field(k)[0] for k in secret_fields] + result = super()._get_group_secret_contents(relation, group, secret_fields) + if self.deleted_label: + result = {key: result[key] for key in result if result[key] != self.deleted_label} + if self._additional_secret_group_mapping: + return {self._field_to_internal_name(key, group): result[key] for key in result} + return result + @either_static_or_dynamic_secrets def _fetch_my_specific_relation_data( self, relation: Relation, fields: Optional[List[str]] ) -> Dict[str, str]: - """Fetch data available from the relation for owner/this_app.""" + """Fetch data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app.""" return self._fetch_relation_data_with_secrets( self.component, self.secret_fields, relation, fields ) + @either_static_or_dynamic_secrets def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: - """Update data available from the relation for owner/this_app.""" + """Update data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app.""" self._remove_secret_from_databag(relation, list(data.keys())) _, normal_fields = self._process_secret_fields( relation, @@ -1612,25 +2091,17 @@ def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> Non data=data, uri_to_databag=False, ) + self._remove_secret_field_name_from_databag(relation) normal_content = {k: v for k, v in data.items() if k in normal_fields} self._update_relation_data_without_secrets(self.component, relation, normal_content) + @either_static_or_dynamic_secrets def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: - """Delete data available from the relation for owner/this_app.""" + """Delete data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app.""" if self.secret_fields and self.deleted_label: - current_data = self.fetch_my_relation_data([relation.id], fields) - if current_data is not None: - # Check if the secret we want to delete actually exists - # Given the "deleted label", here we can't rely on the default mechanism - # (i.e.
'key not found') - if non_existent := (set(fields) & set(self.secret_fields)) - set( - current_data.get(relation.id, []) - ): - logger.error( - "Non-existing secret %s was attempted to be removed.", - ", ".join(non_existent), - ) + # Legacy, backwards compatibility + self._check_deleted_label(relation, fields) _, normal_fields = self._process_secret_fields( relation, @@ -1668,12 +2139,55 @@ def fetch_relation_field( # Public functions -- inherited - fetch_my_relation_data = DataRelation.fetch_my_relation_data - fetch_my_relation_field = DataRelation.fetch_my_relation_field + fetch_my_relation_data = Data.fetch_my_relation_data + fetch_my_relation_field = Data.fetch_my_relation_field -class DataPeerUnit(DataPeer): - """Unit databag representation.""" +class DataPeerEventHandlers(RequirerEventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + pass + + def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: + """Event emitted when the secret has changed.""" + pass + + +class DataPeer(DataPeerData, DataPeerEventHandlers): + """Represents peer relations.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerEventHandlers.__init__(self, charm, self, unique_key) + + +class DataPeerUnitData(DataPeerData): + """Unit data abstraction representation.""" SCOPE = Scope.UNIT @@ -1681,7 +2195,92 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) -# General events +class DataPeerUnit(DataPeerUnitData, DataPeerEventHandlers): + """Unit databag representation.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerEventHandlers.__init__(self, charm, self, unique_key) + + +class DataPeerOtherUnitData(DataPeerUnitData): + """Unit data abstraction representation.""" + + def __init__(self, unit: Unit, *args, **kwargs): + super().__init__(*args, **kwargs) + self.local_unit = unit + self.component = unit + + def update_relation_data(self, relation_id: int, data: dict) -> None: + """This method makes no sense for a Other Peer Relation.""" + raise NotImplementedError("It's not possible to update data of another unit.") + + def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: + """This method makes no sense for a Other Peer Relation.""" + raise NotImplementedError("It's not possible to delete data of another unit.") + + 
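For orientation, a minimal usage sketch of the peer-relation classes defined above (not part of the patch; the relation name "database-peers", the charm class name and the field names are illustrative placeholders):

from ops.charm import CharmBase

from charms.data_platform_libs.v0.data_interfaces import DataPeer


class ExamplePeerCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        # App-scoped peer data; no static secret fields are declared here, so the
        # dynamic set_secret()/get_secret() calls below pass @dynamic_secrets_only.
        self.peer_app = DataPeer(self, relation_name="database-peers")

    def _example(self, relation_id: int):
        # dict-like view over the databag and any backing Juju Secrets
        self.peer_app.as_dict(relation_id)["cluster-state"] = "active"
        # Stored as a Juju Secret where secrets are available, in the databag otherwise;
        # another unit's databag could be read (never written) via DataPeerOtherUnit.
        self.peer_app.set_secret(relation_id, "internal-ca", "placeholder-value")
        return self.peer_app.get_secret(relation_id, "internal-ca")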
+class DataPeerOtherUnitEventHandlers(DataPeerEventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: DataPeerUnitData): + """Manager of base client relations.""" + unique_key = f"{relation_data.relation_name}-{relation_data.local_unit.name}" + super().__init__(charm, relation_data, unique_key=unique_key) + + +class DataPeerOtherUnit(DataPeerOtherUnitData, DataPeerOtherUnitEventHandlers): + """Unit databag representation for another unit than the executor.""" + + def __init__( + self, + unit: Unit, + charm: CharmBase, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + ): + DataPeerOtherUnitData.__init__( + self, + unit, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerOtherUnitEventHandlers.__init__(self, charm, self) + + +################################################################################ +# Cross-charm Relations Data Handling and Events +################################################################################ + +# Generic events class ExtraRoleEvent(RelationEvent): @@ -1805,6 +2404,17 @@ def database(self) -> Optional[str]: class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent): """Event emitted when a new database is requested for use on this relation.""" + @property + def external_node_connectivity(self) -> bool: + """Returns the requested external_node_connectivity field.""" + if not self.relation.app: + return False + + return ( + self.relation.data[self.relation.app].get("external-node-connectivity", "false") + == "true" + ) + class DatabaseProvidesEvents(CharmEvents): """Database events. @@ -1915,28 +2525,11 @@ class DatabaseRequiresEvents(CharmEvents): # Database Provider and Requires -class DatabaseProvides(DataProvides): - """Provider-side of the database relations.""" +class DatabaseProviderData(ProviderData): + """Provider-side data of the database relations.""" - on = DatabaseProvidesEvents() # pyright: ignore [reportAssignmentType] - - def __init__(self, charm: CharmBase, relation_name: str) -> None: - super().__init__(charm, relation_name) - - def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: - """Event emitted when the relation has changed.""" - # Leader only - if not self.local_unit.is_leader(): - return - # Check which data has changed to emit customs events. - diff = self._diff(event) - - # Emit a database requested event if the setup key (database name and optional - # extra user roles) was added to the relation databag by the application. - if "database" in diff.added: - getattr(self.on, "database_requested").emit( - event.relation, app=event.app, unit=event.unit - ) + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) def set_database(self, relation_id: int, database_name: str) -> None: """Set database name.
@@ -2010,37 +2603,140 @@ def set_version(self, relation_id: int, version: str) -> None: self.update_relation_data(relation_id, {"version": version}) -class DatabaseRequires(DataRequires): - """Requires-side of the database relation.""" +class DatabaseProviderEventHandlers(EventHandlers): + """Provider-side of the database relation handlers.""" - on = DatabaseRequiresEvents() # pyright: ignore [reportAssignmentType] + on = DatabaseProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, charm: CharmBase, relation_data: DatabaseProviderData, unique_key: str = "" + ): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + # Just to calm down pyright, it can't parse that the same type is being used in the super() call above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a database requested event if the setup key (database name and optional + # extra user roles) was added to the relation databag by the application. + if "database" in diff.added: + getattr(self.on, "database_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class DatabaseProvides(DatabaseProviderData, DatabaseProviderEventHandlers): + """Provider-side of the database relations.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + DatabaseProviderData.__init__(self, charm.model, relation_name) + DatabaseProviderEventHandlers.__init__(self, charm, self) + + +class DatabaseRequirerData(RequirerData): + """Requirer-side of the database relation.""" def __init__( self, - charm, + model: Model, relation_name: str, database_name: str, extra_user_roles: Optional[str] = None, relations_aliases: Optional[List[str]] = None, additional_secret_fields: Optional[List[str]] = [], + external_node_connectivity: bool = False, ): """Manager of database client relations.""" - super().__init__(charm, relation_name, extra_user_roles, additional_secret_fields) + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) self.database = database_name self.relations_aliases = relations_aliases + self.external_node_connectivity = external_node_connectivity + + def is_postgresql_plugin_enabled(self, plugin: str, relation_index: int = 0) -> bool: + """Returns whether a plugin is enabled in the database. + + Args: + plugin: name of the plugin to check. + relation_index: optional relation index to check the database + (default: 0 - first relation). + + PostgreSQL only. + """ + # Psycopg 3 is imported locally to avoid the need of its package installation + # when relating to a database charm other than PostgreSQL. + import psycopg + + # Return False if no relation is established. + if len(self.relations) == 0: + return False + + relation_id = self.relations[relation_index].id + host = self.fetch_relation_field(relation_id, "endpoints") + + # Return False if there is no endpoint available. 
+ if host is None: + return False + + host = host.split(":")[0] + + content = self.fetch_relation_data([relation_id], ["username", "password"]).get( + relation_id, {} + ) + user = content.get("username") + password = content.get("password") + + connection_string = ( + f"host='{host}' dbname='{self.database}' user='{user}' password='{password}'" + ) + try: + with psycopg.connect(connection_string) as connection: + with connection.cursor() as cursor: + cursor.execute( + "SELECT TRUE FROM pg_extension WHERE extname=%s::text;", (plugin,) + ) + return cursor.fetchone() is not None + except psycopg.Error as e: + logger.exception( + f"failed to check whether {plugin} plugin is enabled in the database: %s", str(e) + ) + return False + + +class DatabaseRequirerEventHandlers(RequirerEventHandlers): + """Requires-side of the relation.""" + + on = DatabaseRequiresEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, charm: CharmBase, relation_data: DatabaseRequirerData, unique_key: str = "" + ): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data # Define custom event names for each alias. - if relations_aliases: + if self.relation_data.relations_aliases: # Ensure the number of aliases does not exceed the maximum # of connections allowed in the specific relation. - relation_connection_limit = self.charm.meta.requires[relation_name].limit - if len(relations_aliases) != relation_connection_limit: + relation_connection_limit = self.charm.meta.requires[ + self.relation_data.relation_name + ].limit + if len(self.relation_data.relations_aliases) != relation_connection_limit: raise ValueError( f"The number of aliases must match the maximum number of connections allowed in the relation. " - f"Expected {relation_connection_limit}, got {len(relations_aliases)}" + f"Expected {relation_connection_limit}, got {len(self.relation_data.relations_aliases)}" ) - for relation_alias in relations_aliases: + if self.relation_data.relations_aliases: + for relation_alias in self.relation_data.relations_aliases: self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent) self.on.define_event( f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent @@ -2063,32 +2759,32 @@ def _assign_relation_alias(self, relation_id: int) -> None: relation_id: the identifier for a particular relation. """ # If no aliases were provided, return immediately. - if not self.relations_aliases: + if not self.relation_data.relations_aliases: return # Return if an alias was already assigned to this relation # (like when there are more than one unit joining the relation). - relation = self.charm.model.get_relation(self.relation_name, relation_id) - if relation and relation.data[self.local_unit].get("alias"): + relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id) + if relation and relation.data[self.relation_data.local_unit].get("alias"): return # Retrieve the available aliases (the ones that weren't assigned to any relation). 
- available_aliases = self.relations_aliases[:] - for relation in self.charm.model.relations[self.relation_name]: - alias = relation.data[self.local_unit].get("alias") + available_aliases = self.relation_data.relations_aliases[:] + for relation in self.charm.model.relations[self.relation_data.relation_name]: + alias = relation.data[self.relation_data.local_unit].get("alias") if alias: logger.debug("Alias %s was already assigned to relation %d", alias, relation.id) available_aliases.remove(alias) # Set the alias in the unit relation databag of the specific relation. - relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id) if relation: - relation.data[self.local_unit].update({"alias": available_aliases[0]}) + relation.data[self.relation_data.local_unit].update({"alias": available_aliases[0]}) # We need to set relation alias also on the application level so, # it will be accessible in show-unit juju command, executed for a consumer application unit - if self.local_unit.is_leader(): - self.update_relation_data(relation_id, {"alias": available_aliases[0]}) + if self.relation_data.local_unit.is_leader(): + self.relation_data.update_relation_data(relation_id, {"alias": available_aliases[0]}) def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None: """Emit an aliased event to a particular relation if it has an alias. @@ -2112,60 +2808,11 @@ def _get_relation_alias(self, relation_id: int) -> Optional[str]: Returns: the relation alias or None if the relation was not found. """ - for relation in self.charm.model.relations[self.relation_name]: + for relation in self.charm.model.relations[self.relation_data.relation_name]: if relation.id == relation_id: - return relation.data[self.local_unit].get("alias") + return relation.data[self.relation_data.local_unit].get("alias") return None - def is_postgresql_plugin_enabled(self, plugin: str, relation_index: int = 0) -> bool: - """Returns whether a plugin is enabled in the database. - - Args: - plugin: name of the plugin to check. - relation_index: optional relation index to check the database - (default: 0 - first relation). - - PostgreSQL only. - """ - # Psycopg 3 is imported locally to avoid the need of its package installation - # when relating to a database charm other than PostgreSQL. - import psycopg - - # Return False if no relation is established. - if len(self.relations) == 0: - return False - - relation_id = self.relations[relation_index].id - host = self.fetch_relation_field(relation_id, "endpoints") - - # Return False if there is no endpoint available. 
- if host is None: - return False - - host = host.split(":")[0] - - content = self.fetch_relation_data([relation_id], ["username", "password"]).get( - relation_id, {} - ) - user = content.get("username") - password = content.get("password") - - connection_string = ( - f"host='{host}' dbname='{self.database}' user='{user}' password='{password}'" - ) - try: - with psycopg.connect(connection_string) as connection: - with connection.cursor() as cursor: - cursor.execute( - "SELECT TRUE FROM pg_extension WHERE extname=%s::text;", (plugin,) - ) - return cursor.fetchone() is not None - except psycopg.Error as e: - logger.exception( - f"failed to check whether {plugin} plugin is enabled in the database: %s", str(e) - ) - return False - def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: """Event emitted when the database relation is created.""" super()._on_relation_created_event(event) @@ -2175,19 +2822,19 @@ def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: # Sets both database and extra user roles in the relation # if the roles are provided. Otherwise, sets only the database. - if not self.local_unit.is_leader(): + if not self.relation_data.local_unit.is_leader(): return - if self.extra_user_roles: - self.update_relation_data( - event.relation.id, - { - "database": self.database, - "extra-user-roles": self.extra_user_roles, - }, - ) - else: - self.update_relation_data(event.relation.id, {"database": self.database}) + event_data = {"database": self.relation_data.database} + + if self.relation_data.extra_user_roles: + event_data["extra-user-roles"] = self.relation_data.extra_user_roles + + # set external-node-connectivity field + if self.relation_data.external_node_connectivity: + event_data["external-node-connectivity"] = "true" + + self.relation_data.update_relation_data(event.relation.id, event_data) def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the database relation has changed.""" @@ -2195,12 +2842,12 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: diff = self._diff(event) # Register all new secrets with their labels - if any(newval for newval in diff.added if self._is_secret_field(newval)): - self._register_secrets_to_relation(event.relation, diff.added) + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) # Check if the database is created # (the database charm shared the credentials). 
- secret_field_user = self._generate_secret_field_name(SecretGroup.USER) + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) if ( "username" in diff.added and "password" in diff.added ) or secret_field_user in diff.added: @@ -2246,7 +2893,37 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: self._emit_aliased_event(event, "read_only_endpoints_changed") -# Kafka related events +class DatabaseRequires(DatabaseRequirerData, DatabaseRequirerEventHandlers): + """Provider-side of the database relations.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + database_name: str, + extra_user_roles: Optional[str] = None, + relations_aliases: Optional[List[str]] = None, + additional_secret_fields: Optional[List[str]] = [], + external_node_connectivity: bool = False, + ): + DatabaseRequirerData.__init__( + self, + charm.model, + relation_name, + database_name, + extra_user_roles, + relations_aliases, + additional_secret_fields, + external_node_connectivity, + ) + DatabaseRequirerEventHandlers.__init__(self, charm, self) + + +################################################################################ +# Charm-specific Relations Data and Events +################################################################################ + +# Kafka Events class KafkaProvidesEvent(RelationEvent): @@ -2339,29 +3016,11 @@ class KafkaRequiresEvents(CharmEvents): # Kafka Provides and Requires -class KafkaProvides(DataProvides): +class KafkaProviderData(ProviderData): """Provider-side of the Kafka relation.""" - on = KafkaProvidesEvents() # pyright: ignore [reportAssignmentType] - - def __init__(self, charm: CharmBase, relation_name: str) -> None: - super().__init__(charm, relation_name) - - def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: - """Event emitted when the relation has changed.""" - # Leader only - if not self.local_unit.is_leader(): - return - - # Check which data has changed to emit customs events. - diff = self._diff(event) - - # Emit a topic requested event if the setup key (topic name and optional - # extra user roles) was added to the relation databag by the application. - if "topic" in diff.added: - getattr(self.on, "topic_requested").emit( - event.relation, app=event.app, unit=event.unit - ) + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) def set_topic(self, relation_id: int, topic: str) -> None: """Set topic name in the application relation databag. @@ -2400,14 +3059,47 @@ def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None: self.update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris}) -class KafkaRequires(DataRequires): - """Requires-side of the Kafka relation.""" +class KafkaProviderEventHandlers(EventHandlers): + """Provider-side of the Kafka relation.""" - on = KafkaRequiresEvents() # pyright: ignore [reportAssignmentType] + on = KafkaProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: KafkaProviderData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. 
The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a topic requested event if the setup key (topic name and optional + # extra user roles) was added to the relation databag by the application. + if "topic" in diff.added: + getattr(self.on, "topic_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class KafkaProvides(KafkaProviderData, KafkaProviderEventHandlers): + """Provider-side of the Kafka relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + KafkaProviderData.__init__(self, charm.model, relation_name) + KafkaProviderEventHandlers.__init__(self, charm, self) + + +class KafkaRequirerData(RequirerData): + """Requirer-side of the Kafka relation.""" def __init__( self, - charm, + model: Model, relation_name: str, topic: str, extra_user_roles: Optional[str] = None, @@ -2415,9 +3107,7 @@ def __init__( additional_secret_fields: Optional[List[str]] = [], ): """Manager of Kafka client relations.""" - # super().__init__(charm, relation_name) - super().__init__(charm, relation_name, extra_user_roles, additional_secret_fields) - self.charm = charm + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) self.topic = topic self.consumer_group_prefix = consumer_group_prefix or "" @@ -2433,20 +3123,34 @@ def topic(self, value): raise ValueError(f"Error on topic '{value}', cannot be a wildcard.") self._topic = value + +class KafkaRequirerEventHandlers(RequirerEventHandlers): + """Requires-side of the Kafka relation.""" + + on = KafkaRequiresEvents() # pyright: ignore [reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: KafkaRequirerData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: """Event emitted when the Kafka relation is created.""" super()._on_relation_created_event(event) - if not self.local_unit.is_leader(): + if not self.relation_data.local_unit.is_leader(): return # Sets topic, extra user roles, and "consumer-group-prefix" in the relation - relation_data = { - f: getattr(self, f.replace("-", "_"), "") - for f in ["consumer-group-prefix", "extra-user-roles", "topic"] - } + relation_data = {"topic": self.relation_data.topic} + + if self.relation_data.extra_user_roles: + relation_data["extra-user-roles"] = self.relation_data.extra_user_roles - self.update_relation_data(event.relation.id, relation_data) + if self.relation_data.consumer_group_prefix: + relation_data["consumer-group-prefix"] = self.relation_data.consumer_group_prefix + + self.relation_data.update_relation_data(event.relation.id, relation_data) def _on_secret_changed_event(self, event: SecretChangedEvent): """Event notifying about a new value of a secret.""" @@ -2461,10 +3165,10 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: # (the Kafka charm shared the credentials). 
# Register all new secrets with their labels - if any(newval for newval in diff.added if self._is_secret_field(newval)): - self._register_secrets_to_relation(event.relation, diff.added) + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) - secret_field_user = self._generate_secret_field_name(SecretGroup.USER) + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) if ( "username" in diff.added and "password" in diff.added ) or secret_field_user in diff.added: @@ -2487,6 +3191,30 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: return +class KafkaRequires(KafkaRequirerData, KafkaRequirerEventHandlers): + """Provider-side of the Kafka relation.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + topic: str, + extra_user_roles: Optional[str] = None, + consumer_group_prefix: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ) -> None: + KafkaRequirerData.__init__( + self, + charm.model, + relation_name, + topic, + extra_user_roles, + consumer_group_prefix, + additional_secret_fields, + ) + KafkaRequirerEventHandlers.__init__(self, charm, self) + + # Opensearch related events @@ -2537,28 +3265,11 @@ class OpenSearchRequiresEvents(CharmEvents): # OpenSearch Provides and Requires Objects -class OpenSearchProvides(DataProvides): +class OpenSearchProvidesData(ProviderData): """Provider-side of the OpenSearch relation.""" - on = OpenSearchProvidesEvents() # pyright: ignore[reportAssignmentType] - - def __init__(self, charm: CharmBase, relation_name: str) -> None: - super().__init__(charm, relation_name) - - def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: - """Event emitted when the relation has changed.""" - # Leader only - if not self.local_unit.is_leader(): - return - # Check which data has changed to emit customs events. - diff = self._diff(event) - - # Emit an index requested event if the setup key (index name and optional extra user roles) - # have been added to the relation databag by the application. - if "index" in diff.added: - getattr(self.on, "index_requested").emit( - event.relation, app=event.app, unit=event.unit - ) + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) def set_index(self, relation_id: int, index: str) -> None: """Set the index in the application relation databag. @@ -2590,45 +3301,87 @@ def set_version(self, relation_id: int, version: str) -> None: self.update_relation_data(relation_id, {"version": version}) -class OpenSearchRequires(DataRequires): - """Requires-side of the OpenSearch relation.""" +class OpenSearchProvidesEventHandlers(EventHandlers): + """Provider-side of the OpenSearch relation.""" + + on = OpenSearchProvidesEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchProvidesData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. 
+ diff = self._diff(event) + + # Emit an index requested event if the setup key (index name and optional extra user roles) + # have been added to the relation databag by the application. + if "index" in diff.added: + getattr(self.on, "index_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class OpenSearchProvides(OpenSearchProvidesData, OpenSearchProvidesEventHandlers): + """Provider-side of the OpenSearch relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + OpenSearchProvidesData.__init__(self, charm.model, relation_name) + OpenSearchProvidesEventHandlers.__init__(self, charm, self) - on = OpenSearchRequiresEvents() # pyright: ignore[reportAssignmentType] + +class OpenSearchRequiresData(RequirerData): + """Requires data side of the OpenSearch relation.""" def __init__( self, - charm, + model: Model, relation_name: str, index: str, extra_user_roles: Optional[str] = None, additional_secret_fields: Optional[List[str]] = [], ): """Manager of OpenSearch client relations.""" - super().__init__(charm, relation_name, extra_user_roles, additional_secret_fields) - self.charm = charm + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) self.index = index + +class OpenSearchRequiresEventHandlers(RequirerEventHandlers): + """Requires events side of the OpenSearch relation.""" + + on = OpenSearchRequiresEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchRequiresData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: """Event emitted when the OpenSearch relation is created.""" super()._on_relation_created_event(event) - if not self.local_unit.is_leader(): + if not self.relation_data.local_unit.is_leader(): return # Sets both index and extra user roles in the relation if the roles are provided. # Otherwise, sets only the index. 
- data = {"index": self.index} - if self.extra_user_roles: - data["extra-user-roles"] = self.extra_user_roles + data = {"index": self.relation_data.index} + if self.relation_data.extra_user_roles: + data["extra-user-roles"] = self.relation_data.extra_user_roles - self.update_relation_data(event.relation.id, data) + self.relation_data.update_relation_data(event.relation.id, data) def _on_secret_changed_event(self, event: SecretChangedEvent): """Event notifying about a new value of a secret.""" if not event.secret.label: return - relation = self._relation_from_secret_label(event.secret.label) + relation = self.relation_data._relation_from_secret_label(event.secret.label) if not relation: logging.info( f"Received secret {event.secret.label} but couldn't parse, seems irrelevant" @@ -2657,11 +3410,11 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: diff = self._diff(event) # Register all new secrets with their labels - if any(newval for newval in diff.added if self._is_secret_field(newval)): - self._register_secrets_to_relation(event.relation, diff.added) + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) - secret_field_user = self._generate_secret_field_name(SecretGroup.USER) - secret_field_tls = self._generate_secret_field_name(SecretGroup.TLS) + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + secret_field_tls = self.relation_data._generate_secret_field_name(SECRET_GROUPS.TLS) updates = {"username", "password", "tls", "tls-ca", secret_field_user, secret_field_tls} if len(set(diff._asdict().keys()) - updates) < len(diff): logger.info("authentication updated at: %s", datetime.now()) @@ -2691,3 +3444,25 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: event.relation, app=event.app, unit=event.unit ) # here check if this is the right design return + + +class OpenSearchRequires(OpenSearchRequiresData, OpenSearchRequiresEventHandlers): + """Requires-side of the OpenSearch relation.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ) -> None: + OpenSearchRequiresData.__init__( + self, + charm.model, + relation_name, + index, + extra_user_roles, + additional_secret_fields, + ) + OpenSearchRequiresEventHandlers.__init__(self, charm, self) diff --git a/tests/integration/relations/opensearch_provider/application-charm/metadata.yaml b/tests/integration/relations/opensearch_provider/application-charm/metadata.yaml index b326b5d25..36e01879e 100644 --- a/tests/integration/relations/opensearch_provider/application-charm/metadata.yaml +++ b/tests/integration/relations/opensearch_provider/application-charm/metadata.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
name: application description: | diff --git a/tests/integration/relations/opensearch_provider/application-charm/requirements.txt b/tests/integration/relations/opensearch_provider/application-charm/requirements.txt index c5dcd5bd6..c34867411 100644 --- a/tests/integration/relations/opensearch_provider/application-charm/requirements.txt +++ b/tests/integration/relations/opensearch_provider/application-charm/requirements.txt @@ -1 +1 @@ -ops==2.9.0 +ops==2.13.0 diff --git a/tests/integration/relations/opensearch_provider/application-charm/src/charm.py b/tests/integration/relations/opensearch_provider/application-charm/src/charm.py index f8ea13dd1..943228b8e 100755 --- a/tests/integration/relations/opensearch_provider/application-charm/src/charm.py +++ b/tests/integration/relations/opensearch_provider/application-charm/src/charm.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Application charm that connects to opensearch using the opensearch-client relation.""" @@ -126,6 +126,10 @@ def _on_run_request_action(self, event: ActionEvent): password = requires.fetch_relation_field(relation_id, "password") hosts = requires.fetch_relation_field(relation_id, "endpoints") + if not username or not password: + event.fail("Secrets not accessible yet.") + return + host = None if not hosts: return diff --git a/tests/integration/relations/test_opensearch_provider.py b/tests/integration/relations/test_opensearch_provider.py index 82c2a4c82..e80890849 100644 --- a/tests/integration/relations/test_opensearch_provider.py +++ b/tests/integration/relations/test_opensearch_provider.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import asyncio import json diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 191dce517..616af56ed 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import logging diff --git a/tests/integration/test_manual_large_deployment_upgrades.py b/tests/integration/test_manual_large_deployment_upgrades.py new file mode 100644 index 000000000..418ed75bb --- /dev/null +++ b/tests/integration/test_manual_large_deployment_upgrades.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import asyncio +import logging + +import pytest +from pytest_operator.plugin import OpsTest + +from .ha.continuous_writes import ContinuousWrites +from .ha.helpers import app_name, assert_continuous_writes_consistency +from .ha.test_horizontal_scaling import IDLE_PERIOD +from .helpers import APP_NAME, MODEL_CONFIG, SERIES, run_action +from .helpers_deployments import get_application_units, wait_until +from .tls.test_tls import TLS_CERTIFICATES_APP_NAME + +logger = logging.getLogger(__name__) + + +OPENSEARCH_ORIGINAL_CHARM_NAME = "opensearch" +OPENSEARCH_INITIAL_CHANNEL = "2/edge" +OPENSEARCH_MAIN_APP_NAME = "main" +OPENSEARCH_FAILOVER_APP_NAME = "failover" + + +charm = None + + +WORKLOAD = { + APP_NAME: 3, + OPENSEARCH_FAILOVER_APP_NAME: 2, + OPENSEARCH_MAIN_APP_NAME: 1, +} + + +@pytest.fixture() +async def c_writes(ops_test: OpsTest): + """Creates instance of the ContinuousWrites.""" + app = (await app_name(ops_test)) or APP_NAME + return ContinuousWrites(ops_test, app) + + +@pytest.fixture() +async def c_writes_runner(ops_test: OpsTest, c_writes: ContinuousWrites): + """Starts continuous write operations and clears writes at the end of the test.""" + await c_writes.start() + yield + await c_writes.clear() + logger.info("\n\n\n\nThe writes have been cleared.\n\n\n\n") + + +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +@pytest.mark.skip_if_deployed +async def test_large_deployment_deploy_original_charm(ops_test: OpsTest) -> None: + """Build and deploy the charm for large deployment tests.""" + await ops_test.model.set_config(MODEL_CONFIG) + # Deploy TLS Certificates operator. + tls_config = {"ca-common-name": "CN_CA"} + + main_orchestrator_conf = { + "cluster_name": "backup-test", + "init_hold": False, + "roles": "cluster_manager", + } + failover_orchestrator_conf = { + "cluster_name": "backup-test", + "init_hold": True, + "roles": "cluster_manager", + } + data_hot_conf = {"cluster_name": "backup-test", "init_hold": True, "roles": "data.hot"} + + await asyncio.gather( + ops_test.model.deploy(TLS_CERTIFICATES_APP_NAME, channel="stable", config=tls_config), + ops_test.model.deploy( + OPENSEARCH_ORIGINAL_CHARM_NAME, + application_name=OPENSEARCH_MAIN_APP_NAME, + num_units=WORKLOAD[OPENSEARCH_MAIN_APP_NAME], + series=SERIES, + channel=OPENSEARCH_INITIAL_CHANNEL, + config=main_orchestrator_conf, + ), + ops_test.model.deploy( + OPENSEARCH_ORIGINAL_CHARM_NAME, + application_name=OPENSEARCH_FAILOVER_APP_NAME, + num_units=WORKLOAD[OPENSEARCH_FAILOVER_APP_NAME], + series=SERIES, + channel=OPENSEARCH_INITIAL_CHANNEL, + config=failover_orchestrator_conf, + ), + ops_test.model.deploy( + OPENSEARCH_ORIGINAL_CHARM_NAME, + application_name=APP_NAME, + num_units=WORKLOAD[APP_NAME], + series=SERIES, + channel=OPENSEARCH_INITIAL_CHANNEL, + config=data_hot_conf, + ), + ) + + # Large deployment setup + await ops_test.model.integrate("main:peer-cluster-orchestrator", "failover:peer-cluster") + await ops_test.model.integrate("main:peer-cluster-orchestrator", f"{APP_NAME}:peer-cluster") + await ops_test.model.integrate( + "failover:peer-cluster-orchestrator", f"{APP_NAME}:peer-cluster" + ) + + # TLS setup + await ops_test.model.integrate("main", TLS_CERTIFICATES_APP_NAME) + await ops_test.model.integrate("failover", TLS_CERTIFICATES_APP_NAME) + await ops_test.model.integrate(APP_NAME, TLS_CERTIFICATES_APP_NAME) + + # Charms except s3-integrator should be active + await wait_until( + ops_test, + apps=[ + TLS_CERTIFICATES_APP_NAME, + 
OPENSEARCH_MAIN_APP_NAME, + OPENSEARCH_FAILOVER_APP_NAME, + APP_NAME, + ], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={ + TLS_CERTIFICATES_APP_NAME: 1, + OPENSEARCH_MAIN_APP_NAME: WORKLOAD[OPENSEARCH_MAIN_APP_NAME], + OPENSEARCH_FAILOVER_APP_NAME: WORKLOAD[OPENSEARCH_FAILOVER_APP_NAME], + APP_NAME: WORKLOAD[APP_NAME], + }, + idle_period=IDLE_PERIOD, + timeout=3600, + ) + + +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_manually_upgrade_to_local( + ops_test: OpsTest, c_writes: ContinuousWrites, c_writes_runner +) -> None: + """Test upgrade from usptream to currently locally built version.""" + units = await get_application_units(ops_test, OPENSEARCH_MAIN_APP_NAME) + leader_id = [u.id for u in units if u.is_leader][0] + + action = await run_action( + ops_test, + leader_id, + "pre-upgrade-check", + app=OPENSEARCH_MAIN_APP_NAME, + ) + assert action.status == "completed" + + logger.info("Build charm locally") + global charm + if not charm: + charm = await ops_test.build_charm(".") + + async with ops_test.fast_forward(): + for app, unit_count in WORKLOAD.items(): + application = ops_test.model.applications[app] + units = await get_application_units(ops_test, app) + leader_id = [u.id for u in units if u.is_leader][0] + + logger.info(f"Refresh app {app}, leader {leader_id}") + + await application.refresh(path=charm) + logger.info("Refresh is over, waiting for the charm to settle") + + if unit_count == 1: + # Upgrade already happened for this unit, wait for idle and continue + await wait_until( + ops_test, + apps=[app], + apps_statuses=["active"], + units_statuses=["active"], + idle_period=IDLE_PERIOD, + timeout=3600, + ) + logger.info(f"Upgrade of app {app} finished") + continue + + await wait_until( + ops_test, + apps=[app], + apps_statuses=["blocked"], + units_statuses=["active"], + wait_for_exact_units={ + app: unit_count, + }, + idle_period=120, + timeout=3600, + ) + # Resume the upgrade + action = await run_action( + ops_test, + leader_id, + "resume-upgrade", + app=app, + ) + assert action.status == "completed" + logger.info(f"resume-upgrade: {action}") + + await wait_until( + ops_test, + apps=[app], + apps_statuses=["active"], + units_statuses=["active"], + idle_period=IDLE_PERIOD, + timeout=3600, + ) + logger.info(f"Upgrade of app {app} finished") + + # continuous writes checks + await assert_continuous_writes_consistency( + ops_test, + c_writes, + [APP_NAME, OPENSEARCH_MAIN_APP_NAME], + ) diff --git a/tests/integration/test_small_deployment_upgrades.py b/tests/integration/test_small_deployment_upgrades.py new file mode 100644 index 000000000..28190f4d6 --- /dev/null +++ b/tests/integration/test_small_deployment_upgrades.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import logging +import subprocess + +import pytest +from pytest_operator.plugin import OpsTest + +from .ha.continuous_writes import ContinuousWrites +from .ha.helpers import app_name, assert_continuous_writes_consistency +from .ha.test_horizontal_scaling import IDLE_PERIOD +from .helpers import APP_NAME, MODEL_CONFIG, SERIES, run_action +from .helpers_deployments import get_application_units, wait_until +from .tls.test_tls import TLS_CERTIFICATES_APP_NAME + +logger = logging.getLogger(__name__) + + +OPENSEARCH_ORIGINAL_CHARM_NAME = "opensearch" +OPENSEARCH_INITIAL_CHANNEL = "2/edge" +MACHINE_ID = 0 + + +charm = None + + +@pytest.fixture() +async def c_writes(ops_test: OpsTest): + """Creates instance of the ContinuousWrites.""" + app = (await app_name(ops_test)) or APP_NAME + return ContinuousWrites(ops_test, app) + + +@pytest.fixture() +async def c_writes_runner(ops_test: OpsTest, c_writes: ContinuousWrites): + """Starts continuous write operations and clears writes at the end of the test.""" + await c_writes.start() + yield + await c_writes.clear() + logger.info("\n\n\n\nThe writes have been cleared.\n\n\n\n") + + +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +@pytest.mark.skip_if_deployed +async def test_deploy_latest_from_channel(ops_test: OpsTest) -> None: + """Deploy OpenSearch.""" + await ops_test.model.set_config(MODEL_CONFIG) + + await ops_test.model.deploy( + OPENSEARCH_ORIGINAL_CHARM_NAME, + application_name=APP_NAME, + num_units=3, + channel=OPENSEARCH_INITIAL_CHANNEL, + series=SERIES, + ) + + # Deploy TLS Certificates operator. + config = {"ca-common-name": "CN_CA"} + await ops_test.model.deploy(TLS_CERTIFICATES_APP_NAME, channel="stable", config=config) + + # Relate it to OpenSearch to set up TLS. 
+ await ops_test.model.integrate(APP_NAME, TLS_CERTIFICATES_APP_NAME) + await ops_test.model.wait_for_idle( + apps=[TLS_CERTIFICATES_APP_NAME, APP_NAME], + status="active", + timeout=1400, + idle_period=50, + ) + assert len(ops_test.model.applications[APP_NAME].units) == 3 + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_upgrade_rollback( + ops_test: OpsTest, c_writes: ContinuousWrites, c_writes_runner +) -> None: + """Test upgrade from upstream to currently locally built version.""" + app = (await app_name(ops_test)) or APP_NAME + units = await get_application_units(ops_test, app) + leader_id = [u.id for u in units if u.is_leader][0] + + application = ops_test.model.applications[APP_NAME] + action = await run_action( + ops_test, + leader_id, + "pre-upgrade-check", + app=app, + ) + assert action.status == "completed" + + logger.info("Build charm locally") + global charm + if not charm: + charm = await ops_test.build_charm(".") + + async with ops_test.fast_forward(): + logger.info("Refresh the charm") + await application.refresh(path=charm) + + await wait_until( + ops_test, + apps=[app], + apps_statuses=["blocked"], + units_statuses=["active"], + wait_for_exact_units={ + APP_NAME: 3, + }, + idle_period=IDLE_PERIOD, + ) + + logger.info("Rolling back") + # Facing the same issue as descripted in: + # https://github.com/juju/python-libjuju/issues/924 + # application = ops_test.model.applications[APP_NAME] + # await application.refresh( + # switch="ch:pguimaraes-opensearch-upgrade-test", + # channel=OPENSEARCH_INITIAL_CHANNEL, + # ) + subprocess.check_output( + f"juju refresh {app} --switch {OPENSEARCH_ORIGINAL_CHARM_NAME} " + f"--channel {OPENSEARCH_INITIAL_CHANNEL}".split(), + ) + + await wait_until( + ops_test, + apps=[app], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={ + APP_NAME: 3, + }, + idle_period=IDLE_PERIOD, + ) + + +@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "xlarge"]) +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_upgrade_to_local( + ops_test: OpsTest, c_writes: ContinuousWrites, c_writes_runner +) -> None: + """Test upgrade from usptream to currently locally built version.""" + app = (await app_name(ops_test)) or APP_NAME + units = await get_application_units(ops_test, app) + leader_id = [u.id for u in units if u.is_leader][0] + + application = ops_test.model.applications[app] + action = await run_action( + ops_test, + leader_id, + "pre-upgrade-check", + app=app, + ) + assert action.status == "completed" + + logger.info("Build charm locally") + global charm + if not charm: + charm = await ops_test.build_charm(".") + + async with ops_test.fast_forward(): + logger.info("Refresh the charm") + await application.refresh(path=charm) + + await wait_until( + ops_test, + apps=[app], + apps_statuses=["blocked"], + units_statuses=["active"], + wait_for_exact_units={ + APP_NAME: 3, + }, + idle_period=120, + ) + + logger.info("Upgrade finished") + logger.info(subprocess.check_output("juju status".split())) + # Resume the upgrade + action = await run_action( + ops_test, + leader_id, + "resume-upgrade", + app=app, + ) + logger.info(action) + assert action.status == "completed" + + logger.info("Refresh is over, waiting for the charm to settle") + await wait_until( + ops_test, + apps=[app], + apps_statuses=["active"], + units_statuses=["active"], + wait_for_exact_units={ + APP_NAME: 3, + }, + idle_period=IDLE_PERIOD, + ) + + # continuous writes checks + await 
assert_continuous_writes_consistency(ops_test, c_writes, [app]) diff --git a/tests/integration/tls/__init__.py b/tests/integration/tls/__init__.py index db3bfe1a6..e3979c0f6 100644 --- a/tests/integration/tls/__init__.py +++ b/tests/integration/tls/__init__.py @@ -1,2 +1,2 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. diff --git a/tests/integration/tls/helpers.py b/tests/integration/tls/helpers.py index ce8c18478..74c7ece06 100644 --- a/tests/integration/tls/helpers.py +++ b/tests/integration/tls/helpers.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. from pytest_operator.plugin import OpsTest diff --git a/tests/integration/tls/test_tls.py b/tests/integration/tls/test_tls.py index 234241b09..c81e225eb 100644 --- a/tests/integration/tls/test_tls.py +++ b/tests/integration/tls/test_tls.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import logging diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index ff68b7bbf..dd2988bf8 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. # # Learn more about testing at: https://juju.is/docs/sdk/testing diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index bbc227832..37e0f610b 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import pytest diff --git a/tests/unit/lib/__init__.py b/tests/unit/lib/__init__.py index aec4a8bc6..514c79179 100644 --- a/tests/unit/lib/__init__.py +++ b/tests/unit/lib/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. # # Learn more about testing at: https://juju.is/docs/sdk/testing diff --git a/tests/unit/lib/test_backups.py b/tests/unit/lib/test_backups.py index 1afb4f119..1300a7dea 100644 --- a/tests/unit/lib/test_backups.py +++ b/tests/unit/lib/test_backups.py @@ -1,8 +1,9 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
"""Unit test for the opensearch_plugins library.""" import unittest +from collections import namedtuple from unittest.mock import MagicMock, PropertyMock, patch import charms @@ -15,6 +16,8 @@ RestoreInProgress, ) from charms.opensearch.v0.helper_cluster import IndexStateEnum + +# from charms.opensearch.v0.models import DeploymentType from charms.opensearch.v0.opensearch_backups import ( S3_RELATION, S3_REPOSITORY, @@ -37,6 +40,14 @@ from ops.testing import Harness from charm import OpenSearchOperatorCharm +from lib.charms.opensearch.v0.models import ( + DeploymentDescription, + DeploymentState, + DeploymentType, + PeerClusterConfig, + StartMode, + State, +) from tests.helpers import patch_network_get TEST_BUCKET_NAME = "s3://bucket-test" @@ -50,9 +61,28 @@ 2023-01-01T00:20:00Z | snapshot in progress""" -@pytest.fixture(scope="session") +deployment_desc = namedtuple("deployment_desc", ["typ"]) + + +def create_deployment_desc(): + return DeploymentDescription( + config=PeerClusterConfig( + cluster_name="logs", init_hold=False, roles=["cluster_manager", "data"] + ), + start=StartMode.WITH_PROVIDED_ROLES, + pending_directives=[], + app="opensearch", + typ=DeploymentType.MAIN_ORCHESTRATOR, + state=DeploymentState(value=State.ACTIVE), + ) + + +@pytest.fixture(scope="function") def harness(): harness_obj = Harness(OpenSearchOperatorCharm) + charms.opensearch.v0.opensearch_base_charm.OpenSearchPeerClustersManager.deployment_desc = ( + MagicMock(return_value=create_deployment_desc()) + ) harness_obj.begin() charm = harness_obj.charm # Override the config to simulate the TestPlugin @@ -76,15 +106,12 @@ def harness(): # Replace some unused methods that will be called as part of set_leader with mock charm._put_admin_user = MagicMock() charm._put_kibanaserver_user = MagicMock() + charm._put_or_update_internal_user_leader = MagicMock() + harness_obj.add_relation(PeerRelationName, "opensearch") harness_obj.set_leader(is_leader=True) - return harness_obj - -@pytest.fixture(scope="session", autouse=True) -def cleanup_harnes(harness): - yield - harness.cleanup() + return harness_obj @pytest.fixture(scope="function") @@ -456,7 +483,11 @@ class TestBackups(unittest.TestCase): def setUp(self) -> None: self.harness = Harness(OpenSearchOperatorCharm) self.addCleanup(self.harness.cleanup) + charms.opensearch.v0.opensearch_base_charm.OpenSearchPeerClustersManager.deployment_desc = MagicMock( + return_value=create_deployment_desc() + ) self.harness.begin() + self.charm = self.harness.charm # Override the config to simulate the TestPlugin # As config.yaml does not exist, the setup below simulates it @@ -481,6 +512,7 @@ def setUp(self) -> None: # Replace some unused methods that will be called as part of set_leader with mock self.charm._put_admin_user = MagicMock() self.charm._put_kibanaserver_user = MagicMock() + self.charm._put_or_update_internal_user_leader = MagicMock() self.peer_id = self.harness.add_relation(PeerRelationName, "opensearch") self.harness.set_leader(is_leader=True) diff --git a/tests/unit/lib/test_helper_charm.py b/tests/unit/lib/test_helper_charm.py index f1bdaadfb..ea12d8882 100644 --- a/tests/unit/lib/test_helper_charm.py +++ b/tests/unit/lib/test_helper_charm.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
"""Unit test for the helper_cluster library.""" diff --git a/tests/unit/lib/test_helper_cluster.py b/tests/unit/lib/test_helper_cluster.py index 463fb805b..81c62c338 100644 --- a/tests/unit/lib/test_helper_cluster.py +++ b/tests/unit/lib/test_helper_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Unit test for the helper_cluster library.""" diff --git a/tests/unit/lib/test_helper_conf_setter.py b/tests/unit/lib/test_helper_conf_setter.py index 83daa5c90..e86c9cf22 100644 --- a/tests/unit/lib/test_helper_conf_setter.py +++ b/tests/unit/lib/test_helper_conf_setter.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Unit test for the helper_conf_setter library.""" diff --git a/tests/unit/lib/test_helper_networking.py b/tests/unit/lib/test_helper_networking.py index de83e6fdf..2a41831ed 100644 --- a/tests/unit/lib/test_helper_networking.py +++ b/tests/unit/lib/test_helper_networking.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Unit test for the helper_cluster library.""" diff --git a/tests/unit/lib/test_helper_security.py b/tests/unit/lib/test_helper_security.py index 3e1dd7c3c..195692440 100644 --- a/tests/unit/lib/test_helper_security.py +++ b/tests/unit/lib/test_helper_security.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Unit test for the helper_cluster library.""" diff --git a/tests/unit/lib/test_ml_plugins.py b/tests/unit/lib/test_ml_plugins.py index d216d6376..60a1cc7e9 100644 --- a/tests/unit/lib/test_ml_plugins.py +++ b/tests/unit/lib/test_ml_plugins.py @@ -1,12 +1,12 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
"""Unit test for the opensearch_plugins library.""" import unittest -from collections import namedtuple from unittest.mock import MagicMock, PropertyMock, patch import charms +from charms.opensearch.v0.models import Node from charms.opensearch.v0.opensearch_health import HealthColors from charms.opensearch.v0.opensearch_plugins import OpenSearchKnn, PluginState from ops.testing import Harness @@ -99,12 +99,25 @@ def test_disable_via_config_change( self.plugin_manager._opensearch_config.add_plugin = MagicMock() self.charm.status = MagicMock() mock_is_node_up.return_value = True - # Mock the get_relation() method, as this file is focused on testing the ml plugins - # and not simulating the entire start process of the charm - PeerRelation = namedtuple("PeerRelation", ["data"]) - rel = PeerRelation(data={self.charm.unit: {"started": "True"}}) - self.charm.model.get_relation = MagicMock(return_value=rel) + self.charm._get_nodes = MagicMock( + return_value=[ + Node( + name=f"{self.charm.app.name}-0", + roles=["cluster_manager"], + ip="1.1.1.1", + app_name=self.charm.app.name, + unit_number=0, + ), + ] + ) + self.charm._get_nodes = MagicMock(return_value=[1]) + self.charm.planned_units = MagicMock(return_value=1) + self.charm.plugin_manager.check_plugin_manager_ready = MagicMock() self.charm._restart_opensearch_event = MagicMock() self.harness.update_config({"plugin_opensearch_knn": False}) + self.charm.plugin_manager.check_plugin_manager_ready.assert_called() self.charm._restart_opensearch_event.emit.assert_called_once() + self.plugin_manager._opensearch_config.add_plugin.assert_called_once_with( + {"knn.plugin.enabled": "false"} + ) diff --git a/tests/unit/lib/test_opensearch_base_charm.py b/tests/unit/lib/test_opensearch_base_charm.py index 6fd0f52ea..369fbaf28 100644 --- a/tests/unit/lib/test_opensearch_base_charm.py +++ b/tests/unit/lib/test_opensearch_base_charm.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Unit test for the helper_cluster library.""" diff --git a/tests/unit/lib/test_opensearch_config.py b/tests/unit/lib/test_opensearch_config.py index 5c7656a56..ace000874 100644 --- a/tests/unit/lib/test_opensearch_config.py +++ b/tests/unit/lib/test_opensearch_config.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Unit test for the helper_cluster library.""" diff --git a/tests/unit/lib/test_opensearch_internal_data.py b/tests/unit/lib/test_opensearch_internal_data.py index 5ebb6af0e..8c0af69d0 100644 --- a/tests/unit/lib/test_opensearch_internal_data.py +++ b/tests/unit/lib/test_opensearch_internal_data.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Unit test for the helper_cluster library.""" diff --git a/tests/unit/lib/test_opensearch_keystore.py b/tests/unit/lib/test_opensearch_keystore.py index 12dcba4f7..723b8a280 100644 --- a/tests/unit/lib/test_opensearch_keystore.py +++ b/tests/unit/lib/test_opensearch_keystore.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
"""Unit test for the opensearch_plugins library.""" diff --git a/tests/unit/lib/test_opensearch_peer_clusters.py b/tests/unit/lib/test_opensearch_peer_clusters.py index fe39618d2..f858418ec 100644 --- a/tests/unit/lib/test_opensearch_peer_clusters.py +++ b/tests/unit/lib/test_opensearch_peer_clusters.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Unit test for the opensearch_peer_clusters library.""" diff --git a/tests/unit/lib/test_opensearch_relation_provider.py b/tests/unit/lib/test_opensearch_relation_provider.py index c8c808968..3272d3568 100644 --- a/tests/unit/lib/test_opensearch_relation_provider.py +++ b/tests/unit/lib/test_opensearch_relation_provider.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import unittest diff --git a/tests/unit/lib/test_opensearch_secrets.py b/tests/unit/lib/test_opensearch_secrets.py index 495014cb7..beb030aca 100644 --- a/tests/unit/lib/test_opensearch_secrets.py +++ b/tests/unit/lib/test_opensearch_secrets.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. from unittest.mock import MagicMock, patch @@ -54,11 +54,14 @@ def setUp(self): self.harness.add_relation_unit(self.client_rel_id, "application/0") @patch("charm.OpenSearchOperatorCharm._put_or_update_internal_user_unit") + @patch("charm.OpenSearchOperatorCharm._put_or_update_internal_user_leader") @patch( "charms.opensearch.v0.opensearch_relation_provider.OpenSearchProvider.update_dashboards_password" ) @patch("charm.OpenSearchOperatorCharm.store_tls_resources") - def test_on_secret_changed_app(self, mock_store_tls_resources, mock_update_dashboard_pw, _): + def test_on_secret_changed_app( + self, mock_store_tls_resources, mock_update_dashboard_pw, _, __ + ): event = MagicMock() event.secret = MagicMock() diff --git a/tests/unit/lib/test_opensearch_tls.py b/tests/unit/lib/test_opensearch_tls.py index ec1642778..1996395fb 100644 --- a/tests/unit/lib/test_opensearch_tls.py +++ b/tests/unit/lib/test_opensearch_tls.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
"""Unit test for the helper_cluster library.""" @@ -36,7 +36,6 @@ def setUp(self, _) -> None: self.harness.add_network("1.1.1.1", endpoint=PeerRelationName) self.harness.add_network("1.1.1.1", endpoint=TLS_RELATION) self.harness.begin() - self.charm = self.harness.charm self.harness.add_relation(PeerRelationName, self.charm.app.name) self.harness.add_relation(TLS_RELATION, self.charm.app.name) @@ -241,3 +240,37 @@ def test_on_certificate_expiring(self, _, deployment_desc, request_certificate_c self.charm.tls._on_certificate_expiring(event_mock) request_certificate_creation.assert_called_once() + + @patch( + "charms.tls_certificates_interface.v3.tls_certificates.TLSCertificatesRequiresV3.request_certificate_renewal" + ) + @patch( + f"{BASE_LIB_PATH}.opensearch_peer_clusters.OpenSearchPeerClustersManager.deployment_desc" + ) + @patch("charm.OpenSearchOperatorCharm._put_or_update_internal_user_leader") + def test_on_certificate_invalidated(self, _, deployment_desc, request_certificate_renewal): + """Test _on_certificate_invalidated event.""" + csr = "csr_12345" + cert = "cert_12345" + key = create_utf8_encoded_private_key() + secret_key = CertType.UNIT_TRANSPORT.val + + self.secret_store.put_object( + Scope.UNIT, + secret_key, + {"csr": csr, "cert": cert, "key": key}, + ) + + deployment_desc.return_value = DeploymentDescription( + config=PeerClusterConfig(cluster_name="", init_hold=False, roles=[]), + start=StartMode.WITH_GENERATED_ROLES, + pending_directives=[], + typ=DeploymentType.MAIN_ORCHESTRATOR, + app=self.charm.app.name, + state=DeploymentState(value=State.ACTIVE), + ) + + event_mock = MagicMock(certificate=cert) + self.charm.tls._on_certificate_invalidated(event_mock) + + request_certificate_renewal.assert_called_once() diff --git a/tests/unit/lib/test_opensearch_users.py b/tests/unit/lib/test_opensearch_users.py index 3be0c8245..827ecd75e 100644 --- a/tests/unit/lib/test_opensearch_users.py +++ b/tests/unit/lib/test_opensearch_users.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. """Unit tests for the opensearch_users library.""" diff --git a/tests/unit/lib/test_plugins.py b/tests/unit/lib/test_plugins.py index d2b32eba5..f5604d4ef 100644 --- a/tests/unit/lib/test_plugins.py +++ b/tests/unit/lib/test_plugins.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
"""Unit test for the opensearch_plugins library.""" @@ -6,9 +6,11 @@ from unittest.mock import MagicMock, PropertyMock, call, patch import charms +from charms.opensearch.v0.constants_charm import PeerRelationName from charms.opensearch.v0.opensearch_backups import OpenSearchBackupPlugin from charms.opensearch.v0.opensearch_exceptions import OpenSearchCmdError from charms.opensearch.v0.opensearch_health import HealthColors +from charms.opensearch.v0.opensearch_internal_data import Scope from charms.opensearch.v0.opensearch_plugins import ( OpenSearchPlugin, OpenSearchPluginConfig, @@ -103,6 +105,10 @@ def setUp(self) -> None: self.addCleanup(self.harness.cleanup) self.harness.begin() self.charm = self.harness.charm + + self.peers_data = self.charm.peers_data + self.rel_id = self.harness.add_relation(PeerRelationName, self.charm.app.name) + # Override the config to simulate the TestPlugin # As config.yaml does not exist, the setup below simulates it self.harness.model._config = {"plugin_test": True, "plugin_test_already_installed": False} @@ -205,6 +211,10 @@ def test_failed_install_plugin_missing_dependency(self, _, mock_version) -> None ) def test_check_plugin_called_on_config_changed(self, mock_version, deployment_desc) -> None: """Triggers a config change and should call plugin manager.""" + self.harness.set_leader(True) + self.peers_data.put(Scope.APP, "security_index_initialised", True) + self.harness.set_leader(False) + deployment_desc.return_value = "something" self.plugin_manager.run = MagicMock(return_value=False) self.charm.opensearch_config.update_host_if_needed = MagicMock(return_value=False) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 2b582050f..b7fb12116 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -1,4 +1,4 @@ -# Copyright 2023 Canonical Ltd. +# Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import tempfile diff --git a/workload_version b/workload_version index d8b698973..fb2c0766b 100644 --- a/workload_version +++ b/workload_version @@ -1 +1 @@ -2.12.0 +2.13.0