diff --git a/great_expectations/expectations/core/expect_column_kl_divergence_to_be_less_than.py b/great_expectations/expectations/core/expect_column_kl_divergence_to_be_less_than.py
index a12ce992fcef..55c51b03ee60 100644
--- a/great_expectations/expectations/core/expect_column_kl_divergence_to_be_less_than.py
+++ b/great_expectations/expectations/core/expect_column_kl_divergence_to_be_less_than.py
@@ -1,5 +1,5 @@
 import logging
-from typing import TYPE_CHECKING, Dict, Optional, Tuple
+from typing import TYPE_CHECKING, Dict, Optional
 
 import altair as alt
 import numpy as np
@@ -44,9 +44,11 @@
     parse_row_condition_string_pandas_engine,
     substitute_none_for_missing,
 )
-from great_expectations.validator.computed_metric import MetricValue  # noqa: TCH001
 from great_expectations.validator.metric_configuration import MetricConfiguration
-from great_expectations.validator.metrics_calculator import MetricsCalculator
+from great_expectations.validator.metrics_calculator import (
+    MetricsCalculator,
+    _MetricsDict,
+)
 from great_expectations.validator.validator import (
     ValidationDependencies,
 )
@@ -248,9 +250,8 @@ def get_validation_dependencies(
             execution_engine=execution_engine,
             show_progress_bars=True,
         )
-        resolved_metrics: Dict[
-            Tuple[str, str, str], MetricValue
-        ] = metrics_calculator.compute_metrics(
+        resolved_metrics: _MetricsDict
+        resolved_metrics, _ = metrics_calculator.compute_metrics(
             metric_configurations=[partition_metric_configuration],
             runtime_configuration=None,
             min_graph_edges_pbar_enable=0,
diff --git a/great_expectations/experimental/metric_repository/column_descriptive_metrics_metric_retriever.py b/great_expectations/experimental/metric_repository/column_descriptive_metrics_metric_retriever.py
index d127659790e1..14b31480cef3 100644
--- a/great_expectations/experimental/metric_repository/column_descriptive_metrics_metric_retriever.py
+++ b/great_expectations/experimental/metric_repository/column_descriptive_metrics_metric_retriever.py
@@ -317,7 +317,7 @@ def _compute_metrics(
         (
             computed_metrics,
             aborted_metrics,
-        ) = validator.compute_metrics_with_aborted_metrics(
+        ) = validator.compute_metrics(
             metric_configurations=metric_configs,
             runtime_configuration={"catch_exceptions": True},
         )
diff --git a/great_expectations/rule_based_profiler/helpers/util.py b/great_expectations/rule_based_profiler/helpers/util.py
index 4e6655ca9a6b..dbd15bea8bfc 100644
--- a/great_expectations/rule_based_profiler/helpers/util.py
+++ b/great_expectations/rule_based_profiler/helpers/util.py
@@ -72,6 +72,9 @@
     from great_expectations.data_context.data_context.abstract_data_context import (
         AbstractDataContext,
     )
+    from great_expectations.validator.metrics_calculator import (
+        _MetricsDict,
+    )
     from great_expectations.validator.validator import Validator
 
 logger = logging.getLogger(__name__)
@@ -360,9 +363,8 @@ def get_resolved_metrics_by_key(
 
     # Step 1: Gather "MetricConfiguration" objects corresponding to all possible key values/combinations.
    # and compute all metric values (resolve "MetricConfiguration" objects ) using a single method call.
-    resolved_metrics: Dict[
-        Tuple[str, str, str], MetricValue
-    ] = validator.compute_metrics(
+    resolved_metrics: _MetricsDict
+    resolved_metrics, _ = validator.compute_metrics(
         metric_configurations=[
             metric_configuration
             for key, metric_configurations_for_key in metric_configurations_by_key.items()
diff --git a/great_expectations/validator/metrics_calculator.py b/great_expectations/validator/metrics_calculator.py
index 0e74ecebb6f6..4b75adc19fb1 100644
--- a/great_expectations/validator/metrics_calculator.py
+++ b/great_expectations/validator/metrics_calculator.py
@@ -138,7 +138,8 @@ def get_metrics(
         Returns:
             Return Dictionary with requested metrics resolved, with metric_name as key and computed metric as value.
         """
-        resolved_metrics: _MetricsDict = self.compute_metrics(
+        resolved_metrics: _MetricsDict
+        resolved_metrics, _ = self.compute_metrics(
             metric_configurations=list(metrics.values()),
             runtime_configuration=None,
             min_graph_edges_pbar_enable=0,
@@ -148,7 +149,7 @@ def get_metrics(
             for metric_configuration in metrics.values()
         }
 
-    def compute_metrics_with_aborted_metrics(
+    def compute_metrics(
         self,
         metric_configurations: List[MetricConfiguration],
         runtime_configuration: Optional[dict] = None,
@@ -162,7 +163,9 @@ def compute_metrics_with_aborted_metrics(
             min_graph_edges_pbar_enable: Minumum number of graph edges to warrant showing progress bars.
 
         Returns:
-            Dictionary with requested metrics resolved, with unique metric ID as key and computed metric as value.
+            Tuple of two elements. The first is a dictionary with the requested metrics resolved, with the
+            unique metric ID as key and the computed metric as value. The second is a dictionary with
+            information about any metrics that were aborted during computation, keyed by metric ID.
         """
         graph: ValidationGraph = self.build_metric_dependency_graph(
             metric_configurations=metric_configurations,
@@ -172,39 +175,13 @@ def compute_metrics_with_aborted_metrics(
         aborted_metrics_info: _AbortedMetricsInfoDict
         (
             resolved_metrics,
-            aborted_metrics_info,
+            aborted_metrics,
         ) = self.resolve_validation_graph_and_handle_aborted_metrics_info(
             graph=graph,
             runtime_configuration=runtime_configuration,
             min_graph_edges_pbar_enable=min_graph_edges_pbar_enable,
         )
-        return resolved_metrics, aborted_metrics_info
-
-    def compute_metrics(
-        self,
-        metric_configurations: List[MetricConfiguration],
-        runtime_configuration: Optional[dict] = None,
-        min_graph_edges_pbar_enable: int = 0,
-        # Set to low number (e.g., 3) to suppress progress bar for small graphs.
-    ) -> _MetricsDict:
-        """
-        Args:
-            metric_configurations: List of desired MetricConfiguration objects to be resolved.
-            runtime_configuration: Additional run-time settings (see "Validator.DEFAULT_RUNTIME_CONFIGURATION").
-            min_graph_edges_pbar_enable: Minumum number of graph edges to warrant showing progress bars.
-
-        Returns:
-            Dictionary with requested metrics resolved, with unique metric ID as key and computed metric as value.
-        """
-
-        # Note: Dropping aborted metrics for backward compatibility.
-        # This is a temporary solution until we can change all the callers to handle aborted metrics.
-        resolved_metrics, _ = self.compute_metrics_with_aborted_metrics(
-            metric_configurations=metric_configurations,
-            runtime_configuration=runtime_configuration,
-            min_graph_edges_pbar_enable=min_graph_edges_pbar_enable,
-        )
-        return resolved_metrics
+        return resolved_metrics, aborted_metrics
 
     def build_metric_dependency_graph(
         self,
diff --git a/great_expectations/validator/validator.py b/great_expectations/validator/validator.py
index 22cc78d749c9..ce2bb6f4d0d0 100644
--- a/great_expectations/validator/validator.py
+++ b/great_expectations/validator/validator.py
@@ -357,30 +357,6 @@ def compute_metrics(
         runtime_configuration: Optional[dict] = None,
         min_graph_edges_pbar_enable: int = 0,
         # Set to low number (e.g., 3) to suppress progress bar for small graphs.
-    ) -> _MetricsDict:
-        """
-        Convenience method that computes requested metrics (specified as elements of "MetricConfiguration" list).
-
-        Args:
-            metric_configurations: List of desired MetricConfiguration objects to be resolved.
-            runtime_configuration: Additional run-time settings (see "Validator.DEFAULT_RUNTIME_CONFIGURATION").
-            min_graph_edges_pbar_enable: Minumum number of graph edges to warrant showing progress bars.
-
-        Returns:
-            Dictionary with requested metrics resolved, with unique metric ID as key and computed metric as value.
-        """
-        return self._metrics_calculator.compute_metrics(
-            metric_configurations=metric_configurations,
-            runtime_configuration=runtime_configuration,
-            min_graph_edges_pbar_enable=min_graph_edges_pbar_enable,
-        )
-
-    def compute_metrics_with_aborted_metrics(
-        self,
-        metric_configurations: List[MetricConfiguration],
-        runtime_configuration: Optional[dict] = None,
-        min_graph_edges_pbar_enable: int = 0,
-        # Set to low number (e.g., 3) to suppress progress bar for small graphs.
     ) -> tuple[_MetricsDict, _AbortedMetricsInfoDict]:
         """
         Convenience method that computes requested metrics (specified as elements of "MetricConfiguration" list).
@@ -391,11 +367,11 @@ def compute_metrics_with_aborted_metrics(
             min_graph_edges_pbar_enable: Minumum number of graph edges to warrant showing progress bars.
 
         Returns:
-            Tuple with two elements. The first is a dictionary with requested metrics resolved, with unique metric
-            ID as key and computed metric as value. The second is a dictionary with information about any metrics
-            that were aborted during computation, using the unique metric ID as key.
+            Tuple of two elements. The first is a dictionary with the requested metrics resolved, with the
+            unique metric ID as key and the computed metric as value. The second is a dictionary with
+            information about any metrics that were aborted during computation, keyed by metric ID.
         """
-        return self._metrics_calculator.compute_metrics_with_aborted_metrics(
+        return self._metrics_calculator.compute_metrics(
             metric_configurations=metric_configurations,
             runtime_configuration=runtime_configuration,
             min_graph_edges_pbar_enable=min_graph_edges_pbar_enable,
diff --git a/tests/experimental/metric_repository/test_column_descriptive_metrics_metric_retriever.py b/tests/experimental/metric_repository/test_column_descriptive_metrics_metric_retriever.py
index a43ed36fd7b6..859097bef626 100644
--- a/tests/experimental/metric_repository/test_column_descriptive_metrics_metric_retriever.py
+++ b/tests/experimental/metric_repository/test_column_descriptive_metrics_metric_retriever.py
@@ -45,7 +45,7 @@ def test_get_metrics():
         ("column_values.null.count", "column=col2", ()): 1,
     }
     aborted_metrics = {}
-    mock_validator.compute_metrics_with_aborted_metrics.return_value = (
+    mock_validator.compute_metrics.return_value = (
         computed_metrics,
         aborted_metrics,
     )
@@ -182,7 +182,7 @@ def test_get_metrics_metrics_missing():
         ("column_values.null.count", "column=col2", ()): 1,
     }
     mock_aborted_metrics = {}
-    mock_validator.compute_metrics_with_aborted_metrics.return_value = (
+    mock_validator.compute_metrics.return_value = (
         mock_computed_metrics,
         mock_aborted_metrics,
     )
@@ -344,7 +344,7 @@ def test_get_metrics_with_exception():
         ("column_values.null.count", "column=col1", ()): 1,
         ("column_values.null.count", "column=col2", ()): 1,
     }
-    mock_validator.compute_metrics_with_aborted_metrics.return_value = (
+    mock_validator.compute_metrics.return_value = (
         computed_metrics,
         aborted_metrics,
     )
@@ -502,7 +502,7 @@ def test_get_metrics_with_column_type_missing():
         ("column_values.null.count", "column=col1", ()): 1,
         ("column_values.null.count", "column=col2", ()): 1,
     }
-    mock_validator.compute_metrics_with_aborted_metrics.return_value = (
+    mock_validator.compute_metrics.return_value = (
         computed_metrics,
         aborted_metrics,
     )
@@ -642,7 +642,7 @@ def test_get_metrics_only_gets_a_validator_once():
         ("column_values.null.count", "column=col1", ()): 1,
         ("column_values.null.count", "column=col2", ()): 1,
     }
-    mock_validator.compute_metrics_with_aborted_metrics.return_value = (
+    mock_validator.compute_metrics.return_value = (
         computed_metrics,
         aborted_metrics,
     )
diff --git a/tests/validator/test_metrics_calculator.py b/tests/validator/test_metrics_calculator.py
index 0bb8d2d08634..0392e0f76b45 100644
--- a/tests/validator/test_metrics_calculator.py
+++ b/tests/validator/test_metrics_calculator.py
@@ -93,7 +93,9 @@ def test_column_partition_metric(
             "allow_relative_error": False,
         },
     )
-    results = metrics_calculator.compute_metrics(metric_configurations=[desired_metric])
+    results, _ = metrics_calculator.compute_metrics(
+        metric_configurations=[desired_metric]
+    )
 
     increment = float(n_bins + 1) / n_bins
     assert all(
@@ -112,7 +114,9 @@ def test_column_partition_metric(
             "allow_relative_error": False,
         },
     )
-    results = metrics_calculator.compute_metrics(metric_configurations=[desired_metric])
+    results, _ = metrics_calculator.compute_metrics(
+        metric_configurations=[desired_metric]
+    )
 
     increment = datetime.timedelta(
         seconds=(seconds_in_week * float(n_bins + 1) / n_bins)