Skip to content

Commit

Permalink
Fix progress bar showing finished status for unselected reals
Browse files Browse the repository at this point in the history
This commit fixes the bug where the GUI shows the total realization count as the full ensemble size rather than the number of realizations actually selected for the run.
  • Loading branch information
jonathan-eq committed Nov 26, 2024
1 parent 71105ba commit 26ae63e
Show file tree
Hide file tree
Showing 7 changed files with 156 additions and 17 deletions.
31 changes: 25 additions & 6 deletions src/ert/run_models/base_run_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ def __init__(
self.minimum_required_realizations = minimum_required_realizations
self.active_realizations = copy.copy(active_realizations)
self.start_iteration = start_iteration
self.validate()
self.validate_active_realizations_count()

def log_at_startup(self) -> None:
keys_to_drop = [
Expand Down Expand Up @@ -406,9 +406,14 @@ def get_current_status(self) -> dict[str, int]:
for real in all_realizations.values():
status[str(real["status"])] += 1

status["Finished"] += self.active_realizations.count(False)
status["Finished"] += self._get_number_of_finished_realizations_from_reruns()
return status

def _get_number_of_finished_realizations_from_reruns(self) -> int:
return self.active_realizations.count(
False
) - self._initial_realizations_mask.count(False)

def get_memory_consumption(self) -> int:
max_memory_consumption: int = 0
if self._iter_snapshot.keys():
Expand All @@ -425,7 +430,7 @@ def _current_progress(self) -> tuple[float, int]:
done_realizations = self.active_realizations.count(False)
all_realizations = self._iter_snapshot[current_iter].reals
current_progress = 0.0
realization_count = len(self.active_realizations)
realization_count = self.get_number_of_active_realizations()

if all_realizations:
for real in all_realizations.values():
Expand Down Expand Up @@ -643,14 +648,17 @@ def get_number_of_existing_runpaths(self) -> int:
return [real_path.exists() for real_path in realization_set].count(True)

def get_number_of_active_realizations(self) -> int:
    """Return how many realizations were selected when the run was started.

    Uses the initial selection mask, not the live ``active_realizations``
    list, so the count is stable even after realizations finish or fail.
    """
    initial_mask = list(self._initial_realizations_mask)
    return initial_mask.count(True)

def get_number_of_successful_realizations(self) -> int:
    """Return the number of realizations that are still marked active.

    After evaluation, failed realizations are set to ``False`` in
    ``active_realizations``, so the remaining ``True`` entries are the
    successful ones.
    """
    current_mask = list(self.active_realizations)
    return current_mask.count(True)

def rm_run_path(self) -> None:
    """Delete every runpath directory of this run that exists on disk.

    Paths that do not exist are silently skipped.
    """
    for candidate in self.paths:
        target = Path(candidate)
        if not target.exists():
            continue
        shutil.rmtree(candidate)

def validate(self) -> None:
def validate_active_realizations_count(self) -> None:
active_realizations_count = self.get_number_of_active_realizations()
min_realization_count = self.minimum_required_realizations

Expand All @@ -661,6 +669,17 @@ def validate(self) -> None:
f"({min_realization_count})"
)

def validate_successful_realizations_count(self) -> None:
    """Raise if fewer realizations succeeded than MIN_REALIZATIONS requires.

    Called after an evaluation step to abort the run when too many
    realizations failed.

    Raises:
        ValueError: if the successful count is below
            ``minimum_required_realizations``.  The message text is matched
            by CLI tests (including the missing space before the
            parenthesis), so it must stay stable.
    """
    # Fix: local variable was misspelled "successful_reallizations_count".
    successful_realizations_count = self.get_number_of_successful_realizations()
    min_realization_count = self.minimum_required_realizations

    if successful_realizations_count < min_realization_count:
        raise ValueError(
            f"Number of successful realizations ({successful_realizations_count}) is less "
            f"than the specified MIN_REALIZATIONS"
            f"({min_realization_count})"
        )

@tracer.start_as_current_span(f"{__name__}.run_workflows")
def run_workflows(
self, runtime: HookRuntime, storage: Storage, ensemble: Ensemble
Expand Down Expand Up @@ -701,7 +720,7 @@ def _evaluate_and_postprocess(
self.active_realizations[iens] = False

num_successful_realizations = len(successful_realizations)
self.validate()
self.validate_successful_realizations_count()
logger.info(f"Experiment ran on QUEUESYSTEM: {self._queue_config.queue_system}")
logger.info(f"Experiment ran with number of realizations: {self.ensemble_size}")
logger.info(
Expand Down Expand Up @@ -750,7 +769,7 @@ def __init__(
def update(
self, prior: Ensemble, posterior_name: str, weight: float = 1.0
) -> Ensemble:
self.validate()
self.validate_successful_realizations_count()
self.send_event(
RunModelUpdateBeginEvent(iteration=prior.iteration, run_id=prior.id)
)
Expand Down
1 change: 0 additions & 1 deletion src/ert/run_models/iterated_ensemble_smoother.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,6 @@ def analyzeStep(
iteration: int,
initial_mask: npt.NDArray[np.bool_],
) -> None:
self.validate()
self.run_workflows(HookRuntime.PRE_UPDATE, self._storage, prior_storage)
try:
_, self.sies_smoother = iterative_smoother_update(
Expand Down
4 changes: 2 additions & 2 deletions tests/ert/ui_tests/cli/test_missing_runpath.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ def test_missing_runpath_has_isolated_failures(tmp_path, monkeypatch):
try:
with pytest.raises(
ErtCliError,
match=r"active realizations \(9\) is less than .* MIN_REALIZATIONS\(10\)",
match=r"successful realizations \(9\) is less than .* MIN_REALIZATIONS\(10\)",
):
run_cli_with_pm(
["ensemble_experiment", "config.ert", "--disable-monitoring"]
Expand Down Expand Up @@ -97,7 +97,7 @@ def test_failing_writes_lead_to_isolated_failures(tmp_path, monkeypatch, pytestc
with (
pytest.raises(
ErtCliError,
match=r"(?s)active realizations \(9\) is less than .* MIN_REALIZATIONS\(10\).*"
match=r"(?s)successful realizations \(9\) is less than .* MIN_REALIZATIONS\(10\).*"
r"Driver reported: Could not create submit script: Don't like realization-1",
),
patch_raising_named_temporary_file(queue_system.lower()),
Expand Down
5 changes: 4 additions & 1 deletion tests/ert/unit_tests/cli/test_model_hook_order.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,10 @@
@pytest.fixture
def patch_base_run_model(monkeypatch):
    # Stub out BaseRunModel collaborators with mocks so hook-order tests can
    # construct and run models without creating run paths or enforcing
    # realization-count validation.
    monkeypatch.setattr(base_run_model, "create_run_path", MagicMock())
    # NOTE(review): "validate" looks like the pre-rename method name retained
    # alongside its replacements below — confirm whether this patch is still
    # needed after the rename to the two validate_*_count methods.
    monkeypatch.setattr(BaseRunModel, "validate", MagicMock())
    monkeypatch.setattr(BaseRunModel, "validate_active_realizations_count", MagicMock())
    monkeypatch.setattr(
        BaseRunModel, "validate_successful_realizations_count", MagicMock()
    )
    monkeypatch.setattr(BaseRunModel, "set_env_key", MagicMock())


Expand Down
108 changes: 104 additions & 4 deletions tests/ert/unit_tests/run_models/test_base_run_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import pytest

from ert.config import ErtConfig, ModelConfig
from ert.ensemble_evaluator.snapshot import EnsembleSnapshot
from ert.run_models import BaseRunModel
from ert.storage import Storage
from ert.substitutions import Substitutions
Expand All @@ -18,7 +19,7 @@ def patch_abstractmethods(monkeypatch):


def test_base_run_model_supports_restart(minimum_case):
    # Stub out validation so BaseRunModel can be instantiated with a single
    # active realization and no storage side effects.
    # NOTE(review): both the old ("validate") and new validator names are
    # assigned here — the first looks like pre-rename residue from the diff;
    # confirm only validate_active_realizations_count is needed.
    BaseRunModel.validate = MagicMock()
    BaseRunModel.validate_active_realizations_count = MagicMock()
    brm = BaseRunModel(minimum_case, None, None, minimum_case.queue_config, [True])
    assert brm.support_restart

Expand All @@ -40,7 +41,7 @@ def __init__(self, status):
],
)
def test_active_realizations(initials):
BaseRunModel.validate = MagicMock()
BaseRunModel.validate_active_realizations_count = MagicMock()
brm = BaseRunModel(MagicMock(), None, None, None, initials)
brm._initial_realizations_mask = initials
assert brm.ensemble_size == len(initials)
Expand All @@ -60,7 +61,7 @@ def test_active_realizations(initials):
],
)
def test_failed_realizations(initials, completed, any_failed, failures):
BaseRunModel.validate = MagicMock()
BaseRunModel.validate_active_realizations_count = MagicMock()
brm = BaseRunModel(MagicMock(), None, None, None, initials)
brm._initial_realizations_mask = initials
brm._completed_realizations_mask = completed
Expand Down Expand Up @@ -182,7 +183,7 @@ def test_num_cpu_is_propagated_from_config_to_ensemble(run_args):
# Given NUM_CPU in the config file has a special value
config = ErtConfig.from_file_contents("NUM_REALIZATIONS 2\nNUM_CPU 42")
# Set up a BaseRunModel object from the config above:
BaseRunModel.validate = MagicMock()
BaseRunModel.validate_active_realizations_count = MagicMock()
brm = BaseRunModel(
config=config,
storage=MagicMock(spec=Storage),
Expand All @@ -199,3 +200,102 @@ def test_num_cpu_is_propagated_from_config_to_ensemble(run_args):
# Assert the built ensemble has the correct NUM_CPU information
assert ensemble.reals[0].num_cpu == 42
assert ensemble.reals[1].num_cpu == 42


@pytest.mark.parametrize(
    "initial_active_realizations, new_active_realizations, real_status_dict, expected_result",
    [
        pytest.param(
            [True, True, True],
            [False, False, False],
            {},
            {"Finished": 3},
            id="all_realizations_in_previous_run_succeeded",
        ),
        pytest.param(
            [True, True, True],
            [False, True, False],
            {},
            {"Finished": 2},
            id="some_realizations_in_previous_run_succeeded",
        ),
        pytest.param(
            [True, True, True],
            [True, True, True],
            {},
            {"Finished": 0},
            id="no_realizations_in_previous_run_succeeded",
        ),
        pytest.param(
            [False, True, True],
            [False, False, True],
            {},
            {"Finished": 1},
            id="did_not_run_all_realizations_and_some_succeeded",
        ),
        pytest.param(
            [False, True, True],
            [False, True, True],
            {},
            {"Finished": 0},
            id="did_not_run_all_realizations_and_none_succeeded",
        ),
        pytest.param(
            [True, True, True],
            [True, True, True],
            {"0": "Finished", "1": "Finished", "2": "Finished"},
            {"Finished": 3},
            id="ran_all_realizations_and_all_succeeded",
        ),
        pytest.param(
            [True, True, True],
            [True, True, True],
            {"0": "Finished", "1": "Finished", "2": "Failed"},
            {"Finished": 2, "Failed": 1},
            id="ran_all_realizations_and_some_failed",
        ),
        pytest.param(
            [True, True, True],
            [True, True, True],
            {"0": "Finished", "1": "Running", "2": "Failed"},
            {"Finished": 1, "Failed": 1, "Running": 1},
            id="ran_all_realizations_and_result_was_mixed",
        ),
        pytest.param(
            [True, True, True],
            [True, True, False],
            {"0": "Finished", "1": "Finished"},
            {"Finished": 3},
            id="reran_some_realizations_and_all_finished",
        ),
        pytest.param(
            [False, True, True],
            [False, True, False],
            {"1": "Finished"},
            {"Finished": 2},
            id="did_not_run_all_realizations_then_reran_and_the_realizations_finished",
        ),
    ],
)
def test_get_current_status(
    initial_active_realizations,
    new_active_realizations,
    real_status_dict: dict[str, str],
    expected_result,
):
    """Active realizations gets changed when we choose to rerun, and the result from the previous run should be included in the current_status."""
    config = ErtConfig.from_file_contents("NUM_REALIZATIONS 3")
    brm = BaseRunModel(
        config=config,
        storage=MagicMock(spec=Storage),
        queue_config=config.queue_config,
        status_queue=MagicMock(spec=SimpleQueue),
        active_realizations=initial_active_realizations,
    )
    # Build the iteration-0 snapshot from the per-realization status mapping.
    reals_by_index = {
        index: {"status": status} for index, status in real_status_dict.items()
    }
    brm._iter_snapshot[0] = EnsembleSnapshot.from_nested_dict(
        {"reals": reals_by_index}
    )
    # Simulate a rerun: the live active mask now differs from the initial one.
    brm.active_realizations = new_active_realizations
    assert dict(brm.get_current_status()) == expected_result
3 changes: 2 additions & 1 deletion tests/ert/unit_tests/run_models/test_ensemble_experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,8 @@ def get_run_path_mock(realizations, iteration=None):
return [f"out/realization-{r}/iter-{iteration}" for r in realizations]
return [f"out/realization-{r}" for r in realizations]

EnsembleExperiment.validate = MagicMock()
EnsembleExperiment.validate_active_realizations_count = MagicMock()
EnsembleExperiment.validate_successful_realizations_count = MagicMock()
ensemble_experiment = EnsembleExperiment(
*[MagicMock()] * 2 + [active_mask, MagicMock(), None] + [MagicMock()] * 4
)
Expand Down
21 changes: 19 additions & 2 deletions tests/ert/unit_tests/run_models/test_model_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,8 +205,17 @@ def test_multiple_data_assimilation_restart_paths(
prior_ensemble_id=str(uuid1()),
experiment_name=None,
)

monkeypatch.setattr(
ert.run_models.base_run_model.BaseRunModel,
"validate_active_realizations_count",
MagicMock(),
)

monkeypatch.setattr(
ert.run_models.base_run_model.BaseRunModel, "validate", MagicMock()
ert.run_models.base_run_model.BaseRunModel,
"validate_successful_realizations_count",
MagicMock(),
)
storage_mock = MagicMock()
ensemble_mock = MagicMock()
Expand Down Expand Up @@ -261,7 +270,15 @@ def test_evaluate_ensemble_paths(
):
monkeypatch.chdir(tmp_path)
monkeypatch.setattr(
ert.run_models.base_run_model.BaseRunModel, "validate", MagicMock()
ert.run_models.base_run_model.BaseRunModel,
"validate_active_realizations_count",
MagicMock(),
)

monkeypatch.setattr(
ert.run_models.base_run_model.BaseRunModel,
"validate_successful_realizations_count",
MagicMock(),
)
storage_mock = MagicMock()
ensemble_mock = MagicMock()
Expand Down

0 comments on commit 26ae63e

Please sign in to comment.