diff --git a/changes.d/6039.feat.md b/changes.d/6039.feat.md
new file mode 100644
index 00000000000..bcac4ec37dc
--- /dev/null
+++ b/changes.d/6039.feat.md
@@ -0,0 +1 @@
+Added a new task run mode "skip" in which tasks instantly generate their required outputs without actually running. This allows tasks to be configured to "skip" ahead of time, e.g. to skip a cycle of tasks that is no longer needed.
\ No newline at end of file
diff --git a/cylc/flow/cfgspec/workflow.py b/cylc/flow/cfgspec/workflow.py
index 934897bdbb4..f42d0c49489 100644
--- a/cylc/flow/cfgspec/workflow.py
+++ b/cylc/flow/cfgspec/workflow.py
@@ -56,7 +56,10 @@ from cylc.flow.platforms import (
     fail_if_platform_and_host_conflict,
     get_platform_deprecated_settings,
     is_platform_definition_subshell)
+from cylc.flow.run_modes import RunMode
 from cylc.flow.task_events_mgr import EventData
+from cylc.flow.run_modes import TASK_CONFIG_RUN_MODES
+

 # Regex to check whether a string is a command
 REC_COMMAND = re.compile(r'(`|\$\()\s*(.*)\s*([`)])$')
@@ -1334,6 +1337,36 @@ def get_script_common_text(this: str, example: Optional[str] = None):
                     "[platforms][<platform name>]submission retry delays"
                 )
             )
+            Conf(
+                'run mode', VDR.V_STRING,
+                options=list(TASK_CONFIG_RUN_MODES),
+                default=RunMode.LIVE.value,
+                desc=f'''
+                    When the workflow is running in live mode, run this
+                    *task* in one of the following modes:
+
+                    ``{RunMode.LIVE.value}`` (default):
+                        {RunMode.LIVE.describe()}
+                    ``{RunMode.SKIP.value}``:
+                        {RunMode.SKIP.describe()}
+
+                    .. note::
+
+                       This is primarily intended to be set at runtime via
+                       a broadcast; Cylc will warn you about any tasks set
+                       to run in skip mode in the workflow configuration at
+                       validation time. If you are using skip mode to
+                       create a dummy task, you can ignore this warning.
+
+                    .. seealso::
+
+                       - :ref:`task-run-modes.skip`
+                       - :cylc:conf:`flow.cylc[runtime][<namespace>][skip]`
+
+                    .. versionadded:: 8.4.0
+
+                ''')
             with Conf('meta', desc=r'''
                 Metadata for the task or task family.
@@ -1406,13 +1439,51 @@ def get_script_common_text(this: str, example: Optional[str] = None):
                 determine how an event handler responds to task failure
                 events.
             ''')
+            with Conf('skip', desc='''
+                Task configuration for :ref:`task-run-modes.skip`.
+
+                .. seealso::
+
+                   - :ref:`task-run-modes.skip`
+                   - :cylc:conf:`flow.cylc[runtime][<namespace>]run mode`
+
+                .. versionadded:: 8.4.0
+            '''):
+                Conf(
+                    'outputs',
+                    VDR.V_STRING_LIST,
+                    desc='''
+                        Outputs to be emitted by a task in skip mode.
+
+                        * By default, all required outputs will be
+                          generated, plus ``succeeded`` if success is
+                          optional.
+                        * If skip-mode outputs are specified and include
+                          neither ``succeeded`` nor ``failed``, then
+                          ``succeeded`` will be produced.
+                        * The outputs ``submitted`` and ``started`` are
+                          always produced and do not need to be defined
+                          here.
+
+                        .. versionadded:: 8.4.0
+                    '''
+                )
+                Conf(
+                    'disable task event handlers',
+                    VDR.V_BOOLEAN,
+                    default=True,
+                    desc='''
+                        Task event handlers are turned off by default for
+                        skip mode tasks. Changing this setting to ``False``
+                        will re-enable task event handlers.
+
+                        .. versionadded:: 8.4.0
+                    '''
+                )
             with Conf('simulation', desc='''
                 Task configuration for workflow *simulation* and *dummy* run
                 modes.

                 For a full description of simulation and dummy run modes see
-                :ref:`SimulationMode`.
+                :ref:`workflow-run-modes.simulation`.
            '''):
                Conf('default run length', VDR.V_INTERVAL, DurationFloat(10),
                     desc='''
diff --git a/cylc/flow/commands.py b/cylc/flow/commands.py
index b10474e7a66..c45194752ab 100644
--- a/cylc/flow/commands.py
+++ b/cylc/flow/commands.py
@@ -83,11 +83,9 @@
 from cylc.flow.log_level import log_level_to_verbosity
 from cylc.flow.network.schema import WorkflowStopMode
 from cylc.flow.parsec.exceptions import ParsecError
+from cylc.flow.run_modes import RunMode
 from cylc.flow.task_id import TaskID
-from cylc.flow.workflow_status import (
-    RunMode,
-    StopMode,
-)
+from cylc.flow.workflow_status import StopMode


 if TYPE_CHECKING:
diff --git a/cylc/flow/config.py b/cylc/flow/config.py
index 7c81a888d0d..ea31695d2e5 100644
--- a/cylc/flow/config.py
+++ b/cylc/flow/config.py
@@ -81,7 +81,8 @@
     is_relative_to,
 )
 from cylc.flow.task_qualifiers import ALT_QUALIFIERS
-from cylc.flow.simulation import configure_sim_modes
+from cylc.flow.run_modes.simulation import configure_sim_mode
+from cylc.flow.run_modes.skip import skip_mode_validate
 from cylc.flow.subprocctx import SubFuncContext
 from cylc.flow.task_events_mgr import (
     EventData,
@@ -98,6 +99,7 @@
     get_trigger_completion_variable_maps,
     trigger_to_completion_variable,
 )
+from cylc.flow.run_modes import RunMode
 from cylc.flow.task_trigger import TaskTrigger, Dependency
 from cylc.flow.taskdef import TaskDef
 from cylc.flow.unicode_rules import (
@@ -113,7 +115,6 @@
     WorkflowFiles,
     check_deprecation,
 )
-from cylc.flow.workflow_status import RunMode
 from cylc.flow.xtrigger_mgr import XtriggerCollator

 if TYPE_CHECKING:
@@ -512,9 +513,10 @@ def __init__(
         self.process_runahead_limit()

-        run_mode = self.run_mode()
+        run_mode = RunMode.get(self.options)
         if run_mode in {RunMode.SIMULATION, RunMode.DUMMY}:
-            configure_sim_modes(self.taskdefs.values(), run_mode)
+            for taskdef in self.taskdefs.values():
+                configure_sim_mode(taskdef.rtconfig, None, False)

         self.configure_workflow_state_polling_tasks()
@@ -566,6 +568,8 @@ def __init__(
         self.mem_log("config.py: end init config")

+        skip_mode_validate(self.taskdefs)
+
     @staticmethod
     def _warn_if_queues_have_implicit_tasks(
         config, taskdefs, max_warning_lines
@@ -1700,10 +1704,6 @@ def process_config_env(self):
             ]
         )

-    def run_mode(self) -> str:
-        """Return the run mode."""
-        return RunMode.get(self.options)
-
     def _check_task_event_handlers(self):
         """Check custom event handler templates can be expanded.
@@ -2455,7 +2455,9 @@ def _get_taskdef(self, name: str) -> TaskDef: # Get the taskdef object for generating the task proxy class taskd = TaskDef( - name, rtcfg, self.run_mode(), self.start_point, + name, + rtcfg, + self.start_point, self.initial_point) # TODO - put all taskd.foo items in a single config dict diff --git a/cylc/flow/data_messages.proto b/cylc/flow/data_messages.proto index c0af5094c0d..f259a735f0a 100644 --- a/cylc/flow/data_messages.proto +++ b/cylc/flow/data_messages.proto @@ -128,6 +128,7 @@ message PbRuntime { optional string environment = 16; optional string outputs = 17; optional string completion = 18; + optional string run_mode = 19; } diff --git a/cylc/flow/data_messages_pb2.py b/cylc/flow/data_messages_pb2.py index 7fb5ae84d24..0f16888d6bd 100644 --- a/cylc/flow/data_messages_pb2.py +++ b/cylc/flow/data_messages_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xd4\x0c\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! 
\x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x12\x1b\n\x0estates_updated\x18\' \x01(\x08H\x1b\x88\x01\x01\x12\x1c\n\x0fn_edge_distance\x18( \x01(\x05H\x1c\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_totalB\x11\n\x0f_states_updatedB\x12\n\x10_n_edge_distance\"\xe1\x06\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t \x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 \x01(\tH\x10\x88\x01\x01\x12\x17\n\ncompletion\x18\x12 \x01(\tH\x11\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputsB\r\n\x0b_completion\"\x9d\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f 
\x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! \x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtimeJ\x04\x08\x1d\x10\x1e\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\x91\x08\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 
\x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0c\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\r\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x1d \x01(\x05H\x0f\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xae\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 
\x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x16 \x01(\x05H\x0f\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 
\x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xd4\x0c\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 
\x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! \x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x12\x1b\n\x0estates_updated\x18\' \x01(\x08H\x1b\x88\x01\x01\x12\x1c\n\x0fn_edge_distance\x18( \x01(\x05H\x1c\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_totalB\x11\n\x0f_states_updatedB\x12\n\x10_n_edge_distance\"\x85\x07\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t \x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 \x01(\tH\x10\x88\x01\x01\x12\x17\n\ncompletion\x18\x12 \x01(\tH\x11\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 
\x01(\tH\x12\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputsB\r\n\x0b_completionB\x0b\n\t_run_mode\"\x9d\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! \x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtimeJ\x04\x08\x1d\x10\x1e\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 
\x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\x91\x08\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0c\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\r\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x1d \x01(\x05H\x0f\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b 
\x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xae\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x16 \x01(\x05H\x0f\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 
\x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -46,55 +46,55 @@ _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_start=1493 _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_end=1566 _globals['_PBRUNTIME']._serialized_start=2014 - _globals['_PBRUNTIME']._serialized_end=2879 - _globals['_PBJOB']._serialized_start=2882 - _globals['_PBJOB']._serialized_end=3551 - _globals['_PBTASK']._serialized_start=3554 - _globals['_PBTASK']._serialized_end=3908 - _globals['_PBPOLLTASK']._serialized_start=3911 - _globals['_PBPOLLTASK']._serialized_end=4127 - _globals['_PBCONDITION']._serialized_start=4130 - _globals['_PBCONDITION']._serialized_end=4333 - 
_globals['_PBPREREQUISITE']._serialized_start=4336 - _globals['_PBPREREQUISITE']._serialized_end=4486 - _globals['_PBOUTPUT']._serialized_start=4489 - _globals['_PBOUTPUT']._serialized_end=4629 - _globals['_PBTRIGGER']._serialized_start=4632 - _globals['_PBTRIGGER']._serialized_end=4797 - _globals['_PBTASKPROXY']._serialized_start=4800 - _globals['_PBTASKPROXY']._serialized_end=5841 - _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_start=5451 - _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_end=5508 - _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_start=5510 - _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_end=5577 - _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_start=5579 - _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_end=5639 - _globals['_PBFAMILY']._serialized_start=5844 - _globals['_PBFAMILY']._serialized_end=6172 - _globals['_PBFAMILYPROXY']._serialized_start=6175 - _globals['_PBFAMILYPROXY']._serialized_end=6989 + _globals['_PBRUNTIME']._serialized_end=2915 + _globals['_PBJOB']._serialized_start=2918 + _globals['_PBJOB']._serialized_end=3587 + _globals['_PBTASK']._serialized_start=3590 + _globals['_PBTASK']._serialized_end=3944 + _globals['_PBPOLLTASK']._serialized_start=3947 + _globals['_PBPOLLTASK']._serialized_end=4163 + _globals['_PBCONDITION']._serialized_start=4166 + _globals['_PBCONDITION']._serialized_end=4369 + _globals['_PBPREREQUISITE']._serialized_start=4372 + _globals['_PBPREREQUISITE']._serialized_end=4522 + _globals['_PBOUTPUT']._serialized_start=4525 + _globals['_PBOUTPUT']._serialized_end=4665 + _globals['_PBTRIGGER']._serialized_start=4668 + _globals['_PBTRIGGER']._serialized_end=4833 + _globals['_PBTASKPROXY']._serialized_start=4836 + _globals['_PBTASKPROXY']._serialized_end=5877 + _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_start=5487 + _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_end=5544 + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_start=5546 + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_end=5613 + _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_start=5615 + _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_end=5675 + _globals['_PBFAMILY']._serialized_start=5880 + _globals['_PBFAMILY']._serialized_end=6208 + _globals['_PBFAMILYPROXY']._serialized_start=6211 + _globals['_PBFAMILYPROXY']._serialized_end=7025 _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_start=1441 _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_end=1491 - _globals['_PBEDGE']._serialized_start=6992 - _globals['_PBEDGE']._serialized_end=7180 - _globals['_PBEDGES']._serialized_start=7182 - _globals['_PBEDGES']._serialized_end=7305 - _globals['_PBENTIREWORKFLOW']._serialized_start=7308 - _globals['_PBENTIREWORKFLOW']._serialized_end=7550 - _globals['_EDELTAS']._serialized_start=7553 - _globals['_EDELTAS']._serialized_end=7728 - _globals['_FDELTAS']._serialized_start=7731 - _globals['_FDELTAS']._serialized_end=7910 - _globals['_FPDELTAS']._serialized_start=7913 - _globals['_FPDELTAS']._serialized_end=8103 - _globals['_JDELTAS']._serialized_start=8106 - _globals['_JDELTAS']._serialized_end=8279 - _globals['_TDELTAS']._serialized_start=8282 - _globals['_TDELTAS']._serialized_end=8457 - _globals['_TPDELTAS']._serialized_start=8460 - _globals['_TPDELTAS']._serialized_end=8646 - _globals['_WDELTAS']._serialized_start=8649 - _globals['_WDELTAS']._serialized_end=8844 - _globals['_ALLDELTAS']._serialized_start=8847 - _globals['_ALLDELTAS']._serialized_end=9056 + _globals['_PBEDGE']._serialized_start=7028 
+ _globals['_PBEDGE']._serialized_end=7216 + _globals['_PBEDGES']._serialized_start=7218 + _globals['_PBEDGES']._serialized_end=7341 + _globals['_PBENTIREWORKFLOW']._serialized_start=7344 + _globals['_PBENTIREWORKFLOW']._serialized_end=7586 + _globals['_EDELTAS']._serialized_start=7589 + _globals['_EDELTAS']._serialized_end=7764 + _globals['_FDELTAS']._serialized_start=7767 + _globals['_FDELTAS']._serialized_end=7946 + _globals['_FPDELTAS']._serialized_start=7949 + _globals['_FPDELTAS']._serialized_end=8139 + _globals['_JDELTAS']._serialized_start=8142 + _globals['_JDELTAS']._serialized_end=8315 + _globals['_TDELTAS']._serialized_start=8318 + _globals['_TDELTAS']._serialized_end=8493 + _globals['_TPDELTAS']._serialized_start=8496 + _globals['_TPDELTAS']._serialized_end=8682 + _globals['_WDELTAS']._serialized_start=8685 + _globals['_WDELTAS']._serialized_end=8880 + _globals['_ALLDELTAS']._serialized_start=8883 + _globals['_ALLDELTAS']._serialized_end=9092 # @@protoc_insertion_point(module_scope) diff --git a/cylc/flow/data_messages_pb2.pyi b/cylc/flow/data_messages_pb2.pyi index 4e96c6ed2da..8c80f7f8f10 100644 --- a/cylc/flow/data_messages_pb2.pyi +++ b/cylc/flow/data_messages_pb2.pyi @@ -6,7 +6,7 @@ from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Map DESCRIPTOR: _descriptor.FileDescriptor class PbMeta(_message.Message): - __slots__ = ["title", "description", "URL", "user_defined"] + __slots__ = ("title", "description", "URL", "user_defined") TITLE_FIELD_NUMBER: _ClassVar[int] DESCRIPTION_FIELD_NUMBER: _ClassVar[int] URL_FIELD_NUMBER: _ClassVar[int] @@ -18,7 +18,7 @@ class PbMeta(_message.Message): def __init__(self, title: _Optional[str] = ..., description: _Optional[str] = ..., URL: _Optional[str] = ..., user_defined: _Optional[str] = ...) -> None: ... class PbTimeZone(_message.Message): - __slots__ = ["hours", "minutes", "string_basic", "string_extended"] + __slots__ = ("hours", "minutes", "string_basic", "string_extended") HOURS_FIELD_NUMBER: _ClassVar[int] MINUTES_FIELD_NUMBER: _ClassVar[int] STRING_BASIC_FIELD_NUMBER: _ClassVar[int] @@ -30,22 +30,22 @@ class PbTimeZone(_message.Message): def __init__(self, hours: _Optional[int] = ..., minutes: _Optional[int] = ..., string_basic: _Optional[str] = ..., string_extended: _Optional[str] = ...) -> None: ... class PbTaskProxyRefs(_message.Message): - __slots__ = ["task_proxies"] + __slots__ = ("task_proxies",) TASK_PROXIES_FIELD_NUMBER: _ClassVar[int] task_proxies: _containers.RepeatedScalarFieldContainer[str] def __init__(self, task_proxies: _Optional[_Iterable[str]] = ...) -> None: ... 
class PbWorkflow(_message.Message): - __slots__ = ["stamp", "id", "name", "status", "host", "port", "owner", "tasks", "families", "edges", "api_version", "cylc_version", "last_updated", "meta", "newest_active_cycle_point", "oldest_active_cycle_point", "reloaded", "run_mode", "cycling_mode", "state_totals", "workflow_log_dir", "time_zone_info", "tree_depth", "job_log_names", "ns_def_order", "states", "task_proxies", "family_proxies", "status_msg", "is_held_total", "jobs", "pub_port", "broadcasts", "is_queued_total", "latest_state_tasks", "pruned", "is_runahead_total", "states_updated", "n_edge_distance"] + __slots__ = ("stamp", "id", "name", "status", "host", "port", "owner", "tasks", "families", "edges", "api_version", "cylc_version", "last_updated", "meta", "newest_active_cycle_point", "oldest_active_cycle_point", "reloaded", "run_mode", "cycling_mode", "state_totals", "workflow_log_dir", "time_zone_info", "tree_depth", "job_log_names", "ns_def_order", "states", "task_proxies", "family_proxies", "status_msg", "is_held_total", "jobs", "pub_port", "broadcasts", "is_queued_total", "latest_state_tasks", "pruned", "is_runahead_total", "states_updated", "n_edge_distance") class StateTotalsEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str value: int def __init__(self, key: _Optional[str] = ..., value: _Optional[int] = ...) -> None: ... class LatestStateTasksEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str @@ -132,7 +132,7 @@ class PbWorkflow(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., name: _Optional[str] = ..., status: _Optional[str] = ..., host: _Optional[str] = ..., port: _Optional[int] = ..., owner: _Optional[str] = ..., tasks: _Optional[_Iterable[str]] = ..., families: _Optional[_Iterable[str]] = ..., edges: _Optional[_Union[PbEdges, _Mapping]] = ..., api_version: _Optional[int] = ..., cylc_version: _Optional[str] = ..., last_updated: _Optional[float] = ..., meta: _Optional[_Union[PbMeta, _Mapping]] = ..., newest_active_cycle_point: _Optional[str] = ..., oldest_active_cycle_point: _Optional[str] = ..., reloaded: bool = ..., run_mode: _Optional[str] = ..., cycling_mode: _Optional[str] = ..., state_totals: _Optional[_Mapping[str, int]] = ..., workflow_log_dir: _Optional[str] = ..., time_zone_info: _Optional[_Union[PbTimeZone, _Mapping]] = ..., tree_depth: _Optional[int] = ..., job_log_names: _Optional[_Iterable[str]] = ..., ns_def_order: _Optional[_Iterable[str]] = ..., states: _Optional[_Iterable[str]] = ..., task_proxies: _Optional[_Iterable[str]] = ..., family_proxies: _Optional[_Iterable[str]] = ..., status_msg: _Optional[str] = ..., is_held_total: _Optional[int] = ..., jobs: _Optional[_Iterable[str]] = ..., pub_port: _Optional[int] = ..., broadcasts: _Optional[str] = ..., is_queued_total: _Optional[int] = ..., latest_state_tasks: _Optional[_Mapping[str, PbTaskProxyRefs]] = ..., pruned: bool = ..., is_runahead_total: _Optional[int] = ..., states_updated: bool = ..., n_edge_distance: _Optional[int] = ...) -> None: ... 
class PbRuntime(_message.Message): - __slots__ = ["platform", "script", "init_script", "env_script", "err_script", "exit_script", "pre_script", "post_script", "work_sub_dir", "execution_polling_intervals", "execution_retry_delays", "execution_time_limit", "submission_polling_intervals", "submission_retry_delays", "directives", "environment", "outputs", "completion"] + __slots__ = ("platform", "script", "init_script", "env_script", "err_script", "exit_script", "pre_script", "post_script", "work_sub_dir", "execution_polling_intervals", "execution_retry_delays", "execution_time_limit", "submission_polling_intervals", "submission_retry_delays", "directives", "environment", "outputs", "completion", "run_mode") PLATFORM_FIELD_NUMBER: _ClassVar[int] SCRIPT_FIELD_NUMBER: _ClassVar[int] INIT_SCRIPT_FIELD_NUMBER: _ClassVar[int] @@ -151,6 +151,7 @@ class PbRuntime(_message.Message): ENVIRONMENT_FIELD_NUMBER: _ClassVar[int] OUTPUTS_FIELD_NUMBER: _ClassVar[int] COMPLETION_FIELD_NUMBER: _ClassVar[int] + RUN_MODE_FIELD_NUMBER: _ClassVar[int] platform: str script: str init_script: str @@ -169,10 +170,11 @@ class PbRuntime(_message.Message): environment: str outputs: str completion: str - def __init__(self, platform: _Optional[str] = ..., script: _Optional[str] = ..., init_script: _Optional[str] = ..., env_script: _Optional[str] = ..., err_script: _Optional[str] = ..., exit_script: _Optional[str] = ..., pre_script: _Optional[str] = ..., post_script: _Optional[str] = ..., work_sub_dir: _Optional[str] = ..., execution_polling_intervals: _Optional[str] = ..., execution_retry_delays: _Optional[str] = ..., execution_time_limit: _Optional[str] = ..., submission_polling_intervals: _Optional[str] = ..., submission_retry_delays: _Optional[str] = ..., directives: _Optional[str] = ..., environment: _Optional[str] = ..., outputs: _Optional[str] = ..., completion: _Optional[str] = ...) -> None: ... + run_mode: str + def __init__(self, platform: _Optional[str] = ..., script: _Optional[str] = ..., init_script: _Optional[str] = ..., env_script: _Optional[str] = ..., err_script: _Optional[str] = ..., exit_script: _Optional[str] = ..., pre_script: _Optional[str] = ..., post_script: _Optional[str] = ..., work_sub_dir: _Optional[str] = ..., execution_polling_intervals: _Optional[str] = ..., execution_retry_delays: _Optional[str] = ..., execution_time_limit: _Optional[str] = ..., submission_polling_intervals: _Optional[str] = ..., submission_retry_delays: _Optional[str] = ..., directives: _Optional[str] = ..., environment: _Optional[str] = ..., outputs: _Optional[str] = ..., completion: _Optional[str] = ..., run_mode: _Optional[str] = ...) -> None: ... 
class PbJob(_message.Message): - __slots__ = ["stamp", "id", "submit_num", "state", "task_proxy", "submitted_time", "started_time", "finished_time", "job_id", "job_runner_name", "execution_time_limit", "platform", "job_log_dir", "name", "cycle_point", "messages", "runtime"] + __slots__ = ("stamp", "id", "submit_num", "state", "task_proxy", "submitted_time", "started_time", "finished_time", "job_id", "job_runner_name", "execution_time_limit", "platform", "job_log_dir", "name", "cycle_point", "messages", "runtime") STAMP_FIELD_NUMBER: _ClassVar[int] ID_FIELD_NUMBER: _ClassVar[int] SUBMIT_NUM_FIELD_NUMBER: _ClassVar[int] @@ -210,7 +212,7 @@ class PbJob(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., submit_num: _Optional[int] = ..., state: _Optional[str] = ..., task_proxy: _Optional[str] = ..., submitted_time: _Optional[str] = ..., started_time: _Optional[str] = ..., finished_time: _Optional[str] = ..., job_id: _Optional[str] = ..., job_runner_name: _Optional[str] = ..., execution_time_limit: _Optional[float] = ..., platform: _Optional[str] = ..., job_log_dir: _Optional[str] = ..., name: _Optional[str] = ..., cycle_point: _Optional[str] = ..., messages: _Optional[_Iterable[str]] = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ...) -> None: ... class PbTask(_message.Message): - __slots__ = ["stamp", "id", "name", "meta", "mean_elapsed_time", "depth", "proxies", "namespace", "parents", "first_parent", "runtime"] + __slots__ = ("stamp", "id", "name", "meta", "mean_elapsed_time", "depth", "proxies", "namespace", "parents", "first_parent", "runtime") STAMP_FIELD_NUMBER: _ClassVar[int] ID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] @@ -236,7 +238,7 @@ class PbTask(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., name: _Optional[str] = ..., meta: _Optional[_Union[PbMeta, _Mapping]] = ..., mean_elapsed_time: _Optional[float] = ..., depth: _Optional[int] = ..., proxies: _Optional[_Iterable[str]] = ..., namespace: _Optional[_Iterable[str]] = ..., parents: _Optional[_Iterable[str]] = ..., first_parent: _Optional[str] = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ...) -> None: ... class PbPollTask(_message.Message): - __slots__ = ["local_proxy", "workflow", "remote_proxy", "req_state", "graph_string"] + __slots__ = ("local_proxy", "workflow", "remote_proxy", "req_state", "graph_string") LOCAL_PROXY_FIELD_NUMBER: _ClassVar[int] WORKFLOW_FIELD_NUMBER: _ClassVar[int] REMOTE_PROXY_FIELD_NUMBER: _ClassVar[int] @@ -250,7 +252,7 @@ class PbPollTask(_message.Message): def __init__(self, local_proxy: _Optional[str] = ..., workflow: _Optional[str] = ..., remote_proxy: _Optional[str] = ..., req_state: _Optional[str] = ..., graph_string: _Optional[str] = ...) -> None: ... class PbCondition(_message.Message): - __slots__ = ["task_proxy", "expr_alias", "req_state", "satisfied", "message"] + __slots__ = ("task_proxy", "expr_alias", "req_state", "satisfied", "message") TASK_PROXY_FIELD_NUMBER: _ClassVar[int] EXPR_ALIAS_FIELD_NUMBER: _ClassVar[int] REQ_STATE_FIELD_NUMBER: _ClassVar[int] @@ -264,7 +266,7 @@ class PbCondition(_message.Message): def __init__(self, task_proxy: _Optional[str] = ..., expr_alias: _Optional[str] = ..., req_state: _Optional[str] = ..., satisfied: bool = ..., message: _Optional[str] = ...) -> None: ... 
class PbPrerequisite(_message.Message): - __slots__ = ["expression", "conditions", "cycle_points", "satisfied"] + __slots__ = ("expression", "conditions", "cycle_points", "satisfied") EXPRESSION_FIELD_NUMBER: _ClassVar[int] CONDITIONS_FIELD_NUMBER: _ClassVar[int] CYCLE_POINTS_FIELD_NUMBER: _ClassVar[int] @@ -276,7 +278,7 @@ class PbPrerequisite(_message.Message): def __init__(self, expression: _Optional[str] = ..., conditions: _Optional[_Iterable[_Union[PbCondition, _Mapping]]] = ..., cycle_points: _Optional[_Iterable[str]] = ..., satisfied: bool = ...) -> None: ... class PbOutput(_message.Message): - __slots__ = ["label", "message", "satisfied", "time"] + __slots__ = ("label", "message", "satisfied", "time") LABEL_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] SATISFIED_FIELD_NUMBER: _ClassVar[int] @@ -288,7 +290,7 @@ class PbOutput(_message.Message): def __init__(self, label: _Optional[str] = ..., message: _Optional[str] = ..., satisfied: bool = ..., time: _Optional[float] = ...) -> None: ... class PbTrigger(_message.Message): - __slots__ = ["id", "label", "message", "satisfied", "time"] + __slots__ = ("id", "label", "message", "satisfied", "time") ID_FIELD_NUMBER: _ClassVar[int] LABEL_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] @@ -302,23 +304,23 @@ class PbTrigger(_message.Message): def __init__(self, id: _Optional[str] = ..., label: _Optional[str] = ..., message: _Optional[str] = ..., satisfied: bool = ..., time: _Optional[float] = ...) -> None: ... class PbTaskProxy(_message.Message): - __slots__ = ["stamp", "id", "task", "state", "cycle_point", "depth", "job_submits", "outputs", "namespace", "prerequisites", "jobs", "first_parent", "name", "is_held", "edges", "ancestors", "flow_nums", "external_triggers", "xtriggers", "is_queued", "is_runahead", "flow_wait", "runtime", "graph_depth"] + __slots__ = ("stamp", "id", "task", "state", "cycle_point", "depth", "job_submits", "outputs", "namespace", "prerequisites", "jobs", "first_parent", "name", "is_held", "edges", "ancestors", "flow_nums", "external_triggers", "xtriggers", "is_queued", "is_runahead", "flow_wait", "runtime", "graph_depth") class OutputsEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str value: PbOutput def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[PbOutput, _Mapping]] = ...) -> None: ... class ExternalTriggersEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str value: PbTrigger def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[PbTrigger, _Mapping]] = ...) -> None: ... 
class XtriggersEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str @@ -375,7 +377,7 @@ class PbTaskProxy(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., task: _Optional[str] = ..., state: _Optional[str] = ..., cycle_point: _Optional[str] = ..., depth: _Optional[int] = ..., job_submits: _Optional[int] = ..., outputs: _Optional[_Mapping[str, PbOutput]] = ..., namespace: _Optional[_Iterable[str]] = ..., prerequisites: _Optional[_Iterable[_Union[PbPrerequisite, _Mapping]]] = ..., jobs: _Optional[_Iterable[str]] = ..., first_parent: _Optional[str] = ..., name: _Optional[str] = ..., is_held: bool = ..., edges: _Optional[_Iterable[str]] = ..., ancestors: _Optional[_Iterable[str]] = ..., flow_nums: _Optional[str] = ..., external_triggers: _Optional[_Mapping[str, PbTrigger]] = ..., xtriggers: _Optional[_Mapping[str, PbTrigger]] = ..., is_queued: bool = ..., is_runahead: bool = ..., flow_wait: bool = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ..., graph_depth: _Optional[int] = ...) -> None: ... class PbFamily(_message.Message): - __slots__ = ["stamp", "id", "name", "meta", "depth", "proxies", "parents", "child_tasks", "child_families", "first_parent", "runtime"] + __slots__ = ("stamp", "id", "name", "meta", "depth", "proxies", "parents", "child_tasks", "child_families", "first_parent", "runtime") STAMP_FIELD_NUMBER: _ClassVar[int] ID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] @@ -401,9 +403,9 @@ class PbFamily(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., name: _Optional[str] = ..., meta: _Optional[_Union[PbMeta, _Mapping]] = ..., depth: _Optional[int] = ..., proxies: _Optional[_Iterable[str]] = ..., parents: _Optional[_Iterable[str]] = ..., child_tasks: _Optional[_Iterable[str]] = ..., child_families: _Optional[_Iterable[str]] = ..., first_parent: _Optional[str] = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ...) -> None: ... 
class PbFamilyProxy(_message.Message): - __slots__ = ["stamp", "id", "cycle_point", "name", "family", "state", "depth", "first_parent", "child_tasks", "child_families", "is_held", "ancestors", "states", "state_totals", "is_held_total", "is_queued", "is_queued_total", "is_runahead", "is_runahead_total", "runtime", "graph_depth"] + __slots__ = ("stamp", "id", "cycle_point", "name", "family", "state", "depth", "first_parent", "child_tasks", "child_families", "is_held", "ancestors", "states", "state_totals", "is_held_total", "is_queued", "is_queued_total", "is_runahead", "is_runahead_total", "runtime", "graph_depth") class StateTotalsEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str @@ -454,7 +456,7 @@ class PbFamilyProxy(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., cycle_point: _Optional[str] = ..., name: _Optional[str] = ..., family: _Optional[str] = ..., state: _Optional[str] = ..., depth: _Optional[int] = ..., first_parent: _Optional[str] = ..., child_tasks: _Optional[_Iterable[str]] = ..., child_families: _Optional[_Iterable[str]] = ..., is_held: bool = ..., ancestors: _Optional[_Iterable[str]] = ..., states: _Optional[_Iterable[str]] = ..., state_totals: _Optional[_Mapping[str, int]] = ..., is_held_total: _Optional[int] = ..., is_queued: bool = ..., is_queued_total: _Optional[int] = ..., is_runahead: bool = ..., is_runahead_total: _Optional[int] = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ..., graph_depth: _Optional[int] = ...) -> None: ... class PbEdge(_message.Message): - __slots__ = ["stamp", "id", "source", "target", "suicide", "cond"] + __slots__ = ("stamp", "id", "source", "target", "suicide", "cond") STAMP_FIELD_NUMBER: _ClassVar[int] ID_FIELD_NUMBER: _ClassVar[int] SOURCE_FIELD_NUMBER: _ClassVar[int] @@ -470,7 +472,7 @@ class PbEdge(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., source: _Optional[str] = ..., target: _Optional[str] = ..., suicide: bool = ..., cond: bool = ...) -> None: ... class PbEdges(_message.Message): - __slots__ = ["id", "edges", "workflow_polling_tasks", "leaves", "feet"] + __slots__ = ("id", "edges", "workflow_polling_tasks", "leaves", "feet") ID_FIELD_NUMBER: _ClassVar[int] EDGES_FIELD_NUMBER: _ClassVar[int] WORKFLOW_POLLING_TASKS_FIELD_NUMBER: _ClassVar[int] @@ -484,7 +486,7 @@ class PbEdges(_message.Message): def __init__(self, id: _Optional[str] = ..., edges: _Optional[_Iterable[str]] = ..., workflow_polling_tasks: _Optional[_Iterable[_Union[PbPollTask, _Mapping]]] = ..., leaves: _Optional[_Iterable[str]] = ..., feet: _Optional[_Iterable[str]] = ...) -> None: ... 
class PbEntireWorkflow(_message.Message): - __slots__ = ["workflow", "tasks", "task_proxies", "jobs", "families", "family_proxies", "edges"] + __slots__ = ("workflow", "tasks", "task_proxies", "jobs", "families", "family_proxies", "edges") WORKFLOW_FIELD_NUMBER: _ClassVar[int] TASKS_FIELD_NUMBER: _ClassVar[int] TASK_PROXIES_FIELD_NUMBER: _ClassVar[int] @@ -502,7 +504,7 @@ class PbEntireWorkflow(_message.Message): def __init__(self, workflow: _Optional[_Union[PbWorkflow, _Mapping]] = ..., tasks: _Optional[_Iterable[_Union[PbTask, _Mapping]]] = ..., task_proxies: _Optional[_Iterable[_Union[PbTaskProxy, _Mapping]]] = ..., jobs: _Optional[_Iterable[_Union[PbJob, _Mapping]]] = ..., families: _Optional[_Iterable[_Union[PbFamily, _Mapping]]] = ..., family_proxies: _Optional[_Iterable[_Union[PbFamilyProxy, _Mapping]]] = ..., edges: _Optional[_Iterable[_Union[PbEdge, _Mapping]]] = ...) -> None: ... class EDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -518,7 +520,7 @@ class EDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbEdge, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbEdge, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class FDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -534,7 +536,7 @@ class FDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbFamily, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbFamily, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class FPDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -550,7 +552,7 @@ class FPDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbFamilyProxy, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbFamilyProxy, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class JDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -566,7 +568,7 @@ class JDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbJob, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbJob, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... 
class TDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -582,7 +584,7 @@ class TDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbTask, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbTask, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class TPDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -598,7 +600,7 @@ class TPDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbTaskProxy, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbTaskProxy, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class WDeltas(_message.Message): - __slots__ = ["time", "added", "updated", "reloaded", "pruned"] + __slots__ = ("time", "added", "updated", "reloaded", "pruned") TIME_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] UPDATED_FIELD_NUMBER: _ClassVar[int] @@ -612,7 +614,7 @@ class WDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., added: _Optional[_Union[PbWorkflow, _Mapping]] = ..., updated: _Optional[_Union[PbWorkflow, _Mapping]] = ..., reloaded: bool = ..., pruned: _Optional[str] = ...) -> None: ... 
class AllDeltas(_message.Message): - __slots__ = ["families", "family_proxies", "jobs", "tasks", "task_proxies", "edges", "workflow"] + __slots__ = ("families", "family_proxies", "jobs", "tasks", "task_proxies", "edges", "workflow") FAMILIES_FIELD_NUMBER: _ClassVar[int] FAMILY_PROXIES_FIELD_NUMBER: _ClassVar[int] JOBS_FIELD_NUMBER: _ClassVar[int] diff --git a/cylc/flow/data_store_mgr.py b/cylc/flow/data_store_mgr.py index 98119a9ec0d..47c2a4c9efa 100644 --- a/cylc/flow/data_store_mgr.py +++ b/cylc/flow/data_store_mgr.py @@ -84,6 +84,7 @@ pdeepcopy, poverride ) +from cylc.flow.run_modes import RunMode from cylc.flow.workflow_status import ( get_workflow_status, get_workflow_status_msg, @@ -259,6 +260,7 @@ def runtime_from_config(rtconfig): pre_script=rtconfig['pre-script'], post_script=rtconfig['post-script'], work_sub_dir=rtconfig['work sub-directory'], + run_mode=rtconfig['run mode'], execution_time_limit=str(rtconfig['execution time limit'] or ''), execution_polling_intervals=listjoin( rtconfig['execution polling intervals'] @@ -698,8 +700,7 @@ def generate_definition_elements(self): time_zone_info = TIME_ZONE_LOCAL_INFO for key, val in time_zone_info.items(): setbuff(workflow.time_zone_info, key, val) - - workflow.run_mode = config.run_mode() + workflow.run_mode = RunMode.get(config.options).value workflow.cycling_mode = config.cfg['scheduling']['cycling mode'] workflow.workflow_log_dir = self.schd.workflow_log_dir workflow.job_log_names.extend(list(JOB_LOG_OPTS.values())) diff --git a/cylc/flow/etc/syntax/cylc.lang b/cylc/flow/etc/syntax/cylc.lang index c3f43da2c95..6179a18f750 100644 --- a/cylc/flow/etc/syntax/cylc.lang +++ b/cylc/flow/etc/syntax/cylc.lang @@ -93,7 +93,6 @@ work sub-directory warning handlers verbose mode - user to title time limit buffer @@ -116,15 +115,17 @@ stall handlers speedup factor special tasks + skip simulation shutdown handlers + sequential xtriggers sequential script scheduling scheduler runtime runahead limit - run-dir + run mode retry handlers retrieve job logs retry delays retrieve job logs max size @@ -198,10 +199,12 @@ cycle point format custom handlers critical handlers + completion clock-trigger clock-expire batch system batch submit command template + alt-cylc-run-dir allow implicit tasks abort on workflow timeout abort on stall timeout diff --git a/cylc/flow/etc/syntax/cylc.xml b/cylc/flow/etc/syntax/cylc.xml index da11c1215e1..be74c2fa4ad 100644 --- a/cylc/flow/etc/syntax/cylc.xml +++ b/cylc/flow/etc/syntax/cylc.xml [hunk bodies lost in extraction: the XML <Keyword> entries here appear to mirror the cylc.lang keyword changes above (drop "user to" and "run-dir"; add "skip", "sequential xtriggers", "run mode", "completion" and "alt-cylc-run-dir")] diff --git a/cylc/flow/network/resolvers.py b/cylc/flow/network/resolvers.py index fc9b67eeef5..79b91f97ee7 100644 --- a/cylc/flow/network/resolvers.py +++ b/cylc/flow/network/resolvers.py @@ -49,10 +49,10 @@ from cylc.flow.id import Tokens from cylc.flow.network.schema import ( DEF_TYPES, - RUNTIME_FIELD_TO_CFG_MAP, NodesEdges, PROXY_NODES, SUB_RESOLVERS, + runtime_schema_to_cfg, sort_elements, ) @@ -791,10 +791,8 @@ def broadcast( # Convert schema field names to workflow config setting names if # applicable: for i, dict_ in enumerate(settings): - settings[i] = { - RUNTIME_FIELD_TO_CFG_MAP.get(key, key): value - for key, value in dict_.items() - } + settings[i] = runtime_schema_to_cfg(dict_) + if mode == 'put_broadcast': return self.schd.task_events_mgr.broadcast_mgr.put_broadcast( cycle_points, namespaces, settings) diff --git a/cylc/flow/network/schema.py b/cylc/flow/network/schema.py index a1b9fc1c50c..84e019b17e0 100644 --- a/cylc/flow/network/schema.py +++
b/cylc/flow/network/schema.py @@ -71,6 +71,8 @@ FLOW_NONE, ) from cylc.flow.id import Tokens +from cylc.flow.run_modes import ( + TASK_CONFIG_RUN_MODES, WORKFLOW_RUN_MODES, RunMode) from cylc.flow.task_outputs import SORT_ORDERS from cylc.flow.task_state import ( TASK_STATUS_DESC, @@ -89,6 +91,7 @@ if TYPE_CHECKING: + from enum import Enum from graphql import ResolveInfo from graphql.type.definition import ( GraphQLList, @@ -620,6 +623,21 @@ class Meta: string_extended = String() +# The run mode for the workflow. +WorkflowRunMode = graphene.Enum( + 'WorkflowRunMode', + [(m.capitalize(), m) for m in WORKFLOW_RUN_MODES], + description=lambda x: RunMode(x.value).describe() if x else None, +) + +# The run mode for the task. +TaskRunMode = graphene.Enum( + 'TaskRunMode', + [(m.capitalize(), m) for m in TASK_CONFIG_RUN_MODES], + description=lambda x: RunMode(x.value).describe() if x else None, +) + + class Workflow(ObjectType): class Meta: description = 'Global workflow info.' @@ -847,6 +865,7 @@ class Meta: directives = graphene.List(RuntimeSetting, resolver=resolve_json_dump) environment = graphene.List(RuntimeSetting, resolver=resolve_json_dump) outputs = graphene.List(RuntimeSetting, resolver=resolve_json_dump) + run_mode = TaskRunMode(default_value=TaskRunMode.Live.name) RUNTIME_FIELD_TO_CFG_MAP = { @@ -865,6 +884,20 @@ class Meta: """Map GQL Runtime fields' names to workflow config setting names.""" +def runtime_schema_to_cfg(runtime: dict) -> dict: + """Convert GQL Runtime field names to workflow config setting names and + perform any necessary processing on the values.""" + # We have to manually lowercase the run_mode field because we don't define + # a proper schema for BroadcastSetting (it's just GenericScalar) so + # Graphene has no way to know that it should be a TaskRunMode enum. + return { + RUNTIME_FIELD_TO_CFG_MAP.get(key, key): ( + value.lower() if key == 'run_mode' else value + ) + for key, value in runtime.items() + } + + class Job(ObjectType): class Meta: description = "Jobs." @@ -1527,9 +1560,9 @@ class RuntimeConfiguration(String): class BroadcastMode(graphene.Enum): - Set = 'put_broadcast' - Clear = 'clear_broadcast' - Expire = 'expire_broadcast' + Set = cast('Enum', 'put_broadcast') + Clear = cast('Enum', 'clear_broadcast') + Expire = cast('Enum', 'expire_broadcast') @property def description(self): @@ -1654,10 +1687,10 @@ class WorkflowStopMode(graphene.Enum): # * Graphene requires special enums. # * We only want to offer a subset of stop modes (REQUEST_* only).
- Clean = StopMode.REQUEST_CLEAN.value # type: graphene.Enum - Kill = StopMode.REQUEST_KILL.value # type: graphene.Enum - Now = StopMode.REQUEST_NOW.value # type: graphene.Enum - NowNow = StopMode.REQUEST_NOW_NOW.value # type: graphene.Enum + Clean = cast('Enum', StopMode.REQUEST_CLEAN.value) + Kill = cast('Enum', StopMode.REQUEST_KILL.value) + Now = cast('Enum', StopMode.REQUEST_NOW.value) + NowNow = cast('Enum', StopMode.REQUEST_NOW_NOW.value) @property def description(self): @@ -1714,7 +1747,7 @@ class Arguments: mode = BroadcastMode( # use the enum name as the default value # https://github.com/graphql-python/graphql-core-legacy/issues/166 - default_value=BroadcastMode.Set.name, # type: ignore + default_value=BroadcastMode.Set.name, description='What type of broadcast is this?', required=True ) diff --git a/cylc/flow/platforms.py b/cylc/flow/platforms.py index 373b6058fdb..c75faa5283b 100644 --- a/cylc/flow/platforms.py +++ b/cylc/flow/platforms.py @@ -31,6 +31,7 @@ PlatformLookupError, CylcError, NoHostsError, NoPlatformsError) from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.hostuserutil import is_remote_host +from cylc.flow.run_modes import JOBLESS_MODES if TYPE_CHECKING: from cylc.flow.parsec.OrderedDict import OrderedDictWithDefaults @@ -266,7 +267,7 @@ def platform_from_name( return platform_data # If platform name in run mode and not otherwise defined: - if platform_name == 'SIMULATION': + if platform_name in JOBLESS_MODES: platform_data = deepcopy(platforms['localhost']) platform_data['name'] = 'localhost' return platform_data @@ -662,6 +663,7 @@ def get_install_target_to_platforms_map( else: install_target = get_install_target_from_platform(platform) ret.setdefault(install_target, []).append(platform) + return ret diff --git a/cylc/flow/prerequisite.py b/cylc/flow/prerequisite.py index d9889feaae1..8b5c2cd7941 100644 --- a/cylc/flow/prerequisite.py +++ b/cylc/flow/prerequisite.py @@ -39,6 +39,7 @@ from cylc.flow.data_messages_pb2 import PbCondition, PbPrerequisite from cylc.flow.exceptions import TriggerExpressionError from cylc.flow.id import quick_relative_id +from cylc.flow.run_modes import RunMode if TYPE_CHECKING: @@ -71,6 +72,7 @@ def coerce(tuple_: AnyPrereqTuple) -> 'PrereqTuple': SatisfiedState = Literal[ 'satisfied naturally', 'satisfied from database', + 'satisfied by skip mode', 'force satisfied', False ] @@ -255,14 +257,18 @@ def _eval_satisfied(self) -> bool: return res def satisfy_me( - self, outputs: Iterable['Tokens'], forced: bool = False + self, + outputs: Iterable['Tokens'], + mode: Optional[RunMode] = None, + forced: bool = False, ) -> 'Set[Tokens]': - """Set the given outputs as satisfied. + """Set the given outputs as satisfied (if they are not already). Return outputs that match. Args: outputs: List of outputs to satisfy. + mode: Task run mode. forced: If True, records that this should not be undone by `cylc remove`. """ @@ -274,9 +280,11 @@ def satisfy_me( if output_tuple not in self._satisfied: continue valid.add(output) - if self._satisfied[output_tuple] != 'satisfied naturally': + if not self._satisfied[output_tuple]: self[output_tuple] = ( - 'force satisfied' if forced else 'satisfied naturally' + 'force satisfied' if forced + else 'satisfied by skip mode' if mode == RunMode.SKIP + else 'satisfied naturally' ) return valid @@ -320,8 +328,8 @@ def api_dump(self) -> Optional[PbPrerequisite]: def set_satisfied(self) -> None: """Force this prerequisite into the satisfied state. - State can be overridden by calling `self.satisfy_me`. 
- + Sets all of the outputs in this prerequisite to satisfied if not + already. """ for task_output in self._satisfied: if not self._satisfied[task_output]: diff --git a/cylc/flow/run_modes/__init__.py b/cylc/flow/run_modes/__init__.py new file mode 100644 index 00000000000..91d65dba4b1 --- /dev/null +++ b/cylc/flow/run_modes/__init__.py @@ -0,0 +1,136 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from enum import Enum +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple + +if TYPE_CHECKING: + from optparse import Values + from cylc.flow.task_job_mgr import TaskJobManager + from cylc.flow.task_proxy import TaskProxy + + # The interface for submitting jobs + SubmissionInterface = Callable[ + [ # Args: + # the task job manager instance + 'TaskJobManager', + # the task to submit + 'TaskProxy', + # the task's runtime config (with broadcasts applied) + Dict[str, Any], + # the workflow ID + str, + # the current time as (float_unix_time, str_ISO8601) + Tuple[float, str] + ], + # Return False if the job requires live-mode submission + # (dummy mode does this), else return True. + bool + ] + + +class RunMode(Enum): + """The possible run modes of a task/workflow.""" + + LIVE = 'live' + """Tasks will submit their configured jobs.""" + + SIMULATION = 'simulation' + """Simulates job submission with configurable execution time + and succeeded/failed outcomes (but does not submit real jobs).""" + + DUMMY = 'dummy' + """Submits real jobs with empty scripts.""" + + SKIP = 'skip' + """Skips job submission; sets required outputs (by default) or + configured outputs.""" + + def describe(self): + """Return a user-friendly description of the run mode. + + For use by the configuration spec documenter. + """ + if self == self.LIVE: + return "Tasks will submit their configured jobs." + if self == self.SKIP: + return ( + "Skips job submission; sets required outputs" + " (by default) or configured outputs.") + if self == self.DUMMY: + return "Submits real jobs with empty scripts." + + # self == self.SIMULATION: + return ( + "Simulates job submission with configurable" + " execution time and succeeded/failed outcomes" + " (but does not submit real jobs).") + + @staticmethod + def get(options: 'Values') -> "RunMode": + """Return the workflow run mode from the options.""" + run_mode = getattr(options, 'run_mode', None) + if run_mode: + return RunMode(run_mode) + return RunMode.LIVE + + def get_submit_method(self) -> 'Optional[SubmissionInterface]': + """Return the job submission method for this run mode. + + This returns None for live-mode jobs as these use a + different code pathway for job submission.
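+ + A minimal doctest, grounded in the dispatch below (live mode + has no alternative submission method): + + >>> RunMode.LIVE.get_submit_method() is None + True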
+ """ + submit_task_job: 'Optional[SubmissionInterface]' = None + if self == RunMode.DUMMY: + from cylc.flow.run_modes.dummy import submit_task_job + elif self == RunMode.SIMULATION: + from cylc.flow.run_modes.simulation import submit_task_job + elif self == RunMode.SKIP: + from cylc.flow.run_modes.skip import submit_task_job + return submit_task_job + + +def disable_task_event_handlers(itask: 'TaskProxy'): + """Should we disable event handlers for this task? + + No event handlers in simulation mode, or in skip mode + if we don't deliberately enable them: + """ + mode = itask.run_mode + return ( + mode == RunMode.SIMULATION + or ( + mode == RunMode.SKIP + and itask.platform.get( + 'disable task event handlers', False) + ) + ) + + +# Modes available for running a whole workflow: +WORKFLOW_RUN_MODES = frozenset(i.value for i in { + RunMode.LIVE, RunMode.DUMMY, RunMode.SIMULATION}) + +# Modes which can be set in task config: +TASK_CONFIG_RUN_MODES = frozenset( + i.value for i in (RunMode.LIVE, RunMode.SKIP)) +# And those only available to the workflow: +WORKFLOW_ONLY_MODES = frozenset( + i.value for i in RunMode) - TASK_CONFIG_RUN_MODES + +# Modes which completely ignore the standard submission path: +JOBLESS_MODES = frozenset(i.value for i in { + RunMode.SKIP, RunMode.SIMULATION}) diff --git a/cylc/flow/run_modes/dummy.py b/cylc/flow/run_modes/dummy.py new file mode 100644 index 00000000000..26d887d87dc --- /dev/null +++ b/cylc/flow/run_modes/dummy.py @@ -0,0 +1,125 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +"""Utilities supporting dummy mode. + +Dummy mode shares settings with simulation mode. +""" + +from typing import TYPE_CHECKING, Any, Dict, Tuple + +from cylc.flow.run_modes.simulation import ( + ModeSettings, + disable_platforms, + get_simulated_run_len, + parse_fail_cycle_points +) +from cylc.flow.run_modes import RunMode +from cylc.flow.platforms import get_platform + + +if TYPE_CHECKING: + from cylc.flow.task_job_mgr import TaskJobManager + from cylc.flow.task_proxy import TaskProxy + from typing_extensions import Literal + + +CLEAR_THESE_SCRIPTS = [ + 'init-script', + 'env-script', + 'pre-script', + 'post-script', + 'err-script', + 'exit-script', +] + + +def submit_task_job( + task_job_mgr: 'TaskJobManager', + itask: 'TaskProxy', + rtconfig: Dict[str, Any], + workflow: str, + now: Tuple[float, str] +) -> 'Literal[False]': + """Submit a task in dummy mode. + + Returns: + False indicating that TaskJobManager needs to continue running the + live mode path. 
+ """ + configure_dummy_mode( + rtconfig, itask.tdef.rtconfig['simulation']['fail cycle points']) + + itask.summary['started_time'] = now[0] + task_job_mgr._set_retry_timers(itask, rtconfig) + + itask.mode_settings = ModeSettings( + itask, + task_job_mgr.workflow_db_mgr, + rtconfig + ) + + itask.platform = get_platform() + itask.platform['name'] = RunMode.DUMMY.value + itask.summary['job_runner_name'] = RunMode.DUMMY.value + itask.summary[task_job_mgr.KEY_EXECUTE_TIME_LIMIT] = ( + itask.mode_settings.simulated_run_length) + itask.jobs.append( + task_job_mgr.get_simulation_job_conf(itask, workflow)) + task_job_mgr.workflow_db_mgr.put_insert_task_jobs( + itask, { + 'time_submit': now[1], + 'try_num': itask.get_try_num(), + } + ) + + return False + + +def configure_dummy_mode(rtc: Dict[str, Any], fallback: str) -> None: + """Adjust task defs for dummy mode. + """ + rtc['submission retry delays'] = [1] + # Generate dummy scripting. + + for script in CLEAR_THESE_SCRIPTS: + rtc[script] = '' + + rtc['script'] = build_dummy_script( + rtc, get_simulated_run_len(rtc)) + disable_platforms(rtc) + # Disable environment, in case it depends on env-script. + rtc['environment'] = {} + rtc["simulation"][ + "fail cycle points" + ] = parse_fail_cycle_points( + rtc["simulation"]["fail cycle points"], fallback + ) + + +def build_dummy_script(rtc: Dict[str, Any], sleep_sec: int) -> str: + """Create fake scripting for dummy mode script. + """ + script = "sleep %d" % sleep_sec + # Dummy message outputs. + for msg in rtc['outputs'].values(): + script += "\ncylc message '%s'" % msg + if rtc['simulation']['fail try 1 only']: + arg1 = "true" + else: + arg1 = "false" + arg2 = " ".join(rtc['simulation']['fail cycle points']) + script += "\ncylc__job__dummy_result %s %s || exit 1" % (arg1, arg2) + return script diff --git a/cylc/flow/simulation.py b/cylc/flow/run_modes/simulation.py similarity index 56% rename from cylc/flow/simulation.py rename to cylc/flow/run_modes/simulation.py index 8ec4d279cb9..900a2c1fc4f 100644 --- a/cylc/flow/simulation.py +++ b/cylc/flow/run_modes/simulation.py @@ -13,40 +13,105 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-"""Utilities supporting simulation and skip modes +"""Utilities supporting simulation mode """ from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from logging import INFO +from typing import ( + TYPE_CHECKING, Any, Dict, List, Tuple, Union) from time import time from metomi.isodatetime.parsers import DurationParser from cylc.flow import LOG +from cylc.flow.cycling import PointBase from cylc.flow.cycling.loader import get_point from cylc.flow.exceptions import PointParsingError from cylc.flow.platforms import FORBIDDEN_WITH_PLATFORM +from cylc.flow.task_outputs import TASK_OUTPUT_SUBMITTED from cylc.flow.task_state import ( TASK_STATUS_RUNNING, TASK_STATUS_FAILED, TASK_STATUS_SUCCEEDED, ) from cylc.flow.wallclock import get_unix_time_from_time_string -from cylc.flow.workflow_status import RunMode +from cylc.flow.run_modes import RunMode if TYPE_CHECKING: from cylc.flow.task_events_mgr import TaskEventsManager + from cylc.flow.task_job_mgr import TaskJobManager from cylc.flow.task_proxy import TaskProxy from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager - from cylc.flow.cycling import PointBase + from typing_extensions import Literal + + +def submit_task_job( + task_job_mgr: 'TaskJobManager', + itask: 'TaskProxy', + rtconfig: Dict[str, Any], + workflow: str, + now: Tuple[float, str] +) -> 'Literal[True]': + """Submit a task in simulation mode. + + Returns: + True - indicating that TaskJobManager need take no further action. + """ + configure_sim_mode( + rtconfig, + itask.tdef.rtconfig['simulation']['fail cycle points']) + itask.summary['started_time'] = now[0] + task_job_mgr._set_retry_timers(itask, rtconfig) + itask.mode_settings = ModeSettings( + itask, + task_job_mgr.workflow_db_mgr, + rtconfig + ) + itask.waiting_on_job_prep = False + itask.submit_num += 1 + + itask.platform = { + 'name': RunMode.SIMULATION.value, + 'install target': 'localhost', + 'hosts': ['localhost'], + 'submission retry delays': [], + 'execution retry delays': [] + } + itask.summary['job_runner_name'] = RunMode.SIMULATION.value + itask.summary[task_job_mgr.KEY_EXECUTE_TIME_LIMIT] = ( + itask.mode_settings.simulated_run_length + ) + itask.jobs.append( + task_job_mgr.get_simulation_job_conf(itask, workflow) + ) + task_job_mgr.task_events_mgr.process_message( + itask, INFO, TASK_OUTPUT_SUBMITTED, + ) + task_job_mgr.workflow_db_mgr.put_insert_task_jobs( + itask, { + 'time_submit': now[1], + 'time_run': now[1], + 'try_num': itask.get_try_num(), + 'flow_nums': str(list(itask.flow_nums)), + 'is_manual_submit': itask.is_manual_submit, + 'job_runner_name': RunMode.SIMULATION.value, + 'platform_name': RunMode.SIMULATION.value, + 'submit_status': 0 # Submission has succeeded + } + ) + itask.state.status = TASK_STATUS_RUNNING + return True @dataclass class ModeSettings: """A store of state for simulation modes. - Used instead of modifying the runtime config. + Used instead of modifying the runtime config. We want to leave the + config unchanged so that clearing a broadcast change of run mode + clears the run mode settings. 
Args: itask: @@ -79,20 +144,18 @@ def __init__( db_mgr: 'WorkflowDatabaseManager', rtconfig: Dict[str, Any] ): - # itask.summary['started_time'] and mode_settings.timeout need # repopulating from the DB on workflow restart: started_time = itask.summary['started_time'] try_num = None if started_time is None: - # Get DB info + # This is a restart - Get DB info db_info = db_mgr.pri_dao.select_task_job( itask.tokens['cycle'], itask.tokens['task'], itask.tokens['job'], ) - # Get the started time: if db_info['time_submit']: started_time = get_unix_time_from_time_string( db_info["time_submit"]) @@ -100,28 +163,20 @@ def __init__( else: started_time = time() - # Get the try number: try_num = db_info["try_num"] # Parse fail cycle points: - if rtconfig != itask.tdef.rtconfig: - try: - rtconfig["simulation"][ - "fail cycle points" - ] = parse_fail_cycle_points( - rtconfig["simulation"]["fail cycle points"] - ) - except PointParsingError as exc: - # Broadcast Fail CP didn't parse - LOG.warning( - 'Broadcast fail cycle point was invalid:\n' - f' {exc.args[0]}' - ) - rtconfig['simulation'][ - 'fail cycle points' - ] = itask.tdef.rtconfig['simulation']['fail cycle points'] + if not rtconfig: + rtconfig = itask.tdef.rtconfig + if rtconfig and rtconfig != itask.tdef.rtconfig: + rtconfig["simulation"][ + "fail cycle points" + ] = parse_fail_cycle_points( + rtconfig["simulation"]["fail cycle points"], + itask.tdef.rtconfig['simulation']['fail cycle points'] + ) - # Calculate simulation info: + # Calculate simulation outcome and run-time: self.simulated_run_length = ( get_simulated_run_len(rtconfig)) self.sim_task_fails = sim_task_failed( @@ -132,37 +187,47 @@ def __init__( self.timeout = started_time + self.simulated_run_length -def configure_sim_modes(taskdefs, sim_mode): - """Adjust task defs for simulation and dummy mode. - +def configure_sim_mode(rtc, fallback, warnonly: bool = True): + """Adjust task defs for simulation mode. + + Example: + >>> this = configure_sim_mode + >>> rtc = { + ... 'submission retry delays': [42, 24, 23], + ... 'environment': {'DoNot': '"WantThis"'}, + ... 'simulation': {'fail cycle points': ['all']} + ... } + >>> this(rtc, [53]) + >>> rtc['submission retry delays'] + [1] + >>> rtc['environment'] + {} + >>> rtc['simulation'] + {'fail cycle points': None} + >>> rtc['platform'] + 'localhost' """ - dummy_mode = (sim_mode == RunMode.DUMMY) - - for tdef in taskdefs: - # Compute simulated run time by scaling the execution limit. - rtc = tdef.rtconfig - - rtc['submission retry delays'] = [1] - - if dummy_mode: - # Generate dummy scripting. - rtc['init-script'] = "" - rtc['env-script'] = "" - rtc['pre-script'] = "" - rtc['post-script'] = "" - rtc['script'] = build_dummy_script( - rtc, get_simulated_run_len(rtc)) + if not warnonly: + parse_fail_cycle_points( + rtc["simulation"]["fail cycle points"], + fallback, + warnonly + ) + return + rtc['submission retry delays'] = [1] - disable_platforms(rtc) + disable_platforms(rtc) - # Disable environment, in case it depends on env-script. - rtc['environment'] = {} + # Disable environment, in case it depends on env-script. 
+ rtc['environment'] = {} - rtc["simulation"][ "fail cycle points" ] = parse_fail_cycle_points( - rtc["simulation"]["fail cycle points"] - ) + rtc["simulation"][ "fail cycle points" ] = parse_fail_cycle_points( + rtc["simulation"]["fail cycle points"], + fallback, + warnonly + ) def get_simulated_run_len(rtc: Dict[str, Any]) -> int: @@ -184,24 +249,6 @@ def get_simulated_run_len(rtc: Dict[str, Any]) -> int: return sleep_sec -def build_dummy_script(rtc: Dict[str, Any], sleep_sec: int) -> str: - """Create fake scripting for dummy mode. - - This is for Dummy mode only. - """ - script = "sleep %d" % sleep_sec - # Dummy message outputs. - for msg in rtc['outputs'].values(): - script += "\ncylc message '%s'" % msg - if rtc['simulation']['fail try 1 only']: - arg1 = "true" - else: - arg1 = "false" - arg2 = " ".join(rtc['simulation']['fail cycle points']) - script += "\ncylc__job__dummy_result %s %s || exit 1" % (arg1, arg2) - return script - - def disable_platforms( rtc: Dict[str, Any] ) -> None: @@ -222,33 +269,52 @@ def disable_platforms( def parse_fail_cycle_points( - f_pts_orig: List[str] + fail_at_points_updated: List[str], + fail_at_points_config, + warnonly: bool = True ) -> 'Union[None, List[PointBase]]': """Parse `[simulation][fail cycle points]`. - None for "fail all points". - Else a list of cycle point objects. + Args: + fail_at_points_updated: Fail cycle points from a broadcast. + fail_at_points_config: + Fail cycle points from the original workflow config, used as + a fallback if the broadcast values fail to parse: invalid + config values are caught at validation, whereas an invalid + broadcast must not be able to take the scheduler down. + warnonly: + On a parse error, log a warning and fall back to the config + values instead of raising. + Examples: >>> this = parse_fail_cycle_points - >>> this(['all']) is None + >>> this(['all'], ['42']) is None True - >>> this([]) + >>> this([], ['42']) [] - >>> this(None) is None + >>> this(None, ['42']) is None True """ - f_pts: 'Optional[List[PointBase]]' = [] + fail_at_points: 'List[PointBase]' = [] if ( - f_pts_orig is None - or f_pts_orig and 'all' in f_pts_orig + fail_at_points_updated is None + or fail_at_points_updated + and 'all' in fail_at_points_updated ): - f_pts = None - elif f_pts_orig: - f_pts = [] - for point_str in f_pts_orig: - f_pts.append(get_point(point_str).standardise()) - return f_pts + return None + elif fail_at_points_updated: + for point_str in fail_at_points_updated: + if isinstance(point_str, PointBase): + fail_at_points.append(point_str) + else: + try: + fail_at_points.append(get_point(point_str).standardise()) + except PointParsingError as exc: + if warnonly: + LOG.warning(exc.args[0]) + return fail_at_points_config + else: + raise exc + return fail_at_points def sim_time_check( @@ -265,14 +331,24 @@ def sim_time_check( """ now = time() sim_task_state_changed: bool = False + for itask in itasks: - if itask.state.status != TASK_STATUS_RUNNING: + if ( + itask.state.status != TASK_STATUS_RUNNING + or ( + itask.run_mode + and itask.run_mode != RunMode.SIMULATION + ) + ): continue # This occurs if the workflow has been restarted. if itask.mode_settings is None: rtconfig = task_events_manager.broadcast_mgr.get_updated_rtconfig( itask) + # (configure_sim_mode mutates rtconfig in place and + # returns None, so its result must not be assigned.) + configure_sim_mode( + rtconfig, + itask.tdef.rtconfig['simulation']['fail cycle points']) itask.mode_settings = ModeSettings( itask, db_mgr, diff --git a/cylc/flow/run_modes/skip.py b/cylc/flow/run_modes/skip.py new file mode 100644 index 00000000000..49736883911 --- /dev/null +++ b/cylc/flow/run_modes/skip.py @@ -0,0 +1,180 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +"""Utilities supporting skip mode +""" +from logging import INFO +from typing import ( + TYPE_CHECKING, Dict, List, Tuple) + +from cylc.flow import LOG +from cylc.flow.exceptions import WorkflowConfigError +from cylc.flow.task_outputs import ( + TASK_OUTPUT_SUBMITTED, + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_FAILED, + TASK_OUTPUT_STARTED +) +from cylc.flow.run_modes import RunMode + +if TYPE_CHECKING: + from cylc.flow.taskdef import TaskDef + from cylc.flow.task_job_mgr import TaskJobManager + from cylc.flow.task_proxy import TaskProxy + from typing_extensions import Literal + + +def submit_task_job( + task_job_mgr: 'TaskJobManager', + itask: 'TaskProxy', + rtconfig: Dict, + _workflow: str, + now: Tuple[float, str] +) -> 'Literal[True]': + """Submit a task in skip mode. + + Returns: + True - indicating that TaskJobManager need take no further action. + """ + task_job_mgr._set_retry_timers(itask, rtconfig) + itask.summary['started_time'] = now[0] + itask.waiting_on_job_prep = False + itask.submit_num += 1 + + itask.platform = { + 'name': RunMode.SKIP.value, + 'install target': 'localhost', + 'hosts': ['localhost'], + 'disable task event handlers': + rtconfig['skip']['disable task event handlers'], + 'execution polling intervals': [], + 'submission retry delays': [], + 'execution retry delays': [] + } + itask.summary['job_runner_name'] = RunMode.SKIP.value + itask.jobs.append( + task_job_mgr.get_simulation_job_conf(itask, _workflow) + ) + itask.run_mode = RunMode.SKIP + task_job_mgr.workflow_db_mgr.put_insert_task_jobs( + itask, { + 'time_submit': now[1], + 'try_num': itask.get_try_num(), + 'flow_nums': str(list(itask.flow_nums)), + 'is_manual_submit': itask.is_manual_submit, + 'job_runner_name': RunMode.SKIP.value, + 'platform_name': RunMode.SKIP.value, + 'submit_status': 0 # Submission has succeeded + } + ) + task_job_mgr.workflow_db_mgr.put_update_task_state(itask) + for output in process_outputs(itask, rtconfig): + task_job_mgr.task_events_mgr.process_message(itask, INFO, output) + + return True + + +def process_outputs(itask: 'TaskProxy', rtconfig: Dict) -> List[str]: + """Process Skip Mode Outputs: + + * By default, all required outputs will be generated plus succeeded + if success is optional. + * The outputs submitted and started are always produced and do not + need to be defined in outputs. + * If outputs is specified and does not include either + succeeded or failed then succeeded will be produced. + + Return: + A list of outputs to emit.
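+ + Example (illustrative of the rules above): with + ``[skip]outputs = failed`` a task emits ``submitted`` and + ``started`` and then ``failed`` (``succeeded`` is withheld); with + no outputs configured, all required outputs are emitted and + ``succeeded`` is sent last.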
+ + """ + # Always produce `submitted` & `started` outputs first: + result: List[str] = [TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_STARTED] + + conf_outputs = list(rtconfig['skip']['outputs']) + + # Send the rest of our outputs, unless they are succeeded or failed, + # which we hold back, to prevent warnings about pre-requisites being + # unmet being shown because a "finished" output happens to come first. + for message in itask.state.outputs.iter_required_messages( + disable=( + TASK_OUTPUT_SUCCEEDED + if TASK_OUTPUT_FAILED in conf_outputs + else TASK_OUTPUT_FAILED + ) + ): + trigger = itask.state.outputs._message_to_trigger[message] + # Send message unless it be succeeded/failed. + if ( + trigger not in { + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_FAILED, + TASK_OUTPUT_SUBMITTED, + TASK_OUTPUT_STARTED, + } + and (not conf_outputs or trigger in conf_outputs) + ): + result.append(message) + + # Add optional outputs specified in skip settings: + for message, trigger in itask.state.outputs._message_to_trigger.items(): + if trigger in conf_outputs and trigger not in result: + result.append(message) + + # Send succeeded/failed last. + if TASK_OUTPUT_FAILED in conf_outputs: + result.append(TASK_OUTPUT_FAILED) + elif TASK_OUTPUT_SUCCEEDED not in result: + result.append(TASK_OUTPUT_SUCCEEDED) + + return result + + +def check_task_skip_config(tdef: 'TaskDef') -> None: + """Validate Skip Mode configuration. + + Raises: + * Error if outputs include succeeded and failed. + """ + skip_outputs = tdef.rtconfig.get('skip', {}).get('outputs', {}) + if not skip_outputs: + return + + # Error if outputs include succeded and failed: + if ( + TASK_OUTPUT_SUCCEEDED in skip_outputs + and TASK_OUTPUT_FAILED in skip_outputs + ): + raise WorkflowConfigError( + f'Skip mode settings for task {tdef.name} has' + ' mutually exclusive outputs: succeeded AND failed.') + + +def skip_mode_validate(taskdefs: 'Dict[str, TaskDef]') -> None: + """Warn user if any tasks have "run mode" set to skip. 
+ """ + skip_mode_tasks: List[str] = [] + for taskdef in taskdefs.values(): + if (taskdef.rtconfig.get('run mode', None) == RunMode.SKIP.value): + skip_mode_tasks.append(taskdef.name) + + # Run any mode specific validation checks: + check_task_skip_config(taskdef) + + if skip_mode_tasks: + message = 'The following tasks are set to run in skip mode:' + for taskname in skip_mode_tasks: + message += f'\n * {taskname}' + LOG.warning(message) diff --git a/cylc/flow/scheduler.py b/cylc/flow/scheduler.py index 8c4bb01e2d2..3339423640f 100644 --- a/cylc/flow/scheduler.py +++ b/cylc/flow/scheduler.py @@ -21,16 +21,30 @@ import itertools import os from pathlib import Path -from queue import Empty, Queue +from queue import ( + Empty, + Queue, +) from shlex import quote import signal from socket import gaierror -from subprocess import DEVNULL, PIPE, Popen +from subprocess import ( + DEVNULL, + PIPE, + Popen, +) import sys -from threading import Barrier, Thread -from time import sleep, time +from threading import ( + Barrier, + Thread, +) +from time import ( + sleep, + time, +) import traceback from typing import ( + TYPE_CHECKING, Any, AsyncGenerator, Dict, @@ -39,7 +53,6 @@ NoReturn, Optional, Set, - TYPE_CHECKING, Tuple, Union, ) @@ -50,13 +63,13 @@ from cylc.flow import ( LOG, __version__ as CYLC_VERSION, + commands, main_loop, + workflow_files, ) -from cylc.flow import workflow_files from cylc.flow.broadcast_mgr import BroadcastMgr from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.config import WorkflowConfig -from cylc.flow import commands from cylc.flow.cycling.loader import get_point from cylc.flow.data_store_mgr import DataStoreMgr from cylc.flow.exceptions import ( @@ -65,7 +78,12 @@ InputError, ) import cylc.flow.flags -from cylc.flow.flow_mgr import FLOW_NEW, FLOW_NONE, FlowMgr, repr_flow_nums +from cylc.flow.flow_mgr import ( + FLOW_NEW, + FLOW_NONE, + FlowMgr, + repr_flow_nums, +) from cylc.flow.host_select import ( HostSelectException, select_workflow_host, @@ -75,8 +93,13 @@ get_user, is_remote_platform, ) -from cylc.flow.id import Tokens -from cylc.flow.log_level import verbosity_to_env, verbosity_to_opts +from cylc.flow.id import ( + Tokens, +) +from cylc.flow.log_level import ( + verbosity_to_env, + verbosity_to_opts, +) from cylc.flow.loggingutil import ( ReferenceLogFileHandler, RotatingLogFileHandler, @@ -109,7 +132,8 @@ ) from cylc.flow.profiler import Profiler from cylc.flow.resources import get_resources -from cylc.flow.simulation import sim_time_check +from cylc.flow.run_modes import RunMode +from cylc.flow.run_modes.simulation import sim_time_check from cylc.flow.subprocpool import SubProcPool from cylc.flow.task_events_mgr import TaskEventsManager from cylc.flow.task_job_mgr import TaskJobManager @@ -131,9 +155,14 @@ TASK_STATUSES_ACTIVE, TASK_STATUSES_NEVER_ACTIVE, ) -from cylc.flow.taskdef import TaskDef, generate_graph_children -from cylc.flow.templatevars import eval_var -from cylc.flow.templatevars import get_template_vars +from cylc.flow.taskdef import ( + TaskDef, + generate_graph_children, +) +from cylc.flow.templatevars import ( + eval_var, + get_template_vars, +) from cylc.flow.timer import Timer from cylc.flow.util import cli_format from cylc.flow.wallclock import ( @@ -143,9 +172,13 @@ ) from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager from cylc.flow.workflow_events import WorkflowEventHandler -from cylc.flow.workflow_status import AutoRestartMode, RunMode, StopMode +from cylc.flow.workflow_status import ( + AutoRestartMode, + StopMode, 
+) from cylc.flow.xtrigger_mgr import XtriggerManager + if TYPE_CHECKING: from optparse import Values @@ -439,8 +472,10 @@ async def configure(self, params): og_run_mode = self.get_run_mode() if run_mode != og_run_mode: raise InputError( - f'This workflow was originally run in {og_run_mode} mode:' - f' Will not restart in {run_mode} mode.') + "This workflow was originally run in " + f"{og_run_mode.value} mode:" + f" You can't restart it in {run_mode.value} mode." + ) self.profiler.log_memory("scheduler.py: before load_flow_file") try: @@ -608,7 +643,7 @@ def log_start(self) -> None: # Note that the following lines must be present at the top of # the workflow log file for use in reference test runs. LOG.info( - f'Run mode: {self.get_run_mode()}', + f'Run mode: {self.get_run_mode().value}', extra=RotatingLogFileHandler.header_extra ) LOG.info( @@ -1328,7 +1363,7 @@ def _set_workflow_params( LOG.info('LOADING workflow parameters') for key, value in params: if key == self.workflow_db_mgr.KEY_RUN_MODE: - self.options.run_mode = value or RunMode.LIVE + self.options.run_mode = value or RunMode.LIVE.value LOG.info(f"+ run mode = {value}") if value is None: continue @@ -1393,7 +1428,7 @@ def _load_template_vars(self, _, row): def run_event_handlers(self, event, reason=""): """Run a workflow event handler. - Run workflow events in simulation and dummy mode ONLY if enabled. + Run workflow events only in live mode or skip mode. """ if self.get_run_mode() in {RunMode.SIMULATION, RunMode.DUMMY}: return @@ -1469,7 +1504,7 @@ def release_queued_tasks(self) -> bool: pre_prep_tasks, self.server.curve_auth, self.server.client_pub_key_dir, - is_simulation=(self.get_run_mode() == RunMode.SIMULATION) + run_mode=self.get_run_mode() ): if itask.flow_nums: flow = ','.join(str(i) for i in itask.flow_nums) @@ -1716,7 +1751,6 @@ async def _main_loop(self) -> None: if self.xtrigger_mgr.do_housekeeping: self.xtrigger_mgr.housekeep(self.pool.get_tasks()) - self.pool.clock_expire_tasks() self.release_queued_tasks() @@ -2192,7 +2226,7 @@ def _check_startup_opts(self) -> None: f"option --{opt}=reload is only valid for restart" ) - def get_run_mode(self) -> str: + def get_run_mode(self) -> RunMode: return RunMode.get(self.options) async def handle_exception(self, exc: BaseException) -> NoReturn: diff --git a/cylc/flow/scheduler_cli.py b/cylc/flow/scheduler_cli.py index 601dd3112b6..14c4a7ae77c 100644 --- a/cylc/flow/scheduler_cli.py +++ b/cylc/flow/scheduler_cli.py @@ -54,6 +54,7 @@ from cylc.flow.remote import cylc_server_cmd from cylc.flow.scheduler import Scheduler, SchedulerError from cylc.flow.scripts.common import cylc_header +from cylc.flow.run_modes import WORKFLOW_RUN_MODES from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager from cylc.flow.workflow_files import ( SUITERC_DEPR_MSG, @@ -65,7 +66,6 @@ is_terminal, prompt, ) -from cylc.flow.workflow_status import RunMode if TYPE_CHECKING: from optparse import Values @@ -129,9 +129,16 @@ RUN_MODE = OptionSettings( ["-m", "--mode"], - help="Run mode: live, dummy, simulation (default live).", + help=( + f"Run mode: {sorted(WORKFLOW_RUN_MODES)} (default live)." + " Live mode executes the tasks as defined in the runtime" + " section." + " Simulation and dummy modes ignore task 'script'" + " items and related job settings. They are" + " designed for testing." 
+ ), metavar="STRING", action='store', dest="run_mode", - choices=[RunMode.LIVE, RunMode.DUMMY, RunMode.SIMULATION], + choices=list(WORKFLOW_RUN_MODES), ) PLAY_RUN_MODE = deepcopy(RUN_MODE) diff --git a/cylc/flow/scripts/lint.py b/cylc/flow/scripts/lint.py index f68ecbeb1a9..88f9a0ed5b8 100755 --- a/cylc/flow/scripts/lint.py +++ b/cylc/flow/scripts/lint.py @@ -96,6 +96,10 @@ ) from cylc.flow.parsec.config import ParsecConfig from cylc.flow.scripts.cylc import DEAD_ENDS +from cylc.flow.task_outputs import ( + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_FAILED, +) from cylc.flow.terminal import cli_function @@ -379,6 +383,36 @@ def check_for_deprecated_task_event_template_vars( return None +BAD_SKIP_OUTS = re.compile(r'outputs\s*=\s*(.*)') + + +def check_skip_mode_outputs(line: str) -> Dict: + """Ensure skip mode output setting doesn't include + succeeded _and_ failed, as they are mutually exclusive. + + n.b. + + This should be separable from ``[[outputs]]`` because it's a key + value pair not a section heading. + + Examples: + >>> this = check_skip_mode_outputs + >>> this('outputs = succeeded, failed') + {'description': 'are ... together', 'outputs': 'failed...succeeded'} + """ + + outputs = BAD_SKIP_OUTS.findall(line) + if outputs: + outputs = [i.strip() for i in outputs[0].split(',')] + if TASK_OUTPUT_FAILED in outputs and TASK_OUTPUT_SUCCEEDED in outputs: + return { + 'description': + 'are mutually exclusive and cannot be used together', + 'outputs': f'{TASK_OUTPUT_FAILED} and {TASK_OUTPUT_SUCCEEDED}' + } + return {} + + INDENTATION = re.compile(r'^(\s*)(.*)') @@ -624,6 +658,14 @@ def list_wrapper(line: str, check: Callable) -> Optional[Dict[str, str]]: ), FUNCTION: re.compile(r'=>\s*\\').findall }, + 'S016': { + 'short': 'Task outputs {outputs}: {description}.', + FUNCTION: check_skip_mode_outputs + }, + 'S017': { + 'short': 'Run mode is not live: This task will only appear to run.', + FUNCTION: re.compile(r'run mode\s*=\s*[^l][^i][^v][^e]$').findall + }, } # Subset of deprecations which are tricky (impossible?) to scrape from the # upgrader. diff --git a/cylc/flow/scripts/set.py b/cylc/flow/scripts/set.py index b64cf74aba0..adc3cf8449a 100755 --- a/cylc/flow/scripts/set.py +++ b/cylc/flow/scripts/set.py @@ -65,6 +65,9 @@ # complete the succeeded output of 3/bar: $ cylc set --out=succeeded my_workflow//3/bar + # complete the outputs defined in [runtime][task][skip] + $ cylc set --out=skip my_workflow//3/bar + # satisfy the 3/foo:succeeded prerequisite of 3/bar: $ cylc set --pre=3/foo my_workflow//3/bar # or: @@ -154,8 +157,10 @@ def get_option_parser() -> COP: "-o", "--out", "--output", metavar="OUTPUT(s)", help=( "Complete task outputs. For multiple outputs re-use the" - " option, or give a comma-separated list of outputs, or" - ' use "--out=required" to complete all required outputs.' + " option, or give a comma-separated list of outputs." + " Use '--out=required' to complete all required outputs." + " Use '--out=skip' to complete outputs defined in the task's" + " [skip] configuration." " OUTPUT format: trigger names as used in the graph." 
), action="append", default=None, dest="outputs" diff --git a/cylc/flow/scripts/validate.py b/cylc/flow/scripts/validate.py index 86fa86fc227..edabd6bdab2 100755 --- a/cylc/flow/scripts/validate.py +++ b/cylc/flow/scripts/validate.py @@ -52,12 +52,12 @@ ICP_OPTION, ) from cylc.flow.profiler import Profiler +from cylc.flow.scheduler_cli import RUN_MODE from cylc.flow.task_proxy import TaskProxy from cylc.flow.templatevars import get_template_vars from cylc.flow.terminal import cli_function -from cylc.flow.scheduler_cli import RUN_MODE +from cylc.flow.run_modes import RunMode from cylc.flow.workflow_files import get_workflow_run_dir -from cylc.flow.workflow_status import RunMode if TYPE_CHECKING: from cylc.flow.option_parsers import Values @@ -130,7 +130,7 @@ def get_option_parser(): { 'check_circular': False, 'profile_mode': False, - 'run_mode': RunMode.LIVE + 'run_mode': RunMode.LIVE.value } ) diff --git a/cylc/flow/task_events_mgr.py b/cylc/flow/task_events_mgr.py index 977029f19cb..29cbface294 100644 --- a/cylc/flow/task_events_mgr.py +++ b/cylc/flow/task_events_mgr.py @@ -68,6 +68,8 @@ JOB_LOG_OUT, JOB_LOG_ERR, ) +from cylc.flow.run_modes import ( + JOBLESS_MODES, RunMode, disable_task_event_handlers) from cylc.flow.task_message import ( ABORT_MESSAGE_PREFIX, FAIL_MESSAGE_PREFIX, VACATION_MESSAGE_PREFIX) from cylc.flow.task_state import ( @@ -79,7 +81,7 @@ TASK_STATUS_FAILED, TASK_STATUS_EXPIRED, TASK_STATUS_SUCCEEDED, - TASK_STATUS_WAITING + TASK_STATUS_WAITING, ) from cylc.flow.task_outputs import ( TASK_OUTPUT_EXPIRED, @@ -99,7 +101,6 @@ get_template_variables as get_workflow_template_variables, process_mail_footer, ) -from cylc.flow.workflow_status import RunMode if TYPE_CHECKING: @@ -774,7 +775,7 @@ def process_message( # ... but either way update the job ID in the job proxy (it only # comes in via the submission message). - if itask.tdef.run_mode != RunMode.SIMULATION: + if itask.run_mode != RunMode.SIMULATION: job_tokens = itask.tokens.duplicate( job=str(itask.submit_num) ) @@ -896,7 +897,7 @@ def _process_message_check( if ( itask.state(TASK_STATUS_WAITING) # Polling in live mode only: - and itask.tdef.run_mode == RunMode.LIVE + and itask.run_mode == RunMode.LIVE and ( ( # task has a submit-retry lined up @@ -941,7 +942,7 @@ def _process_message_check( def setup_event_handlers(self, itask, event, message): """Set up handlers for a task event.""" - if itask.tdef.run_mode != RunMode.LIVE: + if disable_task_event_handlers(itask): return msg = "" if message != f"job {event}": @@ -1396,8 +1397,12 @@ def _process_message_succeeded(self, itask, event_time, forced): "run_status": 0, "time_run_exit": event_time, }) - # Update mean elapsed time only on task succeeded. - if itask.summary['started_time'] is not None: + # Update mean elapsed time only on task succeeded, + # (Don't record skip mode run times) + if ( + itask.summary['started_time'] is not None + and itask.run_mode != RunMode.SKIP + ): itask.tdef.elapsed_times.append( itask.summary['finished_time'] - itask.summary['started_time']) @@ -1484,7 +1489,7 @@ def _process_message_submitted( ) itask.set_summary_time('submitted', event_time) - if itask.tdef.run_mode == RunMode.SIMULATION: + if itask.run_mode == RunMode.SIMULATION: # Simulate job started as well. 
itask.set_summary_time('started', event_time) if itask.state_reset(TASK_STATUS_RUNNING, forced=forced): @@ -1520,7 +1525,7 @@ def _process_message_submitted( 'submitted', event_time, ) - if itask.tdef.run_mode == RunMode.SIMULATION: + if itask.run_mode == RunMode.SIMULATION: # Simulate job started as well. self.data_store_mgr.delta_job_time( job_tokens, @@ -1553,7 +1558,10 @@ def _insert_task_job( # not see previous submissions (so can't use itask.jobs[submit_num-1]). # And transient tasks, used for setting outputs and spawning children, # do not submit jobs. - if (itask.tdef.run_mode == RunMode.SIMULATION) or forced: + if ( + itask.run_mode and itask.run_mode.value in JOBLESS_MODES + or forced + ): job_conf = {"submit_num": itask.submit_num} else: try: diff --git a/cylc/flow/task_job_mgr.py b/cylc/flow/task_job_mgr.py index f4b9ab0a71a..7e1cfa02cc4 100644 --- a/cylc/flow/task_job_mgr.py +++ b/cylc/flow/task_job_mgr.py @@ -42,6 +42,7 @@ Iterable, List, Optional, + Tuple, Union, ) @@ -68,7 +69,10 @@ get_platform, ) from cylc.flow.remote import construct_ssh_cmd -from cylc.flow.simulation import ModeSettings +from cylc.flow.run_modes import ( + WORKFLOW_ONLY_MODES, + RunMode, +) from cylc.flow.subprocctx import SubProcContext from cylc.flow.subprocpool import SubProcPool from cylc.flow.task_action_timer import ( @@ -120,6 +124,7 @@ if TYPE_CHECKING: from cylc.flow.task_proxy import TaskProxy + from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager class TaskJobManager: @@ -153,7 +158,7 @@ def __init__(self, workflow, proc_pool, workflow_db_mgr, task_events_mgr, data_store_mgr, bad_hosts): self.workflow = workflow self.proc_pool = proc_pool - self.workflow_db_mgr = workflow_db_mgr + self.workflow_db_mgr: WorkflowDatabaseManager = workflow_db_mgr self.task_events_mgr = task_events_mgr self.data_store_mgr = data_store_mgr self.job_file_writer = JobFileWriter() @@ -241,8 +246,14 @@ def prep_submit_task_jobs(self, workflow, itasks, check_syntax=True): bad_tasks.append(itask) return [prepared_tasks, bad_tasks] - def submit_task_jobs(self, workflow, itasks, curve_auth, - client_pub_key_dir, is_simulation=False): + def submit_task_jobs( + self, + workflow, + itasks, + curve_auth, + client_pub_key_dir, + run_mode: RunMode = RunMode.LIVE, + ): """Prepare for job submission and submit task jobs. Preparation (host selection, remote host init, and remote install) @@ -257,10 +268,25 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, Return (list): list of tasks that attempted submission. """ - if is_simulation: - return self._simulation_submit_task_jobs(itasks, workflow) + # submit "simulation/skip" mode tasks, modify "dummy" task configs: + itasks, submitted_nonlive_tasks = self.submit_nonlive_task_jobs( + workflow, itasks, run_mode) + + # submit "live" mode tasks (and "dummy" mode tasks) + submitted_live_tasks = self.submit_livelike_task_jobs( + workflow, itasks, curve_auth, client_pub_key_dir) + + return submitted_nonlive_tasks + submitted_live_tasks + + def submit_livelike_task_jobs( + self, workflow, itasks, curve_auth, client_pub_key_dir + ) -> 'List[TaskProxy]': + """Submission for live tasks and dummy tasks. 
+ """ + done_tasks: 'List[TaskProxy]' = [] + # Mapping of platforms to task proxies: + auth_itasks: 'Dict[str, List[TaskProxy]]' = {} - # Prepare tasks for job submission prepared_tasks, bad_tasks = self.prep_submit_task_jobs( workflow, itasks) @@ -270,10 +296,9 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, if not prepared_tasks: return bad_tasks - # Mapping of platforms to task proxies: - auth_itasks: Dict[str, List[TaskProxy]] = {} for itask in prepared_tasks: auth_itasks.setdefault(itask.platform['name'], []).append(itask) + # Submit task jobs for each platform # Non-prepared tasks can be considered done for now: done_tasks = bad_tasks @@ -444,6 +469,7 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, 'platform_name': itask.platform['name'], 'job_runner_name': itask.summary['job_runner_name'], }) + itask.is_manual_submit = False if ri_map[install_target] == REMOTE_FILE_INSTALL_255: @@ -1000,44 +1026,64 @@ def _set_retry_timers( except KeyError: itask.try_timers[key] = TaskActionTimer(delays=delays) - def _simulation_submit_task_jobs(self, itasks, workflow): - """Simulation mode task jobs submission.""" + def submit_nonlive_task_jobs( + self: 'TaskJobManager', + workflow: str, + itasks: 'List[TaskProxy]', + workflow_run_mode: RunMode, + ) -> 'Tuple[List[TaskProxy], List[TaskProxy]]': + """Identify task mode and carry out alternative submission + paths if required: + + * Simulation: Job submission. + * Skip: Entire job lifecycle happens here! + * Dummy: Pre-submission preparation (removing task script's content) + before returning to live pathway. + * Live: return to main submission pathway without doing anything. + + Returns: + lively_tasks: + A list of tasks which require subsequent + processing **as if** they were live mode tasks. + (This includes live and dummy mode tasks) + nonlive_tasks: + A list of tasks which require no further processing + because their apparent execution is done entirely inside + the scheduler. (This includes skip and simulation mode tasks). + """ + lively_tasks: 'List[TaskProxy]' = [] + nonlive_tasks: 'List[TaskProxy]' = [] now = time() - now_str = get_time_string_from_unix_time(now) + now = (now, get_time_string_from_unix_time(now)) + for itask in itasks: - # Handle broadcasts + # Get task config with broadcasts applied: rtconfig = self.task_events_mgr.broadcast_mgr.get_updated_rtconfig( itask) - itask.summary['started_time'] = now - self._set_retry_timers(itask, rtconfig) - itask.mode_settings = ModeSettings( - itask, - self.workflow_db_mgr, - rtconfig - ) - - itask.waiting_on_job_prep = False - itask.submit_num += 1 + # Apply task run mode + if workflow_run_mode.value in WORKFLOW_ONLY_MODES: + # Task run mode cannot override workflow run-mode sim or dummy: + itask.run_mode = workflow_run_mode + else: + # If workflow mode is skip or live and task mode is set, + # override workflow mode, else use workflow mode. + itask.run_mode = RunMode( + rtconfig.get('run mode', workflow_run_mode)) + + # Submit nonlive tasks, or add live-like (live or dummy) + # tasks to list of tasks to put through live submission pipeline. 
+ submit_func = itask.run_mode.get_submit_method() + if submit_func and submit_func( + self, itask, rtconfig, workflow, now + ): + # A submit function returns true if this is a nonlive task: + self.workflow_db_mgr.put_insert_task_states(itask) + nonlive_tasks.append(itask) + else: + lively_tasks.append(itask) - itask.platform = {'name': 'SIMULATION'} - itask.summary['job_runner_name'] = 'SIMULATION' - itask.summary[self.KEY_EXECUTE_TIME_LIMIT] = ( - itask.mode_settings.simulated_run_length - ) - itask.jobs.append( - self.get_simulation_job_conf(itask, workflow) - ) - self.task_events_mgr.process_message( - itask, INFO, TASK_OUTPUT_SUBMITTED, - ) - self.workflow_db_mgr.put_insert_task_jobs( - itask, { - 'time_submit': now_str, - 'try_num': itask.get_try_num(), - } - ) - return itasks + return lively_tasks, nonlive_tasks def _submit_task_jobs_callback(self, ctx, workflow, itasks): """Callback when submit task jobs command exits.""" diff --git a/cylc/flow/task_outputs.py b/cylc/flow/task_outputs.py index b8363a50ada..8548ab405e4 100644 --- a/cylc/flow/task_outputs.py +++ b/cylc/flow/task_outputs.py @@ -37,6 +37,7 @@ if TYPE_CHECKING: from cylc.flow.taskdef import TaskDef + from typing_extensions import Literal # Standard task output strings, used for triggering. @@ -194,6 +195,7 @@ def get_completion_expression(tdef: 'TaskDef') -> str: def get_optional_outputs( expression: str, outputs: Iterable[str], + disable: "Optional[str]" = None ) -> Dict[str, Optional[bool]]: """Determine which outputs in an expression are optional. @@ -202,6 +204,9 @@ def get_optional_outputs( The completion expression. outputs: All outputs that apply to this task. + disable: + Disable this output and any others it is joined with by `and` + (which will mean they are necessarily optional). Returns: dict: compvar: is_optional @@ -229,6 +234,21 @@ def get_optional_outputs( [('expired', True), ('failed', None), ('succeeded', False), ('x', False), ('y', False)] + >>> sorted(get_optional_outputs( + ... '(succeeded and towel) or (failed and bugblatter)', + ... {'succeeded', 'towel', 'failed', 'bugblatter'}, + ... ).items()) + [('bugblatter', True), ('failed', True), + ('succeeded', True), ('towel', True)] + + >>> sorted(get_optional_outputs( + ... '(succeeded and towel) or (failed and bugblatter)', + ... {'succeeded', 'towel', 'failed', 'bugblatter'}, + ... disable='failed' + ... ).items()) + [('bugblatter', True), ('failed', True), + ('succeeded', False), ('towel', False)] + """ # determine which triggers are used in the expression used_compvars = get_variable_names(expression) @@ -236,6 +256,9 @@ def get_optional_outputs( # all completion variables which could appear in the expression all_compvars = {trigger_to_completion_variable(out) for out in outputs} + # Allows exclusion of additional outcomes: + extra_excludes = {disable: False} if disable else {} + return { # output: is_optional # the outputs that are used in the expression **{ @@ -247,6 +270,7 @@ def get_optional_outputs( # (pre-conditions are considered separately) 'expired': False, 'submit_failed': False, + **extra_excludes }, ) for output in used_compvars @@ -609,15 +633,24 @@ def _is_compvar_complete(self, compvar: str) -> Optional[bool]: else: raise KeyError(compvar) - def iter_required_messages(self) -> Iterator[str]: + def iter_required_messages( + self, + disable: 'Optional[Literal["succeeded", "failed"]]' = None + ) -> Iterator[str]: """Yield task messages that are required for this task to be complete. 
        Note, in some cases tasks might not have any required messages,
        e.g. "completion = succeeded or failed".
+
+        Args:
+            disable: Treat this output, and any others joined to it by
+                `and`, as unsatisfiable. In skip mode we only want to check
+                either succeeded or failed, not both.
         """
         for compvar, is_optional in get_optional_outputs(
             self._completion_expression,
             set(self._message_to_compvar.values()),
+            disable=disable
         ).items():
             if is_optional is False:
                 for message, _compvar in self._message_to_compvar.items():
diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py
index 3b1990169ff..c771af215cd 100644
--- a/cylc/flow/task_pool.py
+++ b/cylc/flow/task_pool.py
@@ -59,6 +59,8 @@
 from cylc.flow.id_cli import contains_fnmatch
 from cylc.flow.id_match import filter_ids
 from cylc.flow.platforms import get_platform
+from cylc.flow.run_modes import RunMode
+from cylc.flow.run_modes.skip import process_outputs as get_skip_mode_outputs
 from cylc.flow.task_action_timer import (
     TaskActionTimer,
     TimerFlags,
@@ -1450,7 +1452,10 @@ def spawn_on_output(self, itask: TaskProxy, output: str) -> None:
             tasks = [c_task]
         for t in tasks:
-            t.satisfy_me([itask.tokens.duplicate(task_sel=output)])
+            t.satisfy_me(
+                [itask.tokens.duplicate(task_sel=output)],
+                mode=itask.run_mode
+            )
             self.data_store_mgr.delta_task_prerequisite(t)
             if not in_pool:
                 self.add_to_pool(t)
@@ -1577,7 +1582,8 @@ def spawn_on_all_outputs(
                 continue
             if completed_only:
                 c_task.satisfy_me(
-                    [itask.tokens.duplicate(task_sel=message)]
+                    [itask.tokens.duplicate(task_sel=message)],
+                    mode=itask.run_mode
                 )
                 self.data_store_mgr.delta_task_prerequisite(c_task)
             self.add_to_pool(c_task)
@@ -1881,7 +1887,7 @@ def _standardise_prereqs(
         return _prereqs
     def _standardise_outputs(
-        self, point: 'PointBase', tdef: 'TaskDef', outputs: List[str]
+        self, point: 'PointBase', tdef: 'TaskDef', outputs: Iterable[str]
     ) -> List[str]:
         """Convert output names to task output messages."""
         _outputs = []
@@ -1891,7 +1897,8 @@ def _standardise_outputs(
             try:
                 msg = tdef.outputs[output][0]
             except KeyError:
-                LOG.warning(f"output {point}/{tdef.name}:{output} not found")
+                LOG.warning(
+                    f"output {point}/{tdef.name}:{output} not found")
                 continue
             _outputs.append(msg)
         return _outputs
@@ -1991,15 +1998,26 @@ def set_prereqs_and_outputs(
     def _set_outputs_itask(
         self,
         itask: 'TaskProxy',
-        outputs: List[str],
+        outputs: Iterable[str],
     ) -> None:
         """Set requested outputs on a task proxy and spawn children."""
         if not outputs:
-            outputs = list(itask.state.outputs.iter_required_messages())
+            outputs = itask.state.outputs.iter_required_messages()
         else:
-            outputs = self._standardise_outputs(
-                itask.point, itask.tdef, outputs
-            )
+            # --out=skip is a shortcut to setting all the outputs that
+            # skip mode would.
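That shortcut relies on the skip-mode output rules, and the code continues below by expanding "skip" into the concrete outputs. A simplified, self-contained model of those rules (the real implementation is cylc.flow.run_modes.skip.process_outputs, imported above as get_skip_mode_outputs, which works on the task proxy and its broadcast-updated config; the helper below is illustrative only):

    def skip_outputs_model(configured, required):
        """Which outputs a skip-mode task emits, simplified."""
        outputs = {'submitted', 'started'}   # always produced in skip mode
        # configured [skip]outputs if any, else all required outputs:
        outputs |= set(configured) if configured else set(required)
        if not outputs & {'succeeded', 'failed'}:
            # succeed unless failure was explicitly requested
            outputs.add('succeeded')
        return outputs

    # no skip outputs configured -> required outputs plus succeeded:
    assert skip_outputs_model([], {'x'}) == {
        'submitted', 'started', 'x', 'succeeded'}
    # 'failed' configured -> succeeded is not produced:
    assert 'succeeded' not in skip_outputs_model(['failed'], {'x'})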
+ outputs = set(outputs) + skips = [] + if RunMode.SKIP.value in outputs: + # Check for broadcasts to task: + outputs.remove(RunMode.SKIP.value) + bc_mgr = self.task_events_mgr.broadcast_mgr + rtconfig = bc_mgr.get_updated_rtconfig(itask) + skips = get_skip_mode_outputs(itask, rtconfig) + itask.run_mode = RunMode.SKIP + outputs = set( + self._standardise_outputs(itask.point, itask.tdef, outputs) + ).union(skips) for output in sorted(outputs, key=itask.state.outputs.output_sort_key): if itask.state.outputs.is_message_complete(output): diff --git a/cylc/flow/task_proxy.py b/cylc/flow/task_proxy.py index e641b2598e1..4d98609df39 100644 --- a/cylc/flow/task_proxy.py +++ b/cylc/flow/task_proxy.py @@ -40,6 +40,7 @@ ) from cylc.flow.flow_mgr import repr_flow_nums from cylc.flow.platforms import get_platform +from cylc.flow.run_modes import RunMode from cylc.flow.task_action_timer import TimerFlags from cylc.flow.task_state import ( TASK_STATUS_EXPIRED, @@ -58,7 +59,7 @@ PrereqTuple, SatisfiedState, ) - from cylc.flow.simulation import ModeSettings + from cylc.flow.run_modes.simulation import ModeSettings from cylc.flow.task_action_timer import TaskActionTimer from cylc.flow.taskdef import TaskDef @@ -194,6 +195,7 @@ class TaskProxy: 'point_as_seconds', 'poll_timer', 'reload_successor', + 'run_mode', 'submit_num', 'tdef', 'state', @@ -303,6 +305,7 @@ def __init__( self.graph_children = generate_graph_children(tdef, self.point) self.mode_settings: Optional['ModeSettings'] = None + self.run_mode: Optional[RunMode] = None if self.tdef.expiration_offset is not None: self.expire_time = ( @@ -564,7 +567,10 @@ def state_reset( return False def satisfy_me( - self, task_messages: 'Iterable[Tokens]', forced: bool = False + self, + task_messages: 'Iterable[Tokens]', + mode: Optional[RunMode] = RunMode.LIVE, + forced: bool = False, ) -> 'Set[Tokens]': """Try to satisfy my prerequisites with given output messages. @@ -574,7 +580,7 @@ def satisfy_me( Return a set of unmatched task messages. """ - used = self.state.satisfy_me(task_messages, forced) + used = self.state.satisfy_me(task_messages, mode=mode, forced=forced) return set(task_messages) - used def clock_expire(self) -> bool: diff --git a/cylc/flow/task_state.py b/cylc/flow/task_state.py index 07f6cdc3a3c..5b8023d6464 100644 --- a/cylc/flow/task_state.py +++ b/cylc/flow/task_state.py @@ -16,7 +16,6 @@ """Task state related logic.""" - from typing import ( TYPE_CHECKING, Dict, @@ -31,8 +30,8 @@ TASK_OUTPUT_EXPIRED, TASK_OUTPUT_FAILED, TASK_OUTPUT_STARTED, - TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_SUBMIT_FAILED, + TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_SUCCEEDED, TaskOutputs, ) @@ -43,6 +42,7 @@ from cylc.flow.cycling import PointBase from cylc.flow.id import Tokens from cylc.flow.prerequisite import PrereqTuple + from cylc.flow.run_modes import RunMode from cylc.flow.taskdef import TaskDef @@ -324,6 +324,7 @@ def __call__( def satisfy_me( self, outputs: Iterable['Tokens'], + mode: 'Optional[RunMode]', forced: bool = False, ) -> Set['Tokens']: """Try to satisfy my prerequisites with given outputs. 
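The new mode argument is threaded from TaskProxy.satisfy_me down to each prerequisite so that the prerequisite can record how it was satisfied. A minimal sketch of that bookkeeping; the label strings here are assumptions for illustration, not taken from cylc.flow.prerequisite (which is not shown in this part of the diff):

    from typing import Optional

    def satisfied_label(mode: 'Optional[str]') -> str:
        # How a prerequisite might record the way it was satisfied; the
        # 'satisfied by skip mode' wording is assumed for illustration.
        if mode == 'skip':
            return 'satisfied by skip mode'
        return 'satisfied naturally'

    assert satisfied_label('skip') == 'satisfied by skip mode'
    assert satisfied_label(None) == 'satisfied naturally'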
@@ -333,7 +334,7 @@ def satisfy_me( valid: Set[Tokens] = set() for prereq in (*self.prerequisites, *self.suicide_prerequisites): valid.update( - prereq.satisfy_me(outputs, forced) + prereq.satisfy_me(outputs, mode=mode, forced=forced) ) return valid diff --git a/cylc/flow/taskdef.py b/cylc/flow/taskdef.py index ec7ea0bdade..34461ca0d6f 100644 --- a/cylc/flow/taskdef.py +++ b/cylc/flow/taskdef.py @@ -151,7 +151,7 @@ class TaskDef: # Memory optimization - constrain possible attributes to this list. __slots__ = [ - "run_mode", "rtconfig", "start_point", "initial_point", "sequences", + "rtconfig", "start_point", "initial_point", "sequences", "used_in_offset_trigger", "max_future_prereq_offset", "sequential", "is_coldstart", "workflow_polling_cfg", "expiration_offset", @@ -162,11 +162,10 @@ class TaskDef: # Store the elapsed times for a maximum of 10 cycles MAX_LEN_ELAPSED_TIMES = 10 - def __init__(self, name, rtcfg, run_mode, start_point, initial_point): + def __init__(self, name, rtcfg, start_point, initial_point): if not TaskID.is_valid_name(name): raise TaskDefError("Illegal task name: %s" % name) - self.run_mode = run_mode self.rtconfig = rtcfg self.start_point = start_point self.initial_point = initial_point @@ -410,7 +409,7 @@ def is_parentless(self, point): def __repr__(self) -> str: """ >>> TaskDef( - ... name='oliver', rtcfg={}, run_mode='fake', start_point='1', + ... name='oliver', rtcfg={}, start_point='1', ... initial_point='1' ... ) diff --git a/cylc/flow/unicode_rules.py b/cylc/flow/unicode_rules.py index a6974888248..b24d576332d 100644 --- a/cylc/flow/unicode_rules.py +++ b/cylc/flow/unicode_rules.py @@ -22,6 +22,7 @@ _TASK_NAME_CHARACTERS, _TASK_NAME_PREFIX, ) +from cylc.flow.run_modes import RunMode from cylc.flow.task_qualifiers import TASK_QUALIFIERS from cylc.flow.task_state import TASK_STATUSES_ORDERED @@ -351,6 +352,8 @@ class TaskOutputValidator(UnicodeRuleChecker): not_starts_with('_cylc'), # blacklist keywords not_equals('required', 'optional', 'all', 'and', 'or'), + # blacklist Run Modes: + not_equals(RunMode.SKIP.value), # blacklist built-in task qualifiers and statuses (e.g. 
"waiting") not_equals(*sorted({*TASK_QUALIFIERS, *TASK_STATUSES_ORDERED})), ] diff --git a/cylc/flow/workflow_db_mgr.py b/cylc/flow/workflow_db_mgr.py index 93e0e32a28c..d9ae87150d8 100644 --- a/cylc/flow/workflow_db_mgr.py +++ b/cylc/flow/workflow_db_mgr.py @@ -367,11 +367,15 @@ def put_workflow_params(self, schd: 'Scheduler') -> None: value = getattr(schd.options, key, None) value = None if value == 'reload' else value self.put_workflow_params_1(key, value) - for key in ( + + self.put_workflow_params_1( + self.KEY_CYCLE_POINT_TIME_ZONE, + getattr(schd.options, self.KEY_CYCLE_POINT_TIME_ZONE, None), + ) + self.put_workflow_params_1( self.KEY_RUN_MODE, - self.KEY_CYCLE_POINT_TIME_ZONE - ): - self.put_workflow_params_1(key, getattr(schd.options, key, None)) + schd.get_run_mode().value, + ) def put_workflow_params_1( self, key: str, value: Union[AnyStr, float, None] diff --git a/cylc/flow/workflow_status.py b/cylc/flow/workflow_status.py index d6d6fb587dc..72761c08c87 100644 --- a/cylc/flow/workflow_status.py +++ b/cylc/flow/workflow_status.py @@ -23,8 +23,6 @@ from cylc.flow.wallclock import get_time_string_from_unix_time as time2str if TYPE_CHECKING: - from optparse import Values - from cylc.flow.cycling import PointBase from cylc.flow.scheduler import Scheduler from cylc.flow.task_pool import TaskPool @@ -202,21 +200,3 @@ def _get_earliest_stop_point_status_msg(pool: 'TaskPool') -> Optional[str]: if prop is None: return None return template % prop - - -class RunMode: - """The possible run modes of a workflow.""" - - LIVE = 'live' - """Workflow will run normally.""" - - SIMULATION = 'simulation' - """Workflow will run in simulation mode.""" - - DUMMY = 'dummy' - """Workflow will run in dummy mode.""" - - @staticmethod - def get(options: 'Values') -> str: - """Return the run mode from the options.""" - return getattr(options, 'run_mode', None) or RunMode.LIVE diff --git a/tests/flakyfunctional/database/00-simple.t b/tests/flakyfunctional/database/00-simple.t index c3f1ad19faf..832c35e46ec 100644 --- a/tests/flakyfunctional/database/00-simple.t +++ b/tests/flakyfunctional/database/00-simple.t @@ -49,7 +49,7 @@ fcp| icp|1 is_paused|0 n_restart|0 -run_mode| +run_mode|live startcp| stop_clock_time| stop_task| diff --git a/tests/functional/cylc-config/00-simple/section2.stdout b/tests/functional/cylc-config/00-simple/section2.stdout index 3d83ac15278..559d1c2556c 100644 --- a/tests/functional/cylc-config/00-simple/section2.stdout +++ b/tests/functional/cylc-config/00-simple/section2.stdout @@ -15,10 +15,14 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -90,10 +94,14 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -165,10 +173,14 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -240,12 +252,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = serial [[[meta]]] title = 
description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -316,12 +332,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -392,12 +412,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = serial [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -468,12 +492,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = serial [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -544,12 +572,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -620,12 +652,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -696,12 +732,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = serial [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -772,12 +812,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = serial [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -848,12 +892,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -924,12 +972,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = live [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = diff --git a/tests/functional/cylc-set/09-set-skip.t b/tests/functional/cylc-set/09-set-skip.t new file mode 100644 index 00000000000..dd314283700 --- /dev/null +++ b/tests/functional/cylc-set/09-set-skip.t @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. 
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+#
+# Skip Mode proposal example:
+# https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+# The cylc set --out option should accept the skip value
+# which should set the outputs defined in
+# [runtime][<namespace>][skip]outputs.
+
+. "$(dirname "$0")/test_header"
+set_test_number 2
+reftest
+exit
diff --git a/tests/functional/cylc-set/09-set-skip/flow.cylc b/tests/functional/cylc-set/09-set-skip/flow.cylc
new file mode 100644
index 00000000000..17813d64d63
--- /dev/null
+++ b/tests/functional/cylc-set/09-set-skip/flow.cylc
@@ -0,0 +1,50 @@
+[meta]
+    test_description = """
+        Test that cylc set --out skip satisfies
+        all outputs which are required by the graph.
+    """
+    proposal url = https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+
+[scheduler]
+    allow implicit tasks = true
+    [[events]]
+        expected task failures = 1/bar
+
+[scheduling]
+    [[graph]]
+        R1 = """
+            # Optional out not created by set --out skip
+            foo:no? => not_this_task
+
+            # set --out skip creates required, started, submitted
+            # and succeeded (unless failed is set):
+            foo:yes => foo_done
+            foo:submitted => foo_done
+            foo:succeeded => foo_done
+            foo:started => do_skip
+
+            # set --out skip creates failed if that is required
+            # by skip mode settings:
+            bar:started => do_skip
+            bar:failed?
=> bar_failed + """ + +[runtime] + [[foo]] + script = sleep 100 + [[[skip]]] + outputs = yes + [[[outputs]]] + no = Don't require this task + yes = Require this task + + [[bar]] + script = sleep 100 + [[[skip]]] + outputs = failed + + [[do_skip]] + script = """ + cylc set --out skip ${CYLC_WORKFLOW_ID}//1/foo \ + ${CYLC_WORKFLOW_ID}//1/bar + """ diff --git a/tests/functional/cylc-set/09-set-skip/reference.log b/tests/functional/cylc-set/09-set-skip/reference.log new file mode 100644 index 00000000000..7db64d9f47c --- /dev/null +++ b/tests/functional/cylc-set/09-set-skip/reference.log @@ -0,0 +1,5 @@ +1/bar -triggered off [] in flow 1 +1/foo -triggered off [] in flow 1 +1/do_skip -triggered off ['1/bar', '1/foo'] in flow 1 +1/foo_done -triggered off ['1/foo', '1/foo', '1/foo'] in flow 1 +1/bar_failed -triggered off ['1/bar'] in flow 1 diff --git a/tests/functional/modes/01-dummy.t b/tests/functional/run_modes/01-dummy.t similarity index 100% rename from tests/functional/modes/01-dummy.t rename to tests/functional/run_modes/01-dummy.t diff --git a/tests/functional/modes/01-dummy/flow.cylc b/tests/functional/run_modes/01-dummy/flow.cylc similarity index 100% rename from tests/functional/modes/01-dummy/flow.cylc rename to tests/functional/run_modes/01-dummy/flow.cylc diff --git a/tests/functional/modes/01-dummy/reference.log b/tests/functional/run_modes/01-dummy/reference.log similarity index 100% rename from tests/functional/modes/01-dummy/reference.log rename to tests/functional/run_modes/01-dummy/reference.log diff --git a/tests/functional/modes/02-dummy-message-outputs.t b/tests/functional/run_modes/02-dummy-message-outputs.t similarity index 100% rename from tests/functional/modes/02-dummy-message-outputs.t rename to tests/functional/run_modes/02-dummy-message-outputs.t diff --git a/tests/functional/modes/02-dummy-message-outputs/flow.cylc b/tests/functional/run_modes/02-dummy-message-outputs/flow.cylc similarity index 100% rename from tests/functional/modes/02-dummy-message-outputs/flow.cylc rename to tests/functional/run_modes/02-dummy-message-outputs/flow.cylc diff --git a/tests/functional/modes/02-dummy-message-outputs/reference.log b/tests/functional/run_modes/02-dummy-message-outputs/reference.log similarity index 100% rename from tests/functional/modes/02-dummy-message-outputs/reference.log rename to tests/functional/run_modes/02-dummy-message-outputs/reference.log diff --git a/tests/functional/modes/03-simulation.t b/tests/functional/run_modes/03-simulation.t similarity index 100% rename from tests/functional/modes/03-simulation.t rename to tests/functional/run_modes/03-simulation.t diff --git a/tests/functional/modes/03-simulation/flow.cylc b/tests/functional/run_modes/03-simulation/flow.cylc similarity index 100% rename from tests/functional/modes/03-simulation/flow.cylc rename to tests/functional/run_modes/03-simulation/flow.cylc diff --git a/tests/functional/modes/03-simulation/reference.log b/tests/functional/run_modes/03-simulation/reference.log similarity index 100% rename from tests/functional/modes/03-simulation/reference.log rename to tests/functional/run_modes/03-simulation/reference.log diff --git a/tests/functional/modes/04-simulation-runtime.t b/tests/functional/run_modes/04-simulation-runtime.t similarity index 100% rename from tests/functional/modes/04-simulation-runtime.t rename to tests/functional/run_modes/04-simulation-runtime.t diff --git a/tests/functional/modes/04-simulation-runtime/flow.cylc b/tests/functional/run_modes/04-simulation-runtime/flow.cylc 
similarity index 100%
rename from tests/functional/modes/04-simulation-runtime/flow.cylc
rename to tests/functional/run_modes/04-simulation-runtime/flow.cylc
diff --git a/tests/functional/modes/04-simulation-runtime/reference.log b/tests/functional/run_modes/04-simulation-runtime/reference.log
similarity index 100%
rename from tests/functional/modes/04-simulation-runtime/reference.log
rename to tests/functional/run_modes/04-simulation-runtime/reference.log
diff --git a/tests/functional/modes/05-sim-trigger.t b/tests/functional/run_modes/05-sim-trigger.t
similarity index 100%
rename from tests/functional/modes/05-sim-trigger.t
rename to tests/functional/run_modes/05-sim-trigger.t
diff --git a/tests/functional/modes/05-sim-trigger/flow.cylc b/tests/functional/run_modes/05-sim-trigger/flow.cylc
similarity index 100%
rename from tests/functional/modes/05-sim-trigger/flow.cylc
rename to tests/functional/run_modes/05-sim-trigger/flow.cylc
diff --git a/tests/functional/modes/05-sim-trigger/reference.log b/tests/functional/run_modes/05-sim-trigger/reference.log
similarity index 100%
rename from tests/functional/modes/05-sim-trigger/reference.log
rename to tests/functional/run_modes/05-sim-trigger/reference.log
diff --git a/tests/functional/run_modes/06-run-mode-overrides.t b/tests/functional/run_modes/06-run-mode-overrides.t
new file mode 100644
index 00000000000..c7fc3325b8f
--- /dev/null
+++ b/tests/functional/run_modes/06-run-mode-overrides.t
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.

+# Testing Skip mode functionality.

+. "$(dirname "$0")/test_header"
+set_test_number 6

+# Install and run the workflow in live mode (default).
+# Check that tasks with run mode unset and run mode = live
+# leave log files, and that skip mode tasks don't.
+TEST_NAME="${TEST_NAME_BASE}:live-workflow" +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" +run_ok "${TEST_NAME}:validate" cylc validate "${WORKFLOW_NAME}" +workflow_run_ok "${TEST_NAME}:play" \ + cylc play "${WORKFLOW_NAME}" \ + --no-detach + +JOB_LOGS="${WORKFLOW_RUN_DIR}/log/job/1000" +run_fail "${TEST_NAME}:config run mode=skip" ls "${JOB_LOGS}/skip_" +for MODE in default live; do + named_grep_ok "${TEST_NAME}:config run mode=${MODE}" "===.*===" "${JOB_LOGS}/${MODE}_/NN/job.out" +done + +# After broadcasting a change in run_mode to task default_ it now runs +# in skip mode and fails to produce a log file: +JOB_LOGS="${WORKFLOW_RUN_DIR}/log/job/1001" +run_fail "${TEST_NAME}:broadcast run mode=skip" ls "${JOB_LOGS}/default_/" + +purge +exit 0 diff --git a/tests/functional/run_modes/06-run-mode-overrides/flow.cylc b/tests/functional/run_modes/06-run-mode-overrides/flow.cylc new file mode 100644 index 00000000000..6d1b1258833 --- /dev/null +++ b/tests/functional/run_modes/06-run-mode-overrides/flow.cylc @@ -0,0 +1,28 @@ +#!Jinja2 +[scheduler] + cycle point format = %Y + +[scheduling] + initial cycle point = 1000 + final cycle point = 1001 + [[graph]] + R1/1000 = default_ & live_ & skip_ => end + R1/1001 = end[-P1Y] => broadcaster => default_ + +[runtime] + [[root]] + script = echo "=== this task ran in live mode ===" + [[[simulation]]] + default run length = PT0S + [[default_, end]] + [[live_]] + run mode = live + [[skip_]] + run mode = skip + [[broadcaster]] + script = """ + cylc broadcast "${CYLC_WORKFLOW_ID}" \ + --name default_ \ + --point 1001 \ + --set='run mode="{{changemode | default("skip")}}"' + """ diff --git a/tests/functional/modes/test_header b/tests/functional/run_modes/test_header similarity index 100% rename from tests/functional/modes/test_header rename to tests/functional/run_modes/test_header diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index fe4b19ab92b..6300cefc74e 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -36,6 +36,7 @@ from cylc.flow.network.client import WorkflowRuntimeClient from cylc.flow.option_parsers import Options from cylc.flow.pathutil import get_cylc_run_dir +from cylc.flow.run_modes import RunMode from cylc.flow.rundb import CylcWorkflowDAO from cylc.flow.scripts.install import ( get_option_parser as install_gop, @@ -46,6 +47,10 @@ prereqs_and_outputs_query, ) from cylc.flow.scripts.validate import ValidateOptions +from cylc.flow.task_state import ( + TASK_STATUS_SUBMITTED, + TASK_STATUS_SUCCEEDED, +) from cylc.flow.util import serialise_set from cylc.flow.wallclock import get_current_time_string from cylc.flow.workflow_files import infer_latest_run_from_id @@ -721,3 +726,43 @@ async def _cylc_show(schd: 'Scheduler', *task_ids: str) -> dict: return json_filter return _cylc_show + + +@pytest.fixture +def capture_live_submissions(capcall, monkeypatch): + """Capture live submission attempts. + + This prevents real jobs from being submitted to the system. + + If you call this fixture from a test, it will return a set of tasks that + would have been submitted had this fixture not been used. 
+    """
+    def fake_submit(self, _workflow, itasks, *_):
+        self.submit_nonlive_task_jobs(_workflow, itasks, RunMode.SIMULATION)
+        for itask in itasks:
+            for status in (TASK_STATUS_SUBMITTED, TASK_STATUS_SUCCEEDED):
+                self.task_events_mgr.process_message(
+                    itask,
+                    'INFO',
+                    status,
+                    '2000-01-01T00:00:00Z',
+                    '(received)',
+                )
+        return itasks
+
+    # suppress and capture live submissions
+    submit_live_calls = capcall(
+        'cylc.flow.task_job_mgr.TaskJobManager.submit_livelike_task_jobs',
+        fake_submit)
+
+    def get_submissions():
+        nonlocal submit_live_calls
+        return {
+            itask.identity
+            for ((_self, _workflow, itasks, *_), _kwargs) in submit_live_calls
+            for itask in itasks
+        }
+
+    return get_submissions
diff --git a/tests/integration/run_modes/test_mode_overrides.py b/tests/integration/run_modes/test_mode_overrides.py
new file mode 100644
index 00000000000..25cdf6cf68a
--- /dev/null
+++ b/tests/integration/run_modes/test_mode_overrides.py
@@ -0,0 +1,166 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""Test that using [runtime][TASK]run mode works in each mode.
+
+Point 3 of the Skip Mode proposal
+https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+
+| The run mode should be controlled by a new task configuration
+| [runtime][<namespace>]run mode with the default being live.
+| As a runtime configuration, this can be defined in the workflow
+| for development / testing purposes or set by cylc broadcast.
+
+N.B. This is pretty much a functional test and probably ought to
+be labelled as such, but it uses the
+integration test framework.
+""" + +import pytest + +from cylc.flow.cycling.iso8601 import ISO8601Point +from cylc.flow.run_modes import WORKFLOW_RUN_MODES, RunMode +from cylc.flow.scheduler import Scheduler, SchedulerStop +from cylc.flow.task_state import TASK_STATUS_WAITING + + +@pytest.mark.parametrize('workflow_run_mode', sorted(WORKFLOW_RUN_MODES)) +async def test_run_mode_override_from_config( + capture_live_submissions, + flow, + scheduler, + run, + complete, + workflow_run_mode, + validate +): + """Test that `[runtime][]run mode` overrides workflow modes.""" + id_ = flow({ + 'scheduling': { + 'graph': { + 'R1': 'live & skip', + }, + }, + 'runtime': { + 'live': {'run mode': 'live'}, + 'skip': {'run mode': 'skip'}, + } + }) + run_mode = RunMode(workflow_run_mode) + validate(id_) + schd = scheduler(id_, run_mode=run_mode, paused_start=False) + async with run(schd): + await complete(schd) + + if workflow_run_mode == 'live': + assert capture_live_submissions() == {'1/live'} + elif workflow_run_mode == 'dummy': + # Skip mode doesn't override dummy mode: + assert capture_live_submissions() == {'1/live', '1/skip'} + else: + assert capture_live_submissions() == set() + + +async def test_force_trigger_does_not_override_run_mode( + flow, + scheduler, + start, +): + """Force-triggering a task will not override the run mode. + + Taken from spec at + https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md#proposal + """ + wid = flow({ + 'scheduling': {'graph': {'R1': 'foo'}}, + 'runtime': {'foo': {'run mode': 'skip'}} + }) + schd = scheduler(wid, run_mode="live") + async with start(schd): + foo = schd.pool.get_tasks()[0] + + # Force trigger task: + schd.pool.force_trigger_tasks('1/foo', [1]) + + # ... but job submission will always change this to the correct mode: + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + [foo], + schd.server.curve_auth, + schd.server.client_pub_key_dir) + + assert foo.run_mode.value == 'skip' + + +async def test_run_mode_skip_abides_by_held(flow, scheduler, run): + """Tasks with run mode = skip will continue to abide by the + is_held flag as normal. + + Taken from spec at + https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md#proposal + """ + wid = flow({ + 'scheduling': {'graph': {'R1': 'foo'}}, + 'runtime': {'foo': {'run mode': 'skip'}} + }) + schd: Scheduler = scheduler(wid, run_mode="live", paused_start=False) + async with run(schd): + foo = schd.pool.get_tasks()[0] + assert not foo.state.is_held + + # Hold task, check that it's held: + schd.pool.hold_tasks(['1/foo']) + assert foo.state.is_held + await schd._main_loop() + assert foo.state(TASK_STATUS_WAITING) + + schd.pool.release_held_tasks(['1/foo']) + assert not foo.state.is_held + with pytest.raises(SchedulerStop): + # Will shut down as foo has run + await schd._main_loop() + + +async def test_run_mode_override_from_broadcast( + flow, scheduler, start, complete, log_filter, capture_live_submissions +): + """Test that run_mode modifications only apply to one task. 
+ """ + cfg = { + "scheduler": {"cycle point format": "%Y"}, + "scheduling": { + "initial cycle point": "1000", + "final cycle point": "1001", + "graph": {"P1Y": "foo"}}, + "runtime": { + } + } + id_ = flow(cfg) + schd = scheduler(id_, run_mode='live', paused_start=False) + + async with start(schd): + schd.broadcast_mgr.put_broadcast( + ['1000'], ['foo'], [{'run mode': 'skip'}]) + + foo_1000 = schd.pool.get_task(ISO8601Point('1000'), 'foo') + foo_1001 = schd.pool.get_task(ISO8601Point('1001'), 'foo') + + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + [foo_1000, foo_1001], + schd.server.curve_auth, + schd.server.client_pub_key_dir) + assert foo_1000.run_mode.value == 'skip' + assert capture_live_submissions() == {'1001/foo'} diff --git a/tests/integration/run_modes/test_nonlive.py b/tests/integration/run_modes/test_nonlive.py new file mode 100644 index 00000000000..90cefbf7701 --- /dev/null +++ b/tests/integration/run_modes/test_nonlive.py @@ -0,0 +1,178 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import pytest +from typing import Any, Dict + +from cylc.flow.cycling.integer import IntegerPoint +from cylc.flow.cycling.iso8601 import ISO8601Point + + +# Define here to ensure test doesn't just mirror code: +KGO = { + 'live': { + 'flow_nums': '[1]', + 'is_manual_submit': 0, + 'try_num': 1, + 'submit_status': 0, + 'run_signal': None, + 'run_status': 0, + 'platform_name': 'simulation', + 'job_runner_name': 'simulation', + 'job_id': None}, + 'skip': { + 'flow_nums': '[1]', + 'is_manual_submit': 0, + 'try_num': 1, + 'submit_status': 0, + 'run_signal': None, + 'run_status': 0, + 'platform_name': 'skip', + 'job_runner_name': 'skip', + 'job_id': None}, +} + + +def not_time(data: Dict[str, Any]): + """Filter out fields containing times to reduce risk of + flakiness""" + return {k: v for k, v in data.items() if 'time' not in k} + + +@pytest.fixture +def submit_and_check_db(): + """Wraps up testing that we want to do repeatedly in + test_db_task_jobs. 
+ """ + def _inner(schd): + # Submit task jobs: + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + schd.pool.get_tasks(), + schd.server.curve_auth, + schd.server.client_pub_key_dir + ) + # Make sure that db changes are enacted: + schd.workflow_db_mgr.process_queued_ops() + + for mode, kgo in KGO.items(): + task_jobs = schd.workflow_db_mgr.pub_dao.select_task_job(1, mode) + + # Check all non-datetime items against KGO: + assert not_time(task_jobs) == kgo, ( + f'Mode {mode}: incorrect db entries.') + + # Check that timestamps have been created: + for timestamp in [ + 'time_submit', 'time_submit_exit', 'time_run', 'time_run_exit' + ]: + assert task_jobs[timestamp] is not None + return _inner + + +async def test_db_task_jobs( + flow, scheduler, start, capture_live_submissions, + submit_and_check_db +): + """Ensure that task job data is added to the database correctly + for each run mode. + """ + schd = scheduler(flow({ + 'scheduling': {'graph': { + 'R1': '&'.join(KGO)}}, + 'runtime': { + mode: {'run mode': mode} for mode in KGO} + })) + async with start(schd): + # Reference all task proxies so we can examine them + # at the end of the test: + itask_skip = schd.pool.get_task(IntegerPoint('1'), 'skip') + itask_live = schd.pool.get_task(IntegerPoint('1'), 'live') + + + submit_and_check_db(schd) + + # Set outputs to failed: + schd.pool.set_prereqs_and_outputs('*', ['failed'], [], []) + + submit_and_check_db(schd) + + assert itask_live.run_mode.value == 'simulation' + assert itask_skip.run_mode.value == 'skip' + + +async def test_db_task_states( + one_conf, flow, scheduler, start +): + """Test that tasks will have the same information entered into the task + state database whichever mode is used. + """ + conf = one_conf + conf['runtime'] = {'one': {'run mode': 'skip'}} + schd = scheduler(flow(conf)) + async with start(schd): + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + schd.pool.get_tasks(), + schd.server.curve_auth, + schd.server.client_pub_key_dir + ) + schd.workflow_db_mgr.process_queued_ops() + result = schd.workflow_db_mgr.pri_dao.connect().execute( + 'SELECT * FROM task_states').fetchone() + + # Submit number has been added to the table: + assert result[5] == 1 + # time_created added to the table + assert result[3] + + +async def test_mean_task_time( + flow, scheduler, start, complete, capture_live_submissions +): + """Non-live tasks are not added to the list of task times, + so skipping tasks will not affect how long Cylc expects tasks to run. + """ + schd = scheduler(flow({ + 'scheduling': { + 'initial cycle point': '1000', + 'final cycle point': '1002', + 'graph': {'P1Y': 'foo'}} + }), run_mode='live') + + async with start(schd): + itask = schd.pool.get_task(ISO8601Point('10000101T0000Z'), 'foo') + assert list(itask.tdef.elapsed_times) == [] + + # Make the task run in skip mode at one cycle: + schd.broadcast_mgr.put_broadcast( + ['1000'], ['foo'], [{'run mode': 'skip'}]) + + # Fake adding some other examples of the task: + itask.tdef.elapsed_times.extend([133.0, 132.4]) + + # Submit two tasks: + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + [itask], + schd.server.curve_auth, + schd.server.client_pub_key_dir + ) + + # Ensure that the skipped task has succeeded, and that the + # number of items in the elapsed_times has not changed. 
+ assert itask.state.status == 'succeeded' + assert len(itask.tdef.elapsed_times) == 2 diff --git a/tests/integration/run_modes/test_simulation.py b/tests/integration/run_modes/test_simulation.py index 4d1cd0b7ed9..72cbd7e10f1 100644 --- a/tests/integration/run_modes/test_simulation.py +++ b/tests/integration/run_modes/test_simulation.py @@ -1,6 +1,6 @@ # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. # Copyright (C) NIWA & British Crown (Met Office) & Contributors. - +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or @@ -16,6 +16,15 @@ """Test the workings of simulation mode""" +from pathlib import Path +import pytest +from pytest import param + +from cylc.flow import commands +from cylc.flow.cycling.iso8601 import ISO8601Point +from cylc.flow.run_modes import RunMode +from cylc.flow.run_modes.simulation import sim_time_check + async def test_started_trigger(flow, reftest, scheduler): """Does the started task output trigger downstream tasks @@ -32,3 +41,436 @@ async def test_started_trigger(flow, reftest, scheduler): ('1/a', None), ('1/b', ('1/a',)) } + + +@pytest.fixture +def monkeytime(monkeypatch): + """Convenience function monkeypatching time.""" + def _inner(time_: int): + monkeypatch.setattr('cylc.flow.task_job_mgr.time', lambda: time_) + monkeypatch.setattr( + 'cylc.flow.run_modes.simulation.time', lambda: time_) + return _inner + + +@pytest.fixture +def run_simjob(monkeytime): + """Run a simulated job to completion. + + Returns the output status. + """ + def _run_simjob(schd, point, task): + itask = schd.pool.get_task(point, task) + itask.state.is_queued = False + monkeytime(0) + schd.task_job_mgr.submit_nonlive_task_jobs( + schd.workflow, [itask], RunMode.SIMULATION) + monkeytime(itask.mode_settings.timeout + 1) + + # Run Time Check + assert sim_time_check( + schd.task_events_mgr, [itask], + schd.workflow_db_mgr + ) is True + + # Capture result process queue. + return itask + return _run_simjob + + +@pytest.fixture(scope='module') +async def sim_time_check_setup( + mod_flow, mod_scheduler, mod_start, mod_one_conf, +): + schd = mod_scheduler(mod_flow({ + 'scheduler': {'cycle point format': '%Y'}, + 'scheduling': { + 'initial cycle point': '1066', + 'graph': { + 'R1': 'one & fail_all & fast_forward', + 'P1Y': 'fail_once & fail_all_submits' + } + }, + 'runtime': { + 'one': {}, + 'fail_all': { + 'simulation': { + 'fail cycle points': 'all', + 'fail try 1 only': False + }, + 'outputs': {'foo': 'bar'} + }, + # This task ought not be finished quickly, but for the speed up + 'fast_forward': { + 'execution time limit': 'PT1M', + 'simulation': {'speedup factor': 2} + }, + 'fail_once': { + 'simulation': { + 'fail cycle points': '1066, 1068', + } + }, + 'fail_all_submits': { + 'simulation': { + 'fail cycle points': '1066', + 'fail try 1 only': False, + } + } + } + })) + async with mod_start(schd): + itasks = schd.pool.get_tasks() + [schd.task_job_mgr._set_retry_timers(i) for i in itasks] + yield schd, itasks + + +def test_false_if_not_running( + sim_time_check_setup, monkeypatch +): + schd, itasks = sim_time_check_setup + + itasks = [i for i in itasks if i.state.status != 'running'] + + # False if task status not running: + assert sim_time_check(schd.task_events_mgr, itasks, '') is False + + +@pytest.mark.parametrize( + 'itask, point, results', + ( + # Task fails this CP, first submit. 
+        param(
+            'fail_once', '1066', (True, False, False),
+            id='only-fail-on-submit-1'),
+        # Task succeeds this CP, all submits.
+        param(
+            'fail_once', '1067', (False, False, False),
+            id='do-not-fail-this-cp'),
+        # Task fails this CP, first submit.
+        param(
+            'fail_once', '1068', (True, False, False),
+            id='and-another-cp'),
+        # Task fails this CP, all submits.
+        param(
+            'fail_all_submits', '1066', (True, True, True),
+            id='fail-all-submits'),
+        # Task succeeds this CP, all submits.
+        param(
+            'fail_all_submits', '1067', (False, False, False),
+            id='fail-no-submits'),
+    )
+)
+def test_fail_once(sim_time_check_setup, itask, point, results, monkeypatch):
+    """A task with a fail cycle point only fails
+    at that cycle point, and then only on the first submission.
+    """
+    schd, _ = sim_time_check_setup
+
+    itask = schd.pool.get_task(
+        ISO8601Point(point), itask)
+
+    for i, result in enumerate(results):
+        itask.try_timers['execution-retry'].num = i
+        schd.task_job_mgr.submit_nonlive_task_jobs(
+            schd.workflow, [itask], RunMode.SIMULATION)
+        assert itask.mode_settings.sim_task_fails is result
+
+
+def test_task_finishes(sim_time_check_setup, monkeytime, caplog):
+    """...and an appropriate message sent.
+
+    Checks that failed and bar are output if a task is set to fail.
+
+    Does NOT check every possible cause of an outcome - this is done
+    in unit tests.
+    """
+    schd, _ = sim_time_check_setup
+    monkeytime(0)
+
+    # Set up a task to fail and submit it.
+    fail_all_1066 = schd.pool.get_task(ISO8601Point('1066'), 'fail_all')
+    fail_all_1066.state.status = 'running'
+    fail_all_1066.state.is_queued = False
+    schd.task_job_mgr.submit_nonlive_task_jobs(
+        schd.workflow, [fail_all_1066], RunMode.SIMULATION)
+
+    # For the purpose of the test delete the started time set by
+    # submit_nonlive_task_jobs.
+    fail_all_1066.summary['started_time'] = 0
+
+    # Before simulation time is up:
+    assert sim_time_check(schd.task_events_mgr, [fail_all_1066], '') is False
+
+    # Time's up...
+    monkeytime(12)
+
+    # After simulation time is up it fails and records custom outputs:
+    assert sim_time_check(schd.task_events_mgr, [fail_all_1066], '') is True
+    outputs = fail_all_1066.state.outputs
+    assert outputs.is_message_complete('succeeded') is False
+    assert outputs.is_message_complete('bar') is True
+    assert outputs.is_message_complete('failed') is True
+
+
+def test_task_sped_up(sim_time_check_setup, monkeytime):
+    """Task will speed up by a factor set in config."""
+
+    schd, _ = sim_time_check_setup
+    fast_forward_1066 = schd.pool.get_task(
+        ISO8601Point('1066'), 'fast_forward')
+
+    # Run the job submission method:
+    monkeytime(0)
+    schd.task_job_mgr.submit_nonlive_task_jobs(
+        schd.workflow, [fast_forward_1066], RunMode.SIMULATION)
+    fast_forward_1066.state.is_queued = False
+
+    result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '')
+    assert result is False
+    monkeytime(29)
+    result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '')
+    assert result is False
+    monkeytime(31)
+    result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '')
+    assert result is True
+
+
+async def test_settings_restart(monkeytime, flow, scheduler, start):
+    """Check that simulation mode settings are correctly restored
+    upon restart.
+
+    In the case of start time this is collected from the database
+    from task_jobs.start_time.
+
+    tasks:
+        one: Runs straightforwardly.
+        two: Test case where database is missing started_time
+             because it was upgraded from an earlier version of Cylc.
+ """ + id_ = flow({ + 'scheduler': {'cycle point format': '%Y'}, + 'scheduling': { + 'initial cycle point': '1066', + 'graph': { + 'R1': 'one & two' + } + }, + 'runtime': { + 'root': { + 'execution time limit': 'PT1M', + 'execution retry delays': 'P0Y', + 'simulation': { + 'speedup factor': 1, + 'fail cycle points': 'all', + 'fail try 1 only': True, + } + }, + } + }) + schd = scheduler(id_) + # Start the workflow: + async with start(schd): + og_timeouts = {} + for itask in schd.pool.get_tasks(): + schd.task_job_mgr.submit_nonlive_task_jobs( + schd.workflow, [itask], RunMode.SIMULATION) + + og_timeouts[itask.identity] = itask.mode_settings.timeout + + # Mock wallclock < sim end timeout + monkeytime(itask.mode_settings.timeout - 1) + assert sim_time_check( + schd.task_events_mgr, [itask], schd.workflow_db_mgr + ) is False + + # Stop and restart the scheduler: + schd = scheduler(id_) + async with start(schd) as log: + for itask in schd.pool.get_tasks(): + # Check that we haven't got mode settings back: + assert itask.mode_settings is None + + if itask.identity == '1066/two': + # Delete the database entry for `two`: Ensure that + # we don't break sim mode on upgrade to this version of Cylc. + schd.workflow_db_mgr.pri_dao.connect().execute( + 'UPDATE task_jobs' + '\n SET time_submit = NULL' + '\n WHERE (name == \'two\')' + ) + schd.workflow_db_mgr.process_queued_ops() + monkeytime(42) + expected_timeout = 102.0 + else: + monkeytime(og_timeouts[itask.identity] - 1) + expected_timeout = float(int(og_timeouts[itask.identity])) + + assert sim_time_check( + schd.task_events_mgr, [itask], schd.workflow_db_mgr + ) is False + + # Check that the itask.mode_settings is now re-created + + assert itask.mode_settings.simulated_run_length == 60.0 + assert itask.mode_settings.sim_task_fails is True + + +async def test_settings_reload( + flow, scheduler, start, run_simjob +): + """Check that simulation mode settings are changed for future + pseudo jobs on reload. + + """ + id_ = flow({ + 'scheduler': {'cycle point format': '%Y'}, + 'scheduling': { + 'initial cycle point': '1066', + 'graph': {'R1': 'one'} + }, + 'runtime': { + 'one': { + 'execution time limit': 'PT1M', + 'execution retry delays': 'P0Y', + 'simulation': { + 'speedup factor': 1, + 'fail cycle points': 'all', + 'fail try 1 only': False, + } + }, + } + }) + schd = scheduler(id_) + async with start(schd): + # Submit first psuedo-job and "run" to failure: + one_1066 = schd.pool.get_task(ISO8601Point('1066'), 'one') + + itask = run_simjob(schd, one_1066.point, 'one') + assert itask.state.outputs.is_message_complete('failed') is False + + # Modify config as if reinstall had taken place: + conf_file = Path(schd.workflow_run_dir) / 'flow.cylc' + conf_file.write_text( + conf_file.read_text().replace('False', 'True')) + + # Reload Workflow: + await commands.run_cmd(commands.reload_workflow(schd)) + + # Submit second psuedo-job and "run" to success: + itask = run_simjob(schd, one_1066.point, 'one') + assert itask.state.outputs.is_message_complete('succeeded') is True + + +async def test_settings_broadcast( + flow, scheduler, start, monkeytime +): + """Assert that broadcasting a change in the settings for a task + affects subsequent psuedo-submissions. 
+    """
+    id_ = flow({
+        'scheduler': {'cycle point format': '%Y'},
+        'scheduling': {
+            'initial cycle point': '1066',
+            'graph': {'R1': 'one'}
+        },
+        'runtime': {
+            'one': {
+                'execution time limit': 'PT1S',
+                'execution retry delays': '2*PT5S',
+                'simulation': {
+                    'speedup factor': 1,
+                    'fail cycle points': '1066',
+                    'fail try 1 only': False
+                }
+            },
+        }
+    }, defaults=False)
+    schd = scheduler(id_, paused_start=False, run_mode='simulation')
+    async with start(schd) as log:
+        itask = schd.pool.get_task(ISO8601Point('1066'), 'one')
+        itask.state.is_queued = False
+
+        # Submit the first - the sim task will fail:
+        schd.task_job_mgr.submit_nonlive_task_jobs(
+            schd.workflow, [itask], RunMode.SIMULATION)
+        assert itask.mode_settings.sim_task_fails is True
+
+        # Let task finish.
+        monkeytime(itask.mode_settings.timeout + 1)
+        assert sim_time_check(
+            schd.task_events_mgr, [itask],
+            schd.workflow_db_mgr
+        ) is True
+
+        # The mode_settings object has been cleared:
+        assert itask.mode_settings is None
+        # Change a setting using broadcast:
+        schd.broadcast_mgr.put_broadcast(
+            ['1066'], ['one'], [{
+                'simulation': {'fail cycle points': ''}
+            }])
+        # Submit again - result is different:
+        schd.task_job_mgr.submit_nonlive_task_jobs(
+            schd.workflow, [itask], RunMode.SIMULATION)
+        assert itask.mode_settings.sim_task_fails is False
+
+        # Assert that clearing the broadcast works:
+        schd.broadcast_mgr.clear_broadcast()
+        schd.task_job_mgr.submit_nonlive_task_jobs(
+            schd.workflow, [itask], RunMode.SIMULATION)
+        assert itask.mode_settings.sim_task_fails is True
+
+        # Assert that the list of broadcasts doesn't change if we submit
+        # invalid fail cycle points to broadcast.
+        itask.mode_settings = None
+        schd.broadcast_mgr.put_broadcast(
+            ['1066'], ['one'], [{
+                'simulation': {'fail cycle points': 'higadfuhasgiurguj'}
+            }])
+        schd.task_job_mgr.submit_nonlive_task_jobs(
+            schd.workflow, [itask], RunMode.SIMULATION)
+        assert (
+            'Invalid ISO 8601 date representation: higadfuhasgiurguj'
+            in log.messages[-1])
+
+        # Check that the invalid broadcast hasn't
+        # changed the itask sim mode settings:
+        assert itask.mode_settings.sim_task_fails is True
+
+        schd.broadcast_mgr.put_broadcast(
+            ['1066'], ['one'], [{
+                'simulation': {'fail cycle points': '1'}
+            }])
+        schd.task_job_mgr.submit_nonlive_task_jobs(
+            schd.workflow, [itask], RunMode.SIMULATION)
+        assert (
+            'Invalid ISO 8601 date representation: 1'
+            in log.messages[-1])
+
+        # Broadcast tasks will reparse correctly:
+        schd.broadcast_mgr.put_broadcast(
+            ['1066'], ['one'], [{
+                'simulation': {'fail cycle points': '1945, 1977, 1066'},
+                'execution retry delays': '3*PT2S'
+            }])
+        schd.task_job_mgr.submit_nonlive_task_jobs(
+            schd.workflow, [itask], RunMode.SIMULATION)
+        assert itask.mode_settings.sim_task_fails is True
+        assert itask.try_timers['execution-retry'].delays == [2.0, 2.0, 2.0]
+        # n.b. rtconfig should remain unchanged, lest we cancel broadcasts:
+        assert itask.tdef.rtconfig['execution retry delays'] == [5.0, 5.0]
+
+
+async def test_db_submit_num(
+    flow, one_conf, scheduler, run, complete, db_select
+):
+    """Test simulation mode correctly increments the submit_num in the DB."""
+    one_conf['runtime'] = {
+        'one': {'simulation': {'default run length': 'PT0S'}}
+    }
+    schd = scheduler(flow(one_conf), paused_start=False)
+    async with run(schd):
+        await complete(schd, '1/one', timeout=10)
+    assert db_select(schd, False, 'task_states', 'submit_num', 'status') == [
+        (1, 'succeeded'),
+    ]
diff --git a/tests/integration/run_modes/test_skip.py b/tests/integration/run_modes/test_skip.py
new file mode 100644
index 00000000000..fb58a82d427
--- /dev/null
+++ b/tests/integration/run_modes/test_skip.py
@@ -0,0 +1,259 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""Tests for skip mode integration.
+"""
+
+from cylc.flow.cycling.integer import IntegerPoint
+
+
+async def test_settings_override_from_broadcast(
+    flow, scheduler, start, complete, log_filter
+):
+    """Test that skip mode runs differently if settings are modified.
+    """
+    cfg = {
+        "scheduling": {"graph": {"R1": "foo:failed => bar"}},
+        "runtime": {
+            "foo": {
+                "events": {
+                    'handler events': 'failed',
+                    "handlers": 'echo "HELLO"'
+                }
+            }
+        }
+    }
+    id_ = flow(cfg)
+    schd = scheduler(id_, run_mode='live')
+
+    async with start(schd):
+        schd.broadcast_mgr.put_broadcast(
+            ['1'],
+            ['foo'],
+            [
+                {'run mode': 'skip'},
+                {'skip': {'outputs': 'failed'}},
+                {'skip': {'disable task event handlers': "False"}}
+            ]
+        )
+
+        foo, = schd.pool.get_tasks()
+
+        schd.task_job_mgr.submit_task_jobs(
+            schd.workflow,
+            schd.pool.get_tasks(),
+            schd.server.curve_auth,
+            schd.server.client_pub_key_dir
+        )
+        # Run mode has changed:
+        assert foo.platform['name'] == 'skip'
+        # Output failed emitted:
+        assert foo.state.status == 'failed'
+        # After processing events there is a handler in the subprocpool:
+        schd.task_events_mgr.process_events(schd)
+        assert 'echo "HELLO"' in schd.proc_pool.is_not_done()[0][0].cmd
+
+
+async def test_broadcast_changes_set_skip_outputs(
+    flow, scheduler, start, complete, log_filter
+):
+    """When cylc set --out skip is used, task outputs are updated with
+    broadcasts.
+
+    Skip mode proposal point 4
+    https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+
+    | The cylc set --out option should accept the skip value which should
+    | set the outputs defined in [runtime][<namespace>][skip]outputs.
+    | The skip keyword should not be allowed in custom outputs.
+    """
+    wid = flow({
+        'scheduling': {'graph': {'R1': 'foo:x?\nfoo:y?'}},
+        'runtime': {'foo': {'outputs': {
+            'x': 'some message', 'y': 'another message'}}}
+    })
+    schd = scheduler(wid, run_mode='live')
+    async with start(schd):
+        schd.broadcast_mgr.put_broadcast(
+            ['1'],
+            ['foo'],
+            [{'skip': {'outputs': 'x'}}],
+        )
+        foo, = schd.pool.get_tasks()
+        schd.pool.set_prereqs_and_outputs(
+            '1/foo', ['skip'], [], ['all'])
+
+        foo_outputs = foo.state.outputs.get_completed_outputs()
+
+        assert foo_outputs == {
+            'submitted': '(manually completed)',
+            'started': '(manually completed)',
+            'succeeded': '(manually completed)',
+            'x': '(manually completed)'}
+
+
+async def test_skip_mode_outputs(
+    flow, scheduler, reftest,
+):
+    """Skip mode can be configured by the `[runtime][<namespace>][skip]`
+    section.
+
+    Skip mode proposal point 2
+    https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+    """
+    graph = """
+        # By default, all required outputs will be generated
+        # plus succeeded if success is optional:
+        foo? & foo:required_out => success_if_optional & required_outs
+
+        # The outputs submitted and started are always produced
+        # and do not need to be defined in outputs:
+        foo:submitted => submitted_always
+        foo:started => started_always
+
+        # If outputs is specified and does not include either
+        # succeeded or failed then succeeded will be produced.
+        opt:optional_out? => optional_outs_produced
+
+        should_fail:fail => did_fail
+    """
+    wid = flow({
+        'scheduling': {'graph': {'R1': graph}},
+        'runtime': {
+            'root': {
+                'run mode': 'skip',
+                'outputs': {
+                    'required_out': 'the plans have been on display...',
+                    'optional_out': 'its only four light years away...'
+                }
+            },
+            'opt': {
+                'skip': {
+                    'outputs': 'optional_out'
+                }
+            },
+            'should_fail': {
+                'skip': {
+                    'outputs': 'failed'
+                }
+            }
+        }
+    })
+    schd = scheduler(wid, run_mode='live', paused_start=False)
+    assert await reftest(schd) == {
+        ('1/did_fail', ('1/should_fail',),),
+        ('1/foo', None,),
+        ('1/opt', None,),
+        ('1/optional_outs_produced', ('1/opt',),),
+        ('1/required_outs', ('1/foo', '1/foo',),),
+        ('1/should_fail', None,),
+        ('1/started_always', ('1/foo',),),
+        ('1/submitted_always', ('1/foo',),),
+        ('1/success_if_optional', ('1/foo', '1/foo',),),
+    }
+
+
+async def test_doesnt_release_held_tasks(
+    one_conf, flow, scheduler, start, log_filter, capture_live_submissions
+):
+    """Point 5 of the proposal
+    https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+
+    | Tasks with run mode = skip will continue to abide by the is_held
+    | flag as normal.
+
+    """
+    one_conf['runtime'] = {'one': {'run mode': 'skip'}}
+    schd = scheduler(flow(one_conf), run_mode='live', paused_start=False)
+    async with start(schd):
+        itask, = schd.pool.get_tasks()
+        msg = 'held tasks shouldn\'t {}'
+
+        # Set task to held and check submission in skip mode doesn't happen:
+        itask.state.is_held = True
+
+        # Relinquish control to the main loop.
+        schd.release_queued_tasks()
+
+        assert not log_filter(contains='=> running'), msg.format('run')
+        assert not log_filter(contains='=> succeeded'), msg.format('succeed')
+
+        # Release held task and assert that it now skips successfully:
+        schd.pool.release_held_tasks(['1/one'])
+        schd.release_queued_tasks()
+
+        assert log_filter(contains='=> running'), msg.format('run')
+        assert log_filter(contains='=> succeeded'), msg.format('succeed')
+
+
+async def test_prereqs_marked_satisfied_by_skip_mode(
+    flow, scheduler, start, log_filter, complete
+):
+    """Point 8 from the skip mode proposal
+    https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+
+    | When tasks are run in skip mode, the prerequisites which correspond
+    | to the outputs they generate should be marked as "satisfied by skip mode"
+    | rather than "satisfied naturally" for provenance reasons.
+    """
+    schd = scheduler(flow({
+        'scheduling': {'graph': {'R1': 'foo => bar'}},
+        'runtime': {'foo': {'run mode': 'skip'}}
+    }), run_mode='live')
+
+    async with start(schd):
+        foo = schd.pool.get_task(IntegerPoint(1), 'foo')
+        schd.task_job_mgr.submit_task_jobs(
+            schd.workflow,
+            [foo],
+            schd.server.curve_auth,
+            schd.server.client_pub_key_dir,
+            run_mode=schd.get_run_mode()
+        )
+        bar = schd.pool.get_task(IntegerPoint(1), 'bar')
+        satisfied_message, = bar.state.prerequisites[0]._satisfied.values()
+        assert satisfied_message == 'satisfied by skip mode'
+
+
+async def test_outputs_can_be_changed(
+    one_conf, flow, start, scheduler, validate
+):
+    """Broadcast can change the skip mode outputs between submissions."""
+    schd = scheduler(flow(one_conf), run_mode='live')
+    async with start(schd) as log:
+        # Broadcast the task into skip mode, output failed and submit it:
+        schd.broadcast_mgr.put_broadcast(
+            ["1"],
+            ["one"],
+            [
+                {"run mode": "skip"},
+                {"skip": {"outputs": "failed"}},
+            ],
+        )
+        schd.task_job_mgr.submit_task_jobs(
+            schd.workflow,
+            schd.pool.get_tasks(),
+            None,
+            None
+        )
+
+        # Broadcast the task into skip mode, output succeeded and submit it:
+        schd.broadcast_mgr.put_broadcast(
+            ['1'], ['one'], [{'skip': {'outputs': 'succeeded'}}]
+        )
+        schd.task_job_mgr.submit_task_jobs(
+            schd.workflow,
+            schd.pool.get_tasks(),
+            None,
+            None
+        )
diff --git a/tests/integration/scripts/test_validate_integration.py b/tests/integration/scripts/test_validate_integration.py
index c53ac97e0f1..4adabf7995f 100644
--- a/tests/integration/scripts/test_validate_integration.py
+++ b/tests/integration/scripts/test_validate_integration.py
@@ -161,7 +161,7 @@ def test_pre_cylc8(flow, validate, caplog):
     assert warning in caplog.messages
 
 
-def test_graph_upgrade_msg_default(flow, validate, caplog):
+def test_graph_upgrade_msg_default(flow, validate, caplog, log_filter):
    """It lists Cycling definitions which need upgrading."""
    id_ = flow({
        'scheduler': {'allow implicit tasks': True},
@@ -174,11 +174,11 @@ def test_graph_upgrade_msg_default(flow, validate, caplog):
        },
    })
    validate(id_)
-    assert '[scheduling][dependencies][X]graph' in caplog.messages[0]
-    assert 'for X in:\n P1Y, R1' in caplog.messages[0]
+    assert log_filter(contains='[scheduling][dependencies][X]graph')
+    assert log_filter(contains='for X in:\n P1Y, R1')
 
 
-def test_graph_upgrade_msg_graph_equals(flow, validate, caplog):
+def test_graph_upgrade_msg_graph_equals(flow, validate, caplog, log_filter):
     """It gives a more useful message in special case where graph is
     key rather than section:
 
@@ -191,11 +191,12 @@ def test_graph_upgrade_msg_graph_equals(flow, validate, caplog):
         'scheduling': {'dependencies': {'graph': 'foo => bar'}},
     })
     validate(id_)
-    expect = ('[scheduling][dependencies]graph -> [scheduling][graph]R1')
-    assert expect in caplog.messages[0]
+    assert log_filter(
+        contains='[scheduling][dependencies]graph -> [scheduling][graph]R1'
+    )
 
 
-def test_graph_upgrade_msg_graph_equals2(flow, validate, caplog):
+def test_graph_upgrade_msg_graph_equals2(flow, validate, caplog, log_filter):
     """Both an implicit R1 and explict reccurance exist:
     It appends a note.
     """
@@ -215,7 +216,7 @@ def test_graph_upgrade_msg_graph_equals2(flow, validate, caplog):
         '\n P1Y, graph'
         '\n ([scheduling][dependencies]graph moves to [scheduling][graph]R1)'
     )
-    assert expect in caplog.messages[0]
+    assert log_filter(contains=expect)
 
 
 def test_undefined_parent(flow, validate):
diff --git a/tests/integration/test_config.py b/tests/integration/test_config.py
index 5ba56327b11..660d03eb290 100644
--- a/tests/integration/test_config.py
+++ b/tests/integration/test_config.py
@@ -17,6 +17,7 @@
 import logging
 from pathlib import Path
 import sqlite3
+from textwrap import dedent
 from typing import Any
 
 import pytest
@@ -274,7 +275,7 @@ def test_parse_special_tasks_families(flow, scheduler, validate, section):
     }
 
 
-def test_queue_treated_as_implicit(flow, validate, caplog):
+def test_queue_treated_as_implicit(flow, validate, caplog, log_filter):
     """Tasks in queues but not in runtime generate a warning.
 
     https://github.com/cylc/cylc-flow/issues/5260
@@ -289,10 +290,7 @@ def test_queue_treated_as_implicit(flow, validate, caplog):
         }
     )
     validate(id_)
-    assert (
-        'Queues contain tasks not defined in runtime'
-        in caplog.records[0].message
-    )
+    assert log_filter(contains='Queues contain tasks not defined in runtime')
 
 
 def test_queue_treated_as_comma_separated(flow, validate):
@@ -596,19 +594,60 @@ def _inner(*args, **kwargs):
     assert get_platforms(glbl_cfg()) == {'localhost', 'foo', 'bar'}
 
 
-def test_validate_run_mode(flow: Fixture, validate: Fixture):
+def test_nonlive_mode_validation(flow, validate, caplog, log_filter):
+    """Nonlive tasks produce a warning at validation.
+    """
+    msg1 = dedent('The following tasks are set to run in skip mode:\n * skip')
+
+    wid = flow({
+        'scheduling': {
+            'graph': {
+                'R1': 'live => skip => simulation => dummy => default'
+            }
+        },
+        'runtime': {
+            'default': {},
+            'live': {'run mode': 'live'},
+            'skip': {
+                'run mode': 'skip',
+                'skip': {'outputs': 'started, submitted'}
+            },
+        },
+    })
+
+    validate(wid)
+    assert log_filter(contains=msg1)
+
+
+def test_skip_forbidden_as_output(flow, validate):
+    """Run mode names are forbidden as task output names."""
+    wid = flow({
+        'scheduling': {'graph': {'R1': 'task'}},
+        'runtime': {'task': {'outputs': {'skip': 'message for skip'}}}
+    })
+    with pytest.raises(
+        WorkflowConfigError, match='Invalid task output .* cannot be: `skip`'
+    ):
+        validate(wid)
+
+
+def test_validate_workflow_run_mode(
+    flow: Fixture, validate: Fixture, caplog: Fixture
+):
     """Test that Cylc validate will only check simulation mode settings
     if validate --mode simulation or dummy.
-    Discovered in: https://github.com/cylc/cylc-flow/pull/6213#issuecomment-2225365825
     """
-    wid = flow({
-        'scheduling': {'graph': {'R1': 'mytask'}},
-        'runtime': {'mytask': {'simulation': {'fail cycle points': 'alll'}}}
-    })
+    wid = flow(
+        {
+            'scheduling': {'graph': {'R1': 'mytask'}},
+            'runtime': {
+                'mytask': {
+                    'simulation': {'fail cycle points': 'invalid'},
+                }
+            },
+        }
+    )
 
-    # It's fine with run mode live
     validate(wid)
 
     # It fails with run mode simulation:
diff --git a/tests/integration/test_dbstatecheck.py b/tests/integration/test_dbstatecheck.py
index 94de81fbef0..16f4a7bb460 100644
--- a/tests/integration/test_dbstatecheck.py
+++ b/tests/integration/test_dbstatecheck.py
@@ -79,27 +79,27 @@ def test_basic(checker):
         ['output', '10010101T0000Z', 'succeeded'],
         ['good', '10000101T0000Z', 'waiting', '(flows=2)'],
         ['good', '10010101T0000Z', 'waiting', '(flows=2)'],
     ]
-    assert result == expect
+    assert sorted(result) == sorted(expect)
 
 
 def test_task(checker):
     """Filter by task name"""
     result = checker.workflow_state_query(task='bad')
-    assert result == [
+    assert sorted(result) == ([
         ['bad', '10000101T0000Z', 'failed'],
         ['bad', '10010101T0000Z', 'succeeded']
-    ]
+    ])
 
 
 def test_point(checker):
     """Filter by point"""
     result = checker.workflow_state_query(cycle='10000101T0000Z')
-    assert result == [
+    assert sorted(result) == sorted([
         ['bad', '10000101T0000Z', 'failed'],
         ['good', '10000101T0000Z', 'succeeded'],
         ['output', '10000101T0000Z', 'succeeded'],
         ['good', '10000101T0000Z', 'waiting', '(flows=2)'],
-    ]
+    ])
 
 
 def test_status(checker):
diff --git a/tests/integration/test_mode_on_restart.py b/tests/integration/test_mode_on_restart.py
index 80628933918..27ada98455b 100644
--- a/tests/integration/test_mode_on_restart.py
+++ b/tests/integration/test_mode_on_restart.py
@@ -20,6 +20,7 @@
 
 from cylc.flow.exceptions import InputError
 from cylc.flow.scheduler import Scheduler
+from cylc.flow.run_modes import RunMode
 
 MODES = [('live'), ('simulation'), ('dummy')]
@@ -42,7 +43,7 @@ async def test_restart_mode(
     async with start(schd):
         if not mode_before:
             mode_before = 'live'
-        assert schd.get_run_mode() == mode_before
+        assert schd.get_run_mode().value == mode_before
 
     schd = scheduler(id_, run_mode=mode_after)
 
@@ -52,10 +53,10 @@ async def test_restart_mode(
     ):
         # Restarting in the same mode is fine.
         async with run(schd):
-            assert schd.get_run_mode() == mode_before
+            assert schd.get_run_mode().value == mode_before
     else:
         # Restarting in a new mode is not:
-        errormsg = f'^This.*{mode_before} mode: Will.*{mode_after} mode.$'
+        errormsg = f'^This.*{mode_before} mode: You.*{mode_after} mode.$'
         with pytest.raises(InputError, match=errormsg):
             async with run(schd):
                 pass
diff --git a/tests/integration/test_simulation.py b/tests/integration/test_simulation.py
deleted file mode 100644
index b50acbb084d..00000000000
--- a/tests/integration/test_simulation.py
+++ /dev/null
@@ -1,459 +0,0 @@
-# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
-# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-from pathlib import Path
-
-import pytest
-from pytest import param
-
-from cylc.flow import commands
-from cylc.flow.cycling.iso8601 import ISO8601Point
-from cylc.flow.scheduler import Scheduler
-from cylc.flow.simulation import sim_time_check
-
-
-@pytest.fixture
-def monkeytime(monkeypatch):
-    """Convenience function monkeypatching time."""
-    def _inner(time_: int):
-        monkeypatch.setattr('cylc.flow.task_job_mgr.time', lambda: time_)
-        monkeypatch.setattr('cylc.flow.simulation.time', lambda: time_)
-    return _inner
-
-
-@pytest.fixture
-def run_simjob(monkeytime):
-    """Run a simulated job to completion.
-
-    Returns the output status.
-    """
-    def _run_simjob(schd, point, task):
-        itask = schd.pool.get_task(point, task)
-        itask.state.is_queued = False
-        monkeytime(0)
-        schd.task_job_mgr._simulation_submit_task_jobs(
-            [itask], schd.workflow)
-        monkeytime(itask.mode_settings.timeout + 1)
-
-        # Run Time Check
-        assert sim_time_check(
-            schd.task_events_mgr, [itask],
-            schd.workflow_db_mgr
-        ) is True
-
-        # Capture result process queue.
-        return itask
-    return _run_simjob
-
-
-@pytest.fixture(scope='module')
-async def sim_time_check_setup(
-    mod_flow, mod_scheduler, mod_start, mod_one_conf,
-):
-    schd = mod_scheduler(mod_flow({
-        'scheduler': {'cycle point format': '%Y'},
-        'scheduling': {
-            'initial cycle point': '1066',
-            'graph': {
-                'R1': 'one & fail_all & fast_forward',
-                'P1Y': 'fail_once & fail_all_submits'
-            }
-        },
-        'runtime': {
-            'one': {},
-            'fail_all': {
-                'simulation': {
-                    'fail cycle points': 'all',
-                    'fail try 1 only': False
-                },
-                'outputs': {'foo': 'bar'}
-            },
-            # This task ought not be finished quickly, but for the speed up
-            'fast_forward': {
-                'execution time limit': 'PT1M',
-                'simulation': {'speedup factor': 2}
-            },
-            'fail_once': {
-                'simulation': {
-                    'fail cycle points': '1066, 1068',
-                }
-            },
-            'fail_all_submits': {
-                'simulation': {
-                    'fail cycle points': '1066',
-                    'fail try 1 only': False,
-                }
-            }
-        }
-    }))
-    async with mod_start(schd):
-        itasks = schd.pool.get_tasks()
-        [schd.task_job_mgr._set_retry_timers(i) for i in itasks]
-        yield schd, itasks
-
-
-def test_false_if_not_running(
-    sim_time_check_setup, monkeypatch
-):
-    schd, itasks = sim_time_check_setup
-
-    itasks = [i for i in itasks if i.state.status != 'running']
-
-    # False if task status not running:
-    assert sim_time_check(schd.task_events_mgr, itasks, '') is False
-
-
-@pytest.mark.parametrize(
-    'itask, point, results',
-    (
-        # Task fails this CP, first submit.
-        param(
-            'fail_once', '1066', (True, False, False),
-            id='only-fail-on-submit-1'),
-        # Task succeeds this CP, all submits.
-        param(
-            'fail_once', '1067', (False, False, False),
-            id='do-not-fail-this-cp'),
-        # Task fails this CP, first submit.
-        param(
-            'fail_once', '1068', (True, False, False),
-            id='and-another-cp'),
-        # Task fails this CP, all submits.
-        param(
-            'fail_all_submits', '1066', (True, True, True),
-            id='fail-all-submits'),
-        # Task succeeds this CP, all submits.
-        param(
-            'fail_all_submits', '1067', (False, False, False),
-            id='fail-no-submits'),
-    )
-)
-def test_fail_once(sim_time_check_setup, itask, point, results, monkeypatch):
-    """A task with a fail cycle point only fails
-    at that cycle point, and then only on the first submission.
-    """
-    schd, _ = sim_time_check_setup
-
-    itask = schd.pool.get_task(
-        ISO8601Point(point), itask)
-
-    for i, result in enumerate(results):
-        itask.try_timers['execution-retry'].num = i
-        schd.task_job_mgr._simulation_submit_task_jobs(
-            [itask], schd.workflow)
-        assert itask.mode_settings.sim_task_fails is result
-
-
-def test_task_finishes(sim_time_check_setup, monkeytime, caplog):
-    """...and an appropriate message sent.
-
-    Checks that failed and bar are output if a task is set to fail.
-
-    Does NOT check every possible cause of an outcome - this is done
-    in unit tests.
-    """
-    schd, _ = sim_time_check_setup
-    monkeytime(0)
-
-    # Setup a task to fail, submit it.
-    fail_all_1066 = schd.pool.get_task(ISO8601Point('1066'), 'fail_all')
-    fail_all_1066.state.status = 'running'
-    fail_all_1066.state.is_queued = False
-    schd.task_job_mgr._simulation_submit_task_jobs(
-        [fail_all_1066], schd.workflow)
-
-    # For the purpose of the test delete the started time set by
-    # _simulation_submit_task_jobs.
-    fail_all_1066.summary['started_time'] = 0
-
-    # Before simulation time is up:
-    assert sim_time_check(schd.task_events_mgr, [fail_all_1066], '') is False
-
-    # Time's up...
-    monkeytime(12)
-
-    # After simulation time is up it Fails and records custom outputs:
-    assert sim_time_check(schd.task_events_mgr, [fail_all_1066], '') is True
-    outputs = fail_all_1066.state.outputs
-    assert outputs.is_message_complete('succeeded') is False
-    assert outputs.is_message_complete('bar') is True
-    assert outputs.is_message_complete('failed') is True
-
-
-def test_task_sped_up(sim_time_check_setup, monkeytime):
-    """Task will speed up by a factor set in config."""
-
-    schd, _ = sim_time_check_setup
-    fast_forward_1066 = schd.pool.get_task(
-        ISO8601Point('1066'), 'fast_forward')
-
-    # Run the job submission method:
-    monkeytime(0)
-    schd.task_job_mgr._simulation_submit_task_jobs(
-        [fast_forward_1066], schd.workflow)
-    fast_forward_1066.state.is_queued = False
-
-    result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '')
-    assert result is False
-    monkeytime(29)
-    result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '')
-    assert result is False
-    monkeytime(31)
-    result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '')
-    assert result is True
-
-
-async def test_settings_restart(
-    monkeytime, flow, scheduler, start
-):
-    """Check that simulation mode settings are correctly restored
-    upon restart.
-
-    In the case of start time this is collected from the database
-    from task_jobs.start_time.
-
-    tasks:
-        one: Runs straighforwardly.
-        two: Test case where database is missing started_time
-             because it was upgraded from an earlier version of Cylc.
-    """
-    id_ = flow({
-        'scheduler': {'cycle point format': '%Y'},
-        'scheduling': {
-            'initial cycle point': '1066',
-            'graph': {
-                'R1': 'one & two'
-            }
-        },
-        'runtime': {
-            'root': {
-                'execution time limit': 'PT1M',
-                'execution retry delays': 'P0Y',
-                'simulation': {
-                    'speedup factor': 1,
-                    'fail cycle points': 'all',
-                    'fail try 1 only': True,
-                }
-            },
-        }
-    })
-    schd = scheduler(id_)
-
-    # Start the workflow:
-    async with start(schd):
-        og_timeouts = {}
-        for itask in schd.pool.get_tasks():
-            schd.task_job_mgr._simulation_submit_task_jobs(
-                [itask], schd.workflow)
-
-            og_timeouts[itask.identity] = itask.mode_settings.timeout
-
-            # Mock wallclock < sim end timeout
-            monkeytime(itask.mode_settings.timeout - 1)
-            assert sim_time_check(
-                schd.task_events_mgr, [itask], schd.workflow_db_mgr
-            ) is False
-
-    # Stop and restart the scheduler:
-    schd = scheduler(id_)
-    async with start(schd):
-        # Get our tasks and fix wallclock:
-        itasks = schd.pool.get_tasks()
-        for itask in itasks:
-
-            # Check that we haven't got started time & mode settings back:
-            assert itask.summary['started_time'] is None
-            assert itask.mode_settings is None
-
-            if itask.identity == '1066/two':
-                # Delete the database entry for `two`: Ensure that
-                # we don't break sim mode on upgrade to this version of Cylc.
-                schd.workflow_db_mgr.pri_dao.connect().execute(
-                    'UPDATE task_jobs'
-                    '\n    SET time_submit = NULL'
-                    '\n    WHERE (name == \'two\')'
-                )
-                schd.workflow_db_mgr.process_queued_ops()
-                monkeytime(42)
-                expected_timeout = 102.0
-            else:
-                monkeytime(og_timeouts[itask.identity] - 1)
-                expected_timeout = float(int(og_timeouts[itask.identity]))
-
-            assert sim_time_check(
-                schd.task_events_mgr, [itask], schd.workflow_db_mgr
-            ) is False
-
-            # Check that the itask.mode_settings is now re-created
-            assert itask.mode_settings.__dict__ == {
-                'simulated_run_length': 60.0,
-                'sim_task_fails': True,
-                'timeout': expected_timeout
-            }
-
-
-async def test_settings_reload(
-    flow, scheduler, start, run_simjob
-):
-    """Check that simulation mode settings are changed for future
-    pseudo jobs on reload.
-
-    """
-    id_ = flow({
-        'scheduler': {'cycle point format': '%Y'},
-        'scheduling': {
-            'initial cycle point': '1066',
-            'graph': {'R1': 'one'}
-        },
-        'runtime': {
-            'one': {
-                'execution time limit': 'PT1M',
-                'execution retry delays': 'P0Y',
-                'simulation': {
-                    'speedup factor': 1,
-                    'fail cycle points': 'all',
-                    'fail try 1 only': False,
-                }
-            },
-        }
-    })
-    schd = scheduler(id_)
-    async with start(schd):
-        # Submit first psuedo-job and "run" to failure:
-        one_1066 = schd.pool.get_tasks()[0]
-
-        itask = run_simjob(schd, one_1066.point, 'one')
-        assert itask.state.outputs.is_message_complete('failed') is False
-
-        # Modify config as if reinstall had taken place:
-        conf_file = Path(schd.workflow_run_dir) / 'flow.cylc'
-        conf_file.write_text(
-            conf_file.read_text().replace('False', 'True'))
-
-        # Reload Workflow:
-        await commands.run_cmd(commands.reload_workflow(schd))
-
-        # Submit second psuedo-job and "run" to success:
-        itask = run_simjob(schd, one_1066.point, 'one')
-        assert itask.state.outputs.is_message_complete('succeeded') is True
-
-
-async def test_settings_broadcast(
-    flow, scheduler, start, monkeytime
-):
-    """Assert that broadcasting a change in the settings for a task
-    affects subsequent psuedo-submissions.
-    """
-    id_ = flow({
-        'scheduler': {'cycle point format': '%Y'},
-        'scheduling': {
-            'initial cycle point': '1066',
-            'graph': {'R1': 'one'}
-        },
-        'runtime': {
-            'one': {
-                'execution time limit': 'PT1S',
-                'execution retry delays': '2*PT5S',
-                'simulation': {
-                    'speedup factor': 1,
-                    'fail cycle points': '1066',
-                    'fail try 1 only': False
-                }
-            },
-        }
-    }, defaults=False)
-    schd = scheduler(id_, paused_start=False, run_mode='simulation')
-    async with start(schd) as log:
-        itask = schd.pool.get_tasks()[0]
-        itask.state.is_queued = False
-
-        # Submit the first - the sim task will fail:
-        schd.task_job_mgr._simulation_submit_task_jobs(
-            [itask], schd.workflow)
-        assert itask.mode_settings.sim_task_fails is True
-
-        # Let task finish.
-        monkeytime(itask.mode_settings.timeout + 1)
-        assert sim_time_check(
-            schd.task_events_mgr, [itask],
-            schd.workflow_db_mgr
-        ) is True
-
-        # The mode_settings object has been cleared:
-        assert itask.mode_settings is None
-        # Change a setting using broadcast:
-        schd.broadcast_mgr.put_broadcast(
-            ['1066'], ['one'], [{
-                'simulation': {'fail cycle points': ''}
-            }])
-        # Submit again - result is different:
-        schd.task_job_mgr._simulation_submit_task_jobs(
-            [itask], schd.workflow)
-        assert itask.mode_settings.sim_task_fails is False
-
-        # Assert Clearing the broadcast works
-        schd.broadcast_mgr.clear_broadcast()
-        schd.task_job_mgr._simulation_submit_task_jobs(
-            [itask], schd.workflow)
-        assert itask.mode_settings.sim_task_fails is True
-
-        # Assert that list of broadcasts doesn't change if we submit
-        # Invalid fail cycle points to broadcast.
-        itask.mode_settings = None
-        schd.broadcast_mgr.put_broadcast(
-            ['1066'], ['one'], [{
-                'simulation': {'fail cycle points': 'higadfuhasgiurguj'}
-            }])
-        schd.task_job_mgr._simulation_submit_task_jobs(
-            [itask], schd.workflow)
-        assert (
-            'Invalid ISO 8601 date representation: higadfuhasgiurguj'
-            in log.messages[-1])
-
-        schd.broadcast_mgr.put_broadcast(
-            ['1066'], ['one'], [{
-                'simulation': {'fail cycle points': '1'}
-            }])
-        schd.task_job_mgr._simulation_submit_task_jobs(
-            [itask], schd.workflow)
-        assert (
-            'Invalid ISO 8601 date representation: 1'
-            in log.messages[-1])
-
-        # Broadcast tasks will reparse correctly:
-        schd.broadcast_mgr.put_broadcast(
-            ['1066'], ['one'], [{
-                'simulation': {'fail cycle points': '1945, 1977, 1066'},
-                'execution retry delays': '3*PT2S'
-            }])
-        schd.task_job_mgr._simulation_submit_task_jobs(
-            [itask], schd.workflow)
-        assert itask.mode_settings.sim_task_fails is True
-        assert itask.try_timers['execution-retry'].delays == [2.0, 2.0, 2.0]
-        # n.b. rtconfig should remain unchanged, lest we cancel broadcasts:
-        assert itask.tdef.rtconfig['execution retry delays'] == [5.0, 5.0]
-
-
-async def test_db_submit_num(
-    flow, one_conf, scheduler, run, complete, db_select
-):
-    """Test simulation mode correctly increments the submit_num in the DB."""
-    schd: Scheduler = scheduler(flow(one_conf), paused_start=False)
-    async with run(schd):
-        await complete(schd, '1/one')
-    assert db_select(schd, False, 'task_states', 'submit_num', 'status') == [
-        (1, 'succeeded'),
-    ]
diff --git a/tests/integration/test_task_events_mgr.py b/tests/integration/test_task_events_mgr.py
index 7ac12274d7b..ac1fb2f9344 100644
--- a/tests/integration/test_task_events_mgr.py
+++ b/tests/integration/test_task_events_mgr.py
@@ -14,17 +14,15 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-from itertools import product
 import logging
 from typing import Any as Fixture
 
+from cylc.flow.run_modes import RunMode
 from cylc.flow.task_events_mgr import TaskJobLogsRetrieveContext
 from cylc.flow.scheduler import Scheduler
 from cylc.flow.data_store_mgr import (
     JOBS,
-    TASK_STATUSES_ORDERED,
     TASK_STATUS_WAITING,
-    TASK_STATUS_SUBMIT_FAILED,
 )
 
 
@@ -80,17 +78,23 @@ async def test__insert_task_job(flow, one_conf, scheduler, start, validate):
     """
     conf = {
         'scheduling': {'graph': {'R1': 'rhenas'}},
-        'runtime': {'rhenas': {'simulation': {
-            'fail cycle points': '1',
-            'fail try 1 only': False,
-        }}}}
+        'runtime': {
+            'rhenas': {
+                'simulation': {
+                    'fail cycle points': '1',
+                    'fail try 1 only': False,
+                }
+            }
+        },
+    }
     id_ = flow(conf)
     schd = scheduler(id_)
     async with start(schd):
         # Set task to running:
-        itask = schd.pool.get_tasks()[0]
+        itask = schd.pool.get_tasks()[0]
         itask.state.status = 'running'
         itask.submit_num += 1
+        itask.run_mode = RunMode.SIMULATION
 
         # Not run _insert_task_job yet:
         assert not schd.data_store_mgr.added['jobs'].keys()
@@ -152,7 +156,7 @@ async def test__always_insert_task_job(
         schd.pool.get_tasks(),
         schd.server.curve_auth,
         schd.server.client_pub_key_dir,
-        is_simulation=False
+        run_mode=RunMode('live')
     )
 
     # Both tasks are in a waiting state:
diff --git a/tests/integration/test_task_pool.py b/tests/integration/test_task_pool.py
index d347dd68141..404ec8da87f 100644
--- a/tests/integration/test_task_pool.py
+++ b/tests/integration/test_task_pool.py
@@ -1550,6 +1550,75 @@ async def test_set_outputs_future(
     assert log_filter(contains="completed output y")
 
 
+async def test_set_outputs_from_skip_settings(
+    flow,
+    scheduler,
+    start,
+    log_filter,
+    validate
+):
+    """Check working of ``cylc set --out=skip``:
+
+    1. --out=skip can be used to set all required outputs.
+    2. --out=skip,other_output can be used to set other outputs.
+
+    """
+    id_ = flow(
+        {
+            'scheduling': {
+                'cycling mode': 'integer',
+                'initial cycle point': 1,
+                'final cycle point': 2,
+                'graph': {
+                    'P1': """
+                        a => after_asucceeded
+                        a:x => after_ax
+                        a:y? => after_ay
+                    """
+                }
+            },
+            'runtime': {
+                'a': {
+                    'outputs': {
+                        'x': 'xebec',
+                        'y': 'yacht'
+                    },
+                    'skip': {'outputs': 'x'}
+                }
+            }
+        }
+    )
+    validate(id_)
+    schd = scheduler(id_)
+
+    async with start(schd):
+        # it should start up with just the 'a' tasks:
+        assert pool_get_task_ids(schd.pool) == ['1/a', '2/a']
+
+        # setting 1/a output to skip should set output x, but not
+        # y (because y is optional).
+        schd.pool.set_prereqs_and_outputs(
+            ['1/a'], ['skip'], None, ['all'])
+        assert (pool_get_task_ids(schd.pool) == [
+            '1/after_asucceeded',
+            '1/after_ax',
+            '2/a'])
+
+        # Check that the presence of "skip" in outputs doesn't
+        # trigger a warning:
+        assert not log_filter(level=30)
+
+        # You should be able to set skip as part of a list of outputs:
+        schd.pool.set_prereqs_and_outputs(
+            ['2/a'], ['skip', 'y'], None, ['all'])
+        assert (pool_get_task_ids(schd.pool) == [
+            '1/after_asucceeded',
+            '1/after_ax',
+            '2/after_asucceeded',
+            '2/after_ax',
+            '2/after_ay'])
+
+
 async def test_prereq_satisfaction(
     flow,
     scheduler,
diff --git a/tests/integration/utils/flow_tools.py b/tests/integration/utils/flow_tools.py
index 7419fd4fe14..86377bfaf50 100644
--- a/tests/integration/utils/flow_tools.py
+++ b/tests/integration/utils/flow_tools.py
@@ -31,6 +31,7 @@
 from secrets import token_hex
 
 from cylc.flow import CYLC_LOG
+from cylc.flow.run_modes import RunMode
 from cylc.flow.workflow_files import WorkflowFiles
 from cylc.flow.scheduler import Scheduler, SchedulerStop
 from cylc.flow.scheduler_cli import RunOptions
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 924b1295998..f1f17101fd5 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -172,20 +172,26 @@ def set_cycling_type(monkeypatch: pytest.MonkeyPatch):
             custom time zone to use.
         dump_format: If using ISO8601, specify custom dump format.
     """
+
     def _set_cycling_type(
         ctype: str = INTEGER_CYCLING_TYPE,
-        time_zone: Optional[str] = None,
+        time_zone: Optional[str] = 'Z',
         dump_format: Optional[str] = None,
     ) -> None:
         class _DefaultCycler:
             TYPE = ctype
+
         monkeypatch.setattr(
-            'cylc.flow.cycling.loader.DefaultCycler', _DefaultCycler)
+            'cylc.flow.cycling.loader.DefaultCycler', _DefaultCycler
+        )
         if ctype == ISO8601_CYCLING_TYPE:
             monkeypatch.setattr(
                 'cylc.flow.cycling.iso8601.WorkflowSpecifics',
-                iso8601_init(time_zone=time_zone, custom_dump_format=dump_format)
+                iso8601_init(
+                    time_zone=time_zone, custom_dump_format=dump_format
+                ),
             )
+
     return _set_cycling_type
diff --git a/tests/unit/network/test_schema.py b/tests/unit/network/test_schema.py
index 5f92cb4a35c..1604cadfb0a 100644
--- a/tests/unit/network/test_schema.py
+++ b/tests/unit/network/test_schema.py
@@ -26,6 +26,7 @@
     RUNTIME_FIELD_TO_CFG_MAP,
     Mutations,
     Runtime,
+    runtime_schema_to_cfg,
     sort_elements,
     SortArgs,
 )
@@ -105,6 +106,20 @@ def test_runtime_field_to_cfg_map(field_name: str):
     assert WORKFLOW_SPEC.get('runtime', '__MANY__', cfg_name)
 
 
+@pytest.mark.parametrize('runtime_dict,expected', [
+    pytest.param(
+        {'run_mode': 'Skip'}, {'run mode': 'skip'}, id='edit-runtime'
+    ),
+    pytest.param(
+        {'run mode': 'skip'}, {'run mode': 'skip'}, id='broadcast'
+    ),
+])
+def test_runtime_schema_to_cfg(runtime_dict, expected):
+    """Test this function can handle Edit Runtime submitted values as well
+    as normal broadcast values."""
+    assert runtime_schema_to_cfg(runtime_dict) == expected
+
+
 @pytest.mark.parametrize('mutation', (
     pytest.param(attr, id=name)
     for name, attr in Mutations.__dict__.items()
diff --git a/tests/unit/run_modes/__init__.py b/tests/unit/run_modes/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/unit/run_modes/test_dummy.py b/tests/unit/run_modes/test_dummy.py
new file mode 100644
index 00000000000..998c13767c9
--- /dev/null
+++ b/tests/unit/run_modes/test_dummy.py
@@ -0,0 +1,40 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""Tests for utilities supporting dummy mode.
+"""
+import pytest
+from cylc.flow.run_modes.dummy import build_dummy_script
+
+
+@pytest.mark.parametrize(
+    'fail_one_time_only', (True, False)
+)
+def test_build_dummy_script(fail_one_time_only):
+    rtc = {
+        'outputs': {'foo': '1', 'bar': '2'},
+        'simulation': {
+            'fail try 1 only': fail_one_time_only,
+            'fail cycle points': '1',
+        }
+    }
+    result = build_dummy_script(rtc, 60)
+    assert result.split('\n') == [
+        'sleep 60',
+        "cylc message '1'",
+        "cylc message '2'",
+        f"cylc__job__dummy_result {str(fail_one_time_only).lower()}"
+        " 1 || exit 1"
+    ]
diff --git a/tests/unit/run_modes/test_run_modes.py b/tests/unit/run_modes/test_run_modes.py
new file mode 100644
index 00000000000..57d245016d1
--- /dev/null
+++ b/tests/unit/run_modes/test_run_modes.py
@@ -0,0 +1,30 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""Tests for utilities supporting run modes.
+""" + +from cylc.flow.run_modes import RunMode + + +def test_run_mode_desc(): + """All run mode labels have descriptions.""" + for mode in RunMode: + assert mode.describe() + + +def test_get_default_live(): + """RunMode.get() => live""" + assert RunMode.get({}) == RunMode.LIVE diff --git a/tests/unit/test_simulation.py b/tests/unit/run_modes/test_simulation_units.py similarity index 80% rename from tests/unit/test_simulation.py rename to tests/unit/run_modes/test_simulation_units.py index 920a872503a..78ef2bd2f05 100644 --- a/tests/unit/test_simulation.py +++ b/tests/unit/run_modes/test_simulation_units.py @@ -20,11 +20,10 @@ from cylc.flow.cycling.integer import IntegerPoint from cylc.flow.cycling.iso8601 import ISO8601Point -from cylc.flow.simulation import ( - parse_fail_cycle_points, - build_dummy_script, +from cylc.flow.run_modes.simulation import ( disable_platforms, get_simulated_run_len, + parse_fail_cycle_points, sim_task_failed, ) @@ -56,27 +55,6 @@ def test_get_simulated_run_len( assert get_simulated_run_len(rtc) == 3600 -@pytest.mark.parametrize( - 'fail_one_time_only', (True, False) -) -def test_set_simulation_script(fail_one_time_only): - rtc = { - 'outputs': {'foo': '1', 'bar': '2'}, - 'simulation': { - 'fail try 1 only': fail_one_time_only, - 'fail cycle points': '1', - } - } - result = build_dummy_script(rtc, 60) - assert result.split('\n') == [ - 'sleep 60', - "cylc message '1'", - "cylc message '2'", - f"cylc__job__dummy_result {str(fail_one_time_only).lower()}" - " 1 || exit 1" - ] - - @pytest.mark.parametrize( 'rtc, expect', ( ({'platform': 'skarloey'}, 'localhost'), @@ -97,12 +75,39 @@ def test_disable_platforms(rtc, expect): assert val is None -def test_parse_fail_cycle_points(set_cycling_type): - before = ['2', '4'] - set_cycling_type() - assert parse_fail_cycle_points(before) == [ - IntegerPoint(i) for i in before - ] +@pytest.mark.parametrize( + 'args, cycling, fallback', + ( + param((['2', '4'], ['']), 'integer', False, id='int.valid'), + param((['garbage'], []), 'integer', True, id='int.invalid'), + param((['20200101T0000Z'], []), 'iso8601', False, id='iso.valid'), + param((['garbage'], []), 'iso8601', True, id='iso.invalid'), + ), +) +def test_parse_fail_cycle_points( + caplog, set_cycling_type, args, cycling, fallback +): + """Tests for parse_fail_cycle points. + """ + set_cycling_type(cycling) + if fallback: + expect = args[1] + check_log = True + else: + expect = args[0] + check_log = False + + if cycling == 'integer': + assert parse_fail_cycle_points(*args) == [ + IntegerPoint(i) for i in expect + ] + else: + assert parse_fail_cycle_points(*args) == [ + ISO8601Point(i) for i in expect + ] + if check_log: + assert "Incompatible" in caplog.messages[0] + assert cycling in caplog.messages[0].lower() @pytest.mark.parametrize( diff --git a/tests/unit/run_modes/test_skip_units.py b/tests/unit/run_modes/test_skip_units.py new file mode 100644 index 00000000000..bf5ffa7be60 --- /dev/null +++ b/tests/unit/run_modes/test_skip_units.py @@ -0,0 +1,140 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""Unit tests for utilities supporting skip mode.
+"""
+import logging
+import pytest
+from pytest import param, raises
+from types import SimpleNamespace
+
+from cylc.flow.exceptions import WorkflowConfigError
+from cylc.flow.run_modes.skip import (
+    check_task_skip_config,
+    process_outputs,
+    skip_mode_validate,
+)
+
+
+@pytest.mark.parametrize(
+    'conf',
+    (
+        param({}, id='no-skip-config'),
+        param({'skip': {'outputs': []}}, id='no-skip-outputs'),
+        param({'skip': {'outputs': ['foo1', 'failed']}}, id='ok-skip-outputs'),
+    )
+)
+def test_good_check_task_skip_config(conf):
+    """It returns None if the problems this function checks are not present.
+    """
+    tdef = SimpleNamespace(rtconfig=conf)
+    tdef.name = 'foo'
+    assert check_task_skip_config(tdef) is None
+
+
+def test_raises_check_task_skip_config():
+    """It raises an error if succeeded and failed are set.
+    """
+    tdef = SimpleNamespace(
+        rtconfig={'skip': {'outputs': ['foo1', 'failed', 'succeeded']}}
+    )
+    tdef.name = 'foo'
+    with raises(WorkflowConfigError, match='succeeded AND failed'):
+        check_task_skip_config(tdef)
+
+
+@pytest.mark.parametrize(
+    'outputs, required, expect',
+    (
+        param([], [], ['succeeded'], id='implicit-succeeded'),
+        param(
+            ['succeeded'], ['succeeded'], ['succeeded'],
+            id='explicit-succeeded'
+        ),
+        param(['submitted'], [], ['succeeded'], id='only-1-submit'),
+        param(
+            ['foo', 'bar', 'baz', 'qux'],
+            ['bar', 'qux'],
+            ['bar', 'qux', 'succeeded'],
+            id='required-only'
+        ),
+        param(
+            ['foo', 'baz'],
+            ['bar', 'qux'],
+            ['succeeded'],
+            id='no-required'
+        ),
+        param(
+            ['failed'],
+            [],
+            ['failed'],
+            id='explicit-failed'
+        ),
+    )
+)
+def test_process_outputs(outputs, required, expect):
+    """Check that skip outputs:
+
+    1. Doesn't send submitted twice.
+    2. Sends every required output.
+    3. If failed is set, send failed.
+    4. If failed is not set, send succeeded.
+    """
+    # Create a mocked up task-proxy:
+    rtconf = {'skip': {'outputs': outputs}}
+    itask = SimpleNamespace(
+        tdef=SimpleNamespace(
+            rtconfig=rtconf),
+        state=SimpleNamespace(
+            outputs=SimpleNamespace(
+                iter_required_messages=lambda *a, **k: iter(required),
+                _message_to_trigger={v: v for v in required}
+            )))
+
+    assert process_outputs(itask, rtconf) == ['submitted', 'started'] + expect
+
+
+def test_skip_mode_validate(caplog, log_filter):
+    """It warns us if we've set a task config to nonlive mode.
+
+    (And not otherwise)
+
+    Point 3 from the skip mode proposal
+    https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+
+    | If the run mode is set to simulation or skip in the workflow
+    | configuration, then cylc validate and cylc lint should produce
+    | warning (similar to development features in other languages / systems).
+    """
+    taskdefs = {
+        f'{run_mode}_task': SimpleNamespace(
+            rtconfig={'run mode': run_mode},
+            name=f'{run_mode}_task'
+        )
+        for run_mode
+        in ['live', 'skip']
+    }
+
+    skip_mode_validate(taskdefs)
+
+    assert len(caplog.records) == 1
+    assert log_filter(
+        level=logging.WARNING,
+        exact_match=(
+            "The following tasks are set to run in skip mode:\n"
+            " * skip_task"
+        ),
+        log=caplog
+    )
diff --git a/tests/unit/scripts/test_lint.py b/tests/unit/scripts/test_lint.py
index 4029bc1c66c..42623bf6f3a 100644
--- a/tests/unit/scripts/test_lint.py
+++ b/tests/unit/scripts/test_lint.py
@@ -193,7 +193,10 @@
         [[[directives]]]
             -l walltime = 666
     [[baz]]
+        run mode = skip
         platform = `no backticks`
+        [[[skip]]]
+            outputs = succeeded, failed
''' + (
     '\nscript = the quick brown fox jumps over the lazy dog until it becomes '
     'clear that this line is longer than the default 130 character limit.'
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py
index 74c54e44583..1b98d42f729 100644
--- a/tests/unit/test_config.py
+++ b/tests/unit/test_config.py
@@ -17,8 +17,8 @@
 import os
 import sys
 from optparse import Values
-from typing import Any, Callable, Dict, List, Optional, Tuple, Type
-from pathlib import Path
+from typing import (
+    TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type)
 
 import pytest
 import logging
 from types import SimpleNamespace
@@ -47,8 +47,9 @@
 from cylc.flow.cycling.iso8601 import ISO8601Point
 
-
-Fixture = Any
+if TYPE_CHECKING:
+    from pathlib import Path
+    Fixture = Any
 
 
 def _tmp_flow_config(tmp_run_dir: Callable):
@@ -60,8 +61,8 @@ def _tmp_flow_config(tmp_run_dir: Callable):
 
     Returns the path to the flow file.
     """
-    def __tmp_flow_config(id_: str, config: str) -> Path:
-        run_dir: Path = tmp_run_dir(id_)
+    def __tmp_flow_config(id_: str, config: str) -> 'Path':
+        run_dir: 'Path' = tmp_run_dir(id_)
         flow_file = run_dir / WorkflowFiles.FLOW_FILE
         flow_file.write_text(config)
         return flow_file
@@ -82,7 +83,7 @@ class TestWorkflowConfig:
     """Test class for the Cylc WorkflowConfig object."""
 
     def test_xfunction_imports(
-            self, mock_glbl_cfg: Fixture, tmp_path: Path):
+            self, mock_glbl_cfg: 'Fixture', tmp_path: 'Path'):
         """Test for a workflow configuration with valid xtriggers"""
         mock_glbl_cfg(
             'cylc.flow.platforms.glbl_cfg',
@@ -175,7 +176,8 @@ def test_xfunction_attribute_error(self, mock_glbl_cfg, tmp_path):
         with pytest.raises(XtriggerConfigError) as excinfo:
             WorkflowConfig(workflow="capybara_workflow", fpath=flow_file,
                            options=SimpleNamespace())
-        assert "module 'capybara' has no attribute 'capybara'" in str(excinfo.value)
+        assert "module 'capybara' has no attribute 'capybara'" in str(
+            excinfo.value)
 
     def test_xfunction_not_callable(self, mock_glbl_cfg, tmp_path):
         """Test for error when a xtrigger function is not callable."""
@@ -358,7 +360,7 @@ def test_process_icp(
         expected_icp: Optional[str],
         expected_eval_icp: Optional[str],
         expected_err: Optional[Tuple[Type[Exception], str]],
-        monkeypatch: pytest.MonkeyPatch, set_cycling_type: Fixture
+        monkeypatch: pytest.MonkeyPatch, set_cycling_type: 'Fixture'
 ) -> None:
     """Test WorkflowConfig.process_initial_cycle_point().
 
@@ -445,7 +447,7 @@ def test_process_startcp(
     starttask: Optional[str],
     expected: str,
     expected_err: Optional[Tuple[Type[Exception], str]],
-    monkeypatch: pytest.MonkeyPatch, set_cycling_type: Fixture
+    monkeypatch: pytest.MonkeyPatch, set_cycling_type: 'Fixture'
 ) -> None:
     """Test WorkflowConfig.process_start_cycle_point().
@@ -648,7 +650,7 @@ def test_process_fcp(
     options_fcp: Optional[str],
     expected_fcp: Optional[str],
     expected_err: Optional[Tuple[Type[Exception], str]],
-    set_cycling_type: Fixture
+    set_cycling_type: 'Fixture'
 ) -> None:
     """Test WorkflowConfig.process_final_cycle_point().
 
@@ -671,7 +673,7 @@ def test_process_fcp(
         initial_point=loader.get_point(
             scheduling_cfg['initial cycle point']
         ).standardise(),
-        final_point = None,
+        final_point=None,
         options=SimpleNamespace(fcp=options_fcp),
     )
 
@@ -812,7 +814,7 @@ def test_stopcp_after_fcp(
     cycle point is handled correctly."""
     caplog.set_level(logging.WARNING, CYLC_LOG)
     id_ = 'cassini'
-    flow_file: Path = tmp_flow_config(id_, f"""
+    flow_file: 'Path' = tmp_flow_config(id_, f"""
     [scheduler]
         allow implicit tasks = True
     [scheduling]
@@ -1325,7 +1327,7 @@ def test_implicit_tasks(
     """
     # Setup
     id_ = 'rincewind'
-    flow_file: Path = tmp_flow_config(id_, f"""
+    flow_file: 'Path' = tmp_flow_config(id_, f"""
    [scheduler]
        {
            f'allow implicit tasks = {allow_implicit_tasks}'
@@ -1427,7 +1429,7 @@ def test_zero_interval(
     """Test that a zero-duration recurrence with >1 repetition gets an
     appropriate warning."""
     id_ = 'ordinary'
-    flow_file: Path = tmp_flow_config(id_, f"""
+    flow_file: 'Path' = tmp_flow_config(id_, f"""
     [scheduler]
         UTC mode = True
         allow implicit tasks = True
@@ -1470,7 +1472,7 @@ def test_chain_expr(
     Note the order matters when "nominal" units (years, months) are used.
     """
     id_ = 'osgiliath'
-    flow_file: Path = tmp_flow_config(id_, f"""
+    flow_file: 'Path' = tmp_flow_config(id_, f"""
     [scheduler]
         UTC mode = True
         allow implicit tasks = True
@@ -1649,7 +1651,7 @@ def test__warn_if_queues_have_implicit_tasks(caplog):
     ]
 )
 def test_cylc_env_at_parsing(
-        tmp_path: Path,
+        tmp_path: 'Path',
         monkeypatch: pytest.MonkeyPatch,
         installed,
         run_dir,
diff --git a/tests/unit/test_platforms.py b/tests/unit/test_platforms.py
index bb785b56266..106aceea068 100644
--- a/tests/unit/test_platforms.py
+++ b/tests/unit/test_platforms.py
@@ -34,6 +34,7 @@
     PlatformLookupError,
     GlobalConfigError
 )
+from cylc.flow.run_modes import JOBLESS_MODES
 
 
 PLATFORMS = {
@@ -473,13 +474,15 @@ def test_get_install_target_to_platforms_map(
     assert result == expected_map
 
 
-def test_platform_from_name__sim_mode():
-    result = platform_from_name('SIMULATION')
+@pytest.mark.parametrize('mode', sorted(JOBLESS_MODES))
+def test_platform_from_name__jobless_modes(mode):
+    result = platform_from_name(mode)
     assert result['name'] == 'localhost'
 
 
-def test_get_install_target_to_platforms_map__sim_mode():
-    result = get_install_target_to_platforms_map(['SIMULATION'])
+@pytest.mark.parametrize('mode', sorted(JOBLESS_MODES))
+def test_get_install_target_to_platforms_map__jobless_modes(mode):
+    result = get_install_target_to_platforms_map([mode])
     assert list(result) == ['localhost']
     assert len(result['localhost']) == 1
     assert result['localhost'][0]['hosts'] == ['localhost']
diff --git a/tests/unit/test_prerequisite.py b/tests/unit/test_prerequisite.py
index d3d7226bc78..04cd570afe7 100644
--- a/tests/unit/test_prerequisite.py
+++ b/tests/unit/test_prerequisite.py
@@ -15,6 +15,7 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 from functools import partial
+from typing import Optional
 
 import pytest
 
@@ -22,6 +23,7 @@
 from cylc.flow.cycling.loader import ISO8601_CYCLING_TYPE, get_point
 from cylc.flow.id import Tokens, detokenise
 from cylc.flow.prerequisite import Prerequisite, SatisfiedState
+from cylc.flow.run_modes import RunMode
 
 detok = partial(detokenise, selectors=True, relative=True)
 
@@ -151,6 +153,7 @@ def satisfied_states_prereq():
     prereq[('1', 'b', 'x')] = False
     prereq[('1', 'c', 'x')] = 'satisfied from database'
     prereq[('1', 'd', 'x')] = 'force satisfied'
+    prereq[('1', 'e', 'x')] = 'satisfied by skip mode'
     return prereq
 
 
@@ -162,6 +165,7 @@ def test_unset_naturally_satisfied(satisfied_states_prereq: Prerequisite):
         ('1/b', False),
         ('1/c', True),
         ('1/d', False),
+        ('1/e', True),
     ]:
         assert (
             satisfied_states_prereq.unset_naturally_satisfied(id_) == expected
@@ -173,6 +177,18 @@ def test_unset_naturally_satisfied(satisfied_states_prereq: Prerequisite):
         ('1', 'b', 'x'): False,
         ('1', 'c', 'x'): False,
         ('1', 'd', 'x'): 'force satisfied',
+        ('1', 'e', 'x'): False,
+    }
+
+
+def test_set_satisfied(satisfied_states_prereq: Prerequisite):
+    satisfied_states_prereq.set_satisfied()
+    assert satisfied_states_prereq._satisfied == {
+        ('1', 'a', 'x'): 'satisfied naturally',
+        ('1', 'b', 'x'): 'force satisfied',
+        ('1', 'c', 'x'): 'satisfied from database',
+        ('1', 'd', 'x'): 'force satisfied',
+        ('1', 'e', 'x'): 'satisfied by skip mode',
     }
 
 
@@ -208,24 +224,40 @@ def test_satisfy_me():
     }
 
 
-@pytest.mark.parametrize('forced', [False, True])
-@pytest.mark.parametrize('existing, expected_when_forced', [
-    (False, 'force satisfied'),
-    ('satisfied from database', 'force satisfied'),
-    ('force satisfied', 'force satisfied'),
-    ('satisfied naturally', 'satisfied naturally'),
+@pytest.mark.parametrize('forced, mode, expected', [
+    (False, None, 'satisfied naturally'),
+    (True, None, 'force satisfied'),
+    (True, RunMode.SKIP, 'force satisfied'),
+    (False, RunMode.SKIP, 'satisfied by skip mode'),
 ])
-def test_satisfy_me__override(
+def test_satisfy_me__override_false(
     forced: bool,
+    mode: Optional[RunMode],
+    expected: SatisfiedState,
+):
+    """Test satisfying an unsatisfied prereq with different states."""
+    prereq = Prerequisite(IntegerPoint('2'))
+    prereq[('1', 'a', 'x')] = False
+
+    prereq.satisfy_me([Tokens('//1/a:x')], forced=forced, mode=mode)
+    assert prereq[('1', 'a', 'x')] == expected
+
+
+@pytest.mark.parametrize('mode', [None, RunMode.SKIP])
+@pytest.mark.parametrize('forced', [True, False])
+@pytest.mark.parametrize('existing', [
+    'satisfied from database',
+    'force satisfied',
+    'satisfied naturally',
+])
+def test_satisfy_me__override_truthy(
     existing: SatisfiedState,
-    expected_when_forced: SatisfiedState,
+    forced: bool,
+    mode: Optional[RunMode],
 ):
-    """Test that satisfying a prereq with a different state works as expected
-    with and without the `forced` arg."""
+    """Test that satisfying an already-satisfied prereq doesn't change it."""
     prereq = Prerequisite(IntegerPoint('2'))
     prereq[('1', 'a', 'x')] = existing
-    prereq.satisfy_me([Tokens('//1/a:x')], forced)
-    assert prereq[('1', 'a', 'x')] == (
-        expected_when_forced if forced else 'satisfied naturally'
-    )
+    prereq.satisfy_me([Tokens('//1/a:x')], forced=forced, mode=mode)
+    assert prereq[('1', 'a', 'x')] == existing
diff --git a/tests/unit/test_task_outputs.py b/tests/unit/test_task_outputs.py
index 2dbe684f04e..2306e412342 100644
--- a/tests/unit/test_task_outputs.py
+++ b/tests/unit/test_task_outputs.py
@@ -274,7 +274,7 @@ def test_iter_required_outputs():
     assert set(outputs.iter_required_messages()) == set()

     # the preconditions expiry/submitted are excluded from this logic when
-    # defined as optional
+    # defined as optional:
     outputs = TaskOutputs(
         tdef(
             {TASK_OUTPUT_SUCCEEDED, 'x', 'y'},
@@ -289,6 +289,44 @@ def test_iter_required_outputs():
     }


+def test_iter_required_outputs__disable():
+    # With complete success and failure branches, no output is
+    # individually required:
+    outputs = TaskOutputs(
+        tdef(
+            {},
+            {'a', 'succeeded', 'b', 'y', 'failed', 'x'},
+            '(x and y and failed) or (a and b and succeeded)'
+        )
+    )
+
+    assert set(outputs.iter_required_messages()) == set()
+
+    # Disabling succeeded leaves us with the failure branch's required
+    # outputs:
+    assert set(
+        outputs.iter_required_messages(disable=TASK_OUTPUT_SUCCEEDED)
+    ) == {
+        TASK_OUTPUT_FAILED,
+        'x',
+        'y',
+    }
+
+    # Disabling failed leaves us with the success branch's required outputs:
+    assert set(outputs.iter_required_messages(disable=TASK_OUTPUT_FAILED)) == {
+        TASK_OUTPUT_SUCCEEDED,
+        'a',
+        'b',
+    }
+
+    # Disabling an arbitrary output leaves us with the required outputs
+    # from the other branch:
+    assert set(outputs.iter_required_messages(disable='a')) == {
+        TASK_OUTPUT_FAILED,
+        'x',
+        'y',
+    }
+
+
 def test_get_trigger_completion_variable_maps():
     """It should return a bi-map of triggers to compvars."""
     t2c, c2t = get_trigger_completion_variable_maps(('a', 'b-b', 'c-c-c'))
diff --git a/tests/unit/test_task_remote_mgr.py b/tests/unit/test_task_remote_mgr.py
index c41e415eba3..61cdcce2bc5 100644
--- a/tests/unit/test_task_remote_mgr.py
+++ b/tests/unit/test_task_remote_mgr.py
@@ -14,6 +14,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.

+from contextlib import suppress
 from pathlib import Path
 from time import sleep
 import pytest
@@ -229,6 +230,9 @@ def flatten_install_targets_map(itm):
     install_targets_map = TaskRemoteMgr._get_remote_tidy_targets(
         set(platform_names), set(install_targets))

+    with suppress(KeyError):
+        install_targets_map.pop('localhost')
+
     assert (
         expect['targets'] == flatten_install_targets_map(install_targets_map))
diff --git a/tests/unit/test_task_state.py b/tests/unit/test_task_state.py
index 9743fbd3332..7b33e5f5b8b 100644
--- a/tests/unit/test_task_state.py
+++ b/tests/unit/test_task_state.py
@@ -16,10 +16,12 @@

 from unittest.mock import MagicMock
 import pytest
+from types import SimpleNamespace

 from cylc.flow.prerequisite import Prerequisite
 from cylc.flow.taskdef import TaskDef
 from cylc.flow.cycling.integer import IntegerSequence, IntegerPoint
+from cylc.flow.run_modes import RunMode, disable_task_event_handlers
 from cylc.flow.task_trigger import Dependency, TaskTrigger
 from cylc.flow.task_state import (
     TaskState,
@@ -41,7 +43,7 @@
 )
 def test_state_comparison(state, is_held):
     """Test the __call__ method."""
-    tdef = TaskDef('foo', {}, 'live', '123', '123')
+    tdef = TaskDef('foo', {}, '123', '123')
     tstate = TaskState(tdef, '123', state, is_held)

     assert tstate(state, is_held=is_held)
@@ -72,7 +74,7 @@ def test_state_comparison(state, is_held):
 )
 def test_reset(state, is_held, should_reset):
     """Test that tasks do or don't have their state changed."""
-    tdef = TaskDef('foo', {}, 'live', '123', '123')
+    tdef = TaskDef('foo', {}, '123', '123')
     # create task state:
     #   * status: waiting
     #   * is_held: true
@@ -96,7 +98,7 @@ def test_task_prereq_duplicates(set_cycling_type):

     dep = Dependency([trig], [trig], False)

-    tdef = TaskDef('foo', {}, 'live', IntegerPoint("1"), IntegerPoint("1"))
+    tdef = TaskDef('foo', {}, IntegerPoint("1"), IntegerPoint("1"))
     tdef.add_dependency(dep, seq1)
     tdef.add_dependency(dep, seq2)  # duplicate!
@@ -110,7 +112,7 @@ def test_task_prereq_duplicates(set_cycling_type):

 def test_task_state_order():
     """Test is_gt and is_gte methods."""
-    tdef = TaskDef('foo', {}, 'live', IntegerPoint("1"), IntegerPoint("1"))
+    tdef = TaskDef('foo', {}, IntegerPoint("1"), IntegerPoint("1"))
     tstate = TaskState(tdef, IntegerPoint("1"), TASK_STATUS_SUBMITTED, False)

     assert tstate.is_gt(TASK_STATUS_WAITING)
@@ -141,3 +143,31 @@ def test_get_resolved_dependencies():
         '1/d',
         '1/e',
     ]
+
+
+@pytest.mark.parametrize(
+    'itask_run_mode, disable_handlers, expect',
+    (
+        ('live', True, False),
+        ('live', False, False),
+        ('dummy', True, False),
+        ('dummy', False, False),
+        ('simulation', True, True),
+        ('simulation', False, True),
+        ('skip', True, True),
+        ('skip', False, False),
+    )
+)
+def test_disable_task_event_handlers(itask_run_mode, disable_handlers, expect):
+    """Conditions under which task event handlers should not be used."""
+    # Construct a fake itask object:
+    itask = SimpleNamespace(
+        run_mode=RunMode(itask_run_mode),
+        platform={'disable task event handlers': disable_handlers},
+        tdef=SimpleNamespace(
+            rtconfig={
+                'skip': {'disable task event handlers': disable_handlers}})
+    )
+    # Check method:
+    assert disable_task_event_handlers(itask) is expect
diff --git a/tests/unit/test_workflow_db_mgr.py b/tests/unit/test_workflow_db_mgr.py
index 630e18dd038..c749dc9eeef 100644
--- a/tests/unit/test_workflow_db_mgr.py
+++ b/tests/unit/test_workflow_db_mgr.py
@@ -53,7 +53,7 @@ def test_remove_task_from_flows(
     }
     db_mgr = WorkflowDatabaseManager(tmp_path)
     schd_tokens = Tokens('~asterix/gaul')
-    tdef = TaskDef('a', {}, None, None, None)
+    tdef = TaskDef('a', rtcfg={}, start_point=None, initial_point=None)
     with db_mgr.get_pri_dao() as dao:
         db_mgr.pri_dao = dao
         db_mgr.pub_dao = Mock()
diff --git a/tests/unit/test_xtrigger_mgr.py b/tests/unit/test_xtrigger_mgr.py
index 276fd354a95..2ec207cf25d 100644
--- a/tests/unit/test_xtrigger_mgr.py
+++ b/tests/unit/test_xtrigger_mgr.py
@@ -178,7 +178,6 @@ def test_housekeeping_with_xtrigger_satisfied(xtrigger_mgr):
     tdef = TaskDef(
         name="foo",
         rtcfg={'completion': None},
-        run_mode="live",
         start_point=1,
         initial_point=1,
     )
@@ -232,7 +231,6 @@ def test__call_xtriggers_async(xtrigger_mgr):
     tdef = TaskDef(
         name="foo",
         rtcfg={'completion': None},
-        run_mode="live",
         start_point=1,
         initial_point=1
     )
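Editor's note: the truth table in test_disable_task_event_handlers above pins down the contract of the disable_task_event_handlers helper added to cylc.flow.run_modes: live and dummy tasks always fire task event handlers, simulation tasks never do, and skip tasks suppress them unless "disable task event handlers" is switched off in the task's [skip] configuration. The sketch below is a minimal restatement of that contract, not the shipped implementation; the helper and variable names are illustrative only. Note also that the fake itask in the test sets the same flag on both itask.platform and itask.tdef.rtconfig, so the test alone does not distinguish which of the two the real helper consults; this sketch assumes the task's rtconfig, matching the new [runtime][<namespace>][skip] setting.

from types import SimpleNamespace

from cylc.flow.run_modes import RunMode


def disable_task_event_handlers_sketch(itask) -> bool:
    """Return True if task event handlers should be suppressed for itask."""
    if itask.run_mode == RunMode.SIMULATION:
        # Simulation mode tasks never fire task event handlers.
        return True
    if itask.run_mode == RunMode.SKIP:
        # Skip mode tasks suppress handlers unless explicitly re-enabled.
        return itask.tdef.rtconfig['skip']['disable task event handlers']
    # Live and dummy mode tasks always use their event handlers.
    return False


# Usage, mirroring the fake itask constructed in the test:
itask = SimpleNamespace(
    run_mode=RunMode('skip'),
    tdef=SimpleNamespace(
        rtconfig={'skip': {'disable task event handlers': True}}),
)
assert disable_task_event_handlers_sketch(itask) is True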