diff --git a/README.md b/README.md index 74d6238..fa8f877 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ ![os](https://img.shields.io/badge/OS-win%7Cmac%7Clinux-9cf) ![python](https://img.shields.io/badge/Python-3.10+-darkgreen) -> **Note:** EOS is currently in the alpha development stage. Much additional functionality and enhancements are planned. +> **Note:** EOS is actively being developed. Many additional features and enhancements are planned. > It currently has a core feature set to use for research. Please report any issues, make feature requests, or contribute to development! The Experiment Orchestration System (EOS) is a comprehensive software framework and runtime for laboratory automation, designed diff --git a/docker/.env.example b/docker/.env.example index e04a4de..d9de245 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -1,4 +1,3 @@ -# EOS ##################################### COMPOSE_PROJECT_NAME=eos # MongoDB admin credentials @@ -8,17 +7,3 @@ EOS_MONGODB_ROOT_PASSWORD= # MinIO admin credentials EOS_MINIO_ROOT_USER= EOS_MINIO_ROOT_PASSWORD= - -# Budibase ################################ -# You can set the below to random values -BB_JWT_SECRET= -BB_MINIO_ACCESS_KEY= -BB_MINIO_SECRET_KEY= -BB_REDIS_PASSWORD= -BB_COUCHDB_USER= -BB_COUCHDB_PASSWORD= -BB_INTERNAL_API_KEY= - -# Admin user credentials to login to Budibase -BB_ADMIN_USER_EMAIL= -BB_ADMIN_USER_PASSWORD= diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 5ca6cdc..7728999 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -38,30 +38,6 @@ services: - minio_data:/data command: server --console-address ":9001" /data - eos-budibase: - image: budibase/budibase:2.32.12-sqs - container_name: eos-budibase - hostname: eos-budibase - restart: unless-stopped - environment: - JWT_SECRET: ${BB_JWT_SECRET} - MINIO_ACCESS_KEY: ${BB_MINIO_ACCESS_KEY} - MINIO_SECRET_KEY: ${BB_MINIO_SECRET_KEY} - REDIS_PASSWORD: ${BB_REDIS_PASSWORD} - COUCHDB_USER: ${BB_COUCHDB_USER} - COUCHDB_PASSWORD: ${BB_COUCHDB_PASSWORD} - INTERNAL_API_KEY: ${BB_INTERNAL_API_KEY} - BB_ADMIN_USER_EMAIL: ${BB_ADMIN_USER_EMAIL} - BB_ADMIN_USER_PASSWORD: ${BB_ADMIN_USER_PASSWORD} - ports: - - "8080:80" - networks: - - eos_network - extra_hosts: - - "host.docker.internal:host-gateway" - volumes: - - budibase_data:/data - networks: eos_network: name: eos_network @@ -72,5 +48,3 @@ volumes: driver: local minio_data: driver: local - budibase_data: - driver: local diff --git a/docs/conf.py b/docs/conf.py index b263fb2..1407ad4 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -6,10 +6,11 @@ # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + project = "eos" copyright = "2024, UNC Robotics" author = "Angelos Angelopoulos" -release = "0.4.0" +release = "0.6.0" extensions = [ "sphinx.ext.autodoc", diff --git a/docs/index.rst b/docs/index.rst index 976e1ee..32caab0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,7 +3,7 @@ The Experiment Orchestration System (EOS) .. note:: - EOS is currently in the alpha development stage. + EOS is actively being developed. Many additional features and enhancements are planned. It currently has a core feature set to use for research. Please report any issues, make feature requests, or contribute to development!
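With Budibase removed, the Docker Compose stack now only provisions MongoDB and MinIO. A minimal sketch of standing it up, assuming the docker/ layout shown above and a Docker Compose v2 CLI (the credential values are placeholders you must fill in yourself):

    cd docker
    # Copy the template and fill in the MongoDB and MinIO admin credentials
    # (e.g. EOS_MINIO_ROOT_USER and EOS_MINIO_ROOT_PASSWORD) before starting.
    cp .env.example .env
    docker compose up -d
    # Per the server command above, the MinIO console listens on port 9001.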
diff --git a/docs/user-guide/campaigns.rst b/docs/user-guide/campaigns.rst index 4a0f53d..5523b7c 100644 --- a/docs/user-guide/campaigns.rst +++ b/docs/user-guide/campaigns.rst @@ -41,27 +41,27 @@ Looking at the task specification of the ``score_color`` task, we also see that .. code-block:: yaml type: Score Color - description: Score a color based on how close it is to an expected color + desc: Score a color based on how close it is to an expected color input_parameters: red: - type: integer + type: int unit: n/a - description: The red component of the color + desc: The red component of the color green: - type: integer + type: int unit: n/a - description: The green component of the color + desc: The green component of the color blue: - type: integer + type: int unit: n/a - description: The blue component of the color + desc: The blue component of the color output_parameters: loss: - type: decimal + type: float unit: n/a - description: Total loss of the color compared to the expected color + desc: Total loss of the color compared to the expected color Taking all these together, we see that this experiment involves selecting CMYK color component volumes, as well as a mixing time and mixing speed and trying to minimize the loss of a synthesized color compared to an expected color. @@ -72,8 +72,6 @@ This setup is also summarized in the ``optimizer.py`` file adjacent to ``experim .. code-block:: python - from typing import Type, Tuple, Dict - from bofire.data_models.acquisition_functions.acquisition_function import qNEI from bofire.data_models.enum import SamplingMethodEnum from bofire.data_models.features.continuous import ContinuousOutput, ContinuousInput @@ -83,7 +81,7 @@ This setup is also summarized in the ``optimizer.py`` file adjacent to ``experim from eos.optimization.abstract_sequential_optimizer import AbstractSequentialOptimizer - def eos_create_campaign_optimizer() -> Tuple[Dict, Type[AbstractSequentialOptimizer]]: + def eos_create_campaign_optimizer() -> tuple[dict, type[AbstractSequentialOptimizer]]: constructor_args = { "inputs": [ ContinuousInput(key="dispense_colors.cyan_volume", bounds=(0, 5)), diff --git a/docs/user-guide/color_mixing.rst b/docs/user-guide/color_mixing.rst index 5cc6b22..df25ce5 100644 --- a/docs/user-guide/color_mixing.rst +++ b/docs/user-guide/color_mixing.rst @@ -44,19 +44,17 @@ For example, you can submit a request to run a campaign through the REST API wit .. code-block:: bash - curl -X POST http://localhost:8000 \ + curl -X POST http://localhost:8000/api/campaigns/submit \ -H "Content-Type: application/json" \ -d '{ - "campaign_id": "mix_colors", + "id": "mix_colors", "experiment_type": "color_mixing_1", - "campaign_execution_parameters": { - "max_experiments": 150, - "max_concurrent_experiments": 1, - "do_optimization": true, - "optimizer_computer_ip": "127.0.0.1", - "dynamic_parameters": {}, - "resume": false - } + "max_experiments": 150, + "max_concurrent_experiments": 1, + "optimize": true, + "optimizer_computer_ip": "127.0.0.1", + "dynamic_parameters": {}, + "resume": false }' .. 
note:: @@ -101,8 +99,8 @@ This is the Python code for the color analyzer device: class ColorAnalyzerDevice(BaseDevice): - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: - port = int(initialization_parameters["port"]) + async def _initialize(self, init_parameters: dict[str, Any]) -> None: + port = int(init_parameters["port"]) self.client = DeviceClient(port) self.client.open_connection() @@ -136,9 +134,9 @@ The device YAML file for the color analyzer device is: .. code-block:: yaml type: color_analyzer - description: Analyzes the RGB value of a color mixture + desc: Analyzes the RGB value of a color mixture - initialization_parameters: + init_parameters: port: 5002 The main thing to notice is that it accepts an initialization parameter called ``port``, which is used to connect to the @@ -167,7 +165,7 @@ This is the Python code for the "Analyze color" task: from eos.tasks.base_task import BaseTask - class AnalyzeColorTask(BaseTask): + class AnalyzeColor(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, @@ -188,7 +186,7 @@ This is the Python code for the "Analyze color" task: The task implementation is straightforward. We first get a reference to the color analyzer device (there is only one allocated to the task). Then, we call the `analyze` function from the color analyzer device we saw earlier. Finally, we construct and return the dictionary of output parameters and the containers. The task YAML file is the following: .. code-block:: yaml type: Analyze Color - description: Analyze the color of a solution + desc: Analyze the color of a solution device_types: - color_analyzer output_parameters: red: - type: integer + type: int unit: n/a - description: The red component of the color + desc: The red component of the color green: - type: integer + type: int unit: n/a - description: The green component of the color + desc: The green component of the color blue: - type: integer + type: int unit: n/a - description: The blue component of the color + desc: The blue component of the color Laboratory ---------- We define five beakers with a capacity of 300 mL. ..
code-block:: yaml type: color_lab - description: A laboratory for color analysis and mixing + desc: A laboratory for color analysis and mixing locations: color_experiment_benchtop: - description: Benchtop for color experiments + desc: Benchtop for color experiments container_storage: - description: Storage unit for containers + desc: Storage unit for containers color_mixer_1: - description: Color mixing apparatus for incrementally dispensing and mixing color solutions + desc: Color mixing apparatus for incrementally dispensing and mixing color solutions color_mixer_2: - description: Color mixing apparatus for incrementally dispensing and mixing color solutions + desc: Color mixing apparatus for incrementally dispensing and mixing color solutions color_mixer_3: - description: Color mixing apparatus for incrementally dispensing and mixing color solutions + desc: Color mixing apparatus for incrementally dispensing and mixing color solutions color_analyzer_1: - description: Analyzer for color solutions + desc: Analyzer for color solutions color_analyzer_2: - description: Analyzer for color solutions + desc: Analyzer for color solutions color_analyzer_3: - description: Analyzer for color solutions + desc: Analyzer for color solutions cleaning_station: - description: Station for cleaning containers + desc: Station for cleaning containers devices: cleaning_station: - description: Station for cleaning containers + desc: Station for cleaning containers type: cleaning_station location: cleaning_station computer: eos_computer robot_arm: - description: Robotic arm for moving containers + desc: Robotic arm for moving containers type: robot_arm location: color_experiment_benchtop computer: eos_computer - initialization_parameters: + init_parameters: locations: - container_storage - color_mixer_1 @@ -292,57 +290,57 @@ We define five beakers with a capacity of 300 mL. 
- emptying_location color_mixer_1: - description: Color mixing apparatus for incrementally dispensing and mixing color solutions + desc: Color mixing apparatus for incrementally dispensing and mixing color solutions type: color_mixer location: color_mixer_1 computer: eos_computer - initialization_parameters: + init_parameters: port: 5004 color_mixer_2: - description: Color mixing apparatus for incrementally dispensing and mixing color solutions + desc: Color mixing apparatus for incrementally dispensing and mixing color solutions type: color_mixer location: color_mixer_2 computer: eos_computer - initialization_parameters: + init_parameters: port: 5006 color_mixer_3: - description: Color mixing apparatus for incrementally dispensing and mixing color solutions + desc: Color mixing apparatus for incrementally dispensing and mixing color solutions type: color_mixer location: color_mixer_3 computer: eos_computer - initialization_parameters: + init_parameters: port: 5008 color_analyzer_1: - description: Analyzer for color solutions + desc: Analyzer for color solutions type: color_analyzer location: color_analyzer_1 computer: eos_computer - initialization_parameters: + init_parameters: port: 5003 color_analyzer_2: - description: Analyzer for color solutions + desc: Analyzer for color solutions type: color_analyzer location: color_analyzer_2 computer: eos_computer - initialization_parameters: + init_parameters: port: 5005 color_analyzer_3: - description: Analyzer for color solutions + desc: Analyzer for color solutions type: color_analyzer location: color_analyzer_3 computer: eos_computer - initialization_parameters: + init_parameters: port: 5007 containers: @@ -385,7 +383,7 @@ The YAML definition for the template experiment is shown below: .. code-block:: yaml type: {{ experiment_type }} - description: Experiment to find optimal parameters to synthesize a desired color + desc: Experiment to find optimal parameters to synthesize a desired color labs: - color_lab @@ -393,7 +391,7 @@ The YAML definition for the template experiment is shown below: tasks: - id: retrieve_container type: Retrieve Container - description: Get a container from storage and move it to the color dispenser + desc: Get a container from storage and move it to the color dispenser devices: - lab_id: color_lab id: robot_arm @@ -405,7 +403,7 @@ The YAML definition for the template experiment is shown below: - id: mix_colors type: Color Mixing - description: Iteratively dispense and mix the colors in the container + desc: Iteratively dispense and mix the colors in the container devices: - lab_id: color_lab id: {{ color_mixer }} @@ -427,7 +425,7 @@ The YAML definition for the template experiment is shown below: - id: move_container_to_analyzer type: Move Container - description: Move the container from the color mixer to the color analyzer + desc: Move the container from the color mixer to the color analyzer devices: - lab_id: color_lab id: robot_arm @@ -441,7 +439,7 @@ The YAML definition for the template experiment is shown below: - id: analyze_color type: Analyze Color - description: Analyze the color of the solution in the container and output the RGB values + desc: Analyze the color of the solution in the container and output the RGB values devices: - lab_id: color_lab id: {{ color_analyzer }} @@ -451,7 +449,7 @@ The YAML definition for the template experiment is shown below: - id: score_color type: Score Color - description: Score the color based on the RGB values + desc: Score the color based on the RGB values parameters: red: 
analyze_color.red green: analyze_color.green @@ -463,7 +461,7 @@ The YAML definition for the template experiment is shown below: - id: empty_container type: Empty Container - description: Empty the container and move it to the cleaning station + desc: Empty the container and move it to the cleaning station devices: - lab_id: color_lab id: robot_arm @@ -478,7 +476,7 @@ The YAML definition for the template experiment is shown below: - id: clean_container type: Clean Container - description: Clean the container by rinsing it with distilled water + desc: Clean the container by rinsing it with distilled water devices: - lab_id: color_lab id: cleaning_station @@ -490,7 +488,7 @@ The YAML definition for the template experiment is shown below: - id: store_container type: Store Container - description: Store the container back in the container storage + desc: Store the container back in the container storage devices: - lab_id: color_lab id: robot_arm diff --git a/docs/user-guide/devices.rst b/docs/user-guide/devices.rst index b2214af..5058943 100644 --- a/docs/user-guide/devices.rst +++ b/docs/user-guide/devices.rst @@ -32,7 +32,7 @@ Device Implementation YAML File (device.yml) ~~~~~~~~~~~~~~~~~~~~~~ * Specifies the device type, description, and initialization parameters * The same implementation can be used for multiple devices of the same type * Initialization parameters can be overridden in the laboratory definition Below is an example device YAML file for a magnetic mixer: .. code-block:: yaml type: magnetic_mixer - description: Magnetic mixer for mixing the contents of a container + desc: Magnetic mixer for mixing the contents of a container - initialization_parameters: + init_parameters: port: 5004 Python File (device.py) ~~~~~~~~~~~~~~~~~~~~~~~ * Implements device functionality * All device implementations must inherit from ``BaseDevice`` -* The device class name must end with "Device" to be discovered by EOS Below is an example implementation of a magnetic mixer device: @@ -66,9 +65,9 @@ Below is an example implementation of a magnetic mixer device: from eos.devices.base_device import BaseDevice from user.color_lab.common.device_client import DeviceClient - class MagneticMixerDevice(BaseDevice): - async def _initialize(self, initialization_parameters: Dict[str, Any]) -> None: - port = int(initialization_parameters["port"]) + class MagneticMixer(BaseDevice): + async def _initialize(self, init_parameters: Dict[str, Any]) -> None: + port = int(init_parameters["port"]) self.client = DeviceClient(port) self.client.open_connection() diff --git a/docs/user-guide/experiments.rst b/docs/user-guide/experiments.rst index 7f4c0d5..c069d1c 100644 --- a/docs/user-guide/experiments.rst +++ b/docs/user-guide/experiments.rst @@ -38,7 +38,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter ..
code-block:: yaml type: color_mixing - description: Experiment to find optimal parameters to synthesize a desired color + desc: Experiment to find optimal parameters to synthesize a desired color labs: - color_lab @@ -46,7 +46,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter tasks: - id: retrieve_container type: Retrieve Container - description: Get a random available container from storage and move it to the color dispenser + desc: Get a random available container from storage and move it to the color dispenser devices: - lab_id: color_lab id: robot_arm @@ -64,7 +64,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter - id: dispense_colors type: Dispense Colors - description: Dispense a color from the color dispenser into the container + desc: Dispense a color from the color dispenser into the container devices: - lab_id: color_lab id: color_dispenser @@ -79,7 +79,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter - id: move_container_to_mixer type: Move Container - description: Move the container to the magnetic mixer + desc: Move the container to the magnetic mixer devices: - lab_id: color_lab id: robot_arm @@ -93,7 +93,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter - id: mix_colors type: Magnetic Mixing - description: Mix the colors in the container + desc: Mix the colors in the container devices: - lab_id: color_lab id: magnetic_mixer @@ -106,7 +106,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter - id: move_container_to_analyzer type: Move Container - description: Move the container to the color analyzer + desc: Move the container to the color analyzer devices: - lab_id: color_lab id: robot_arm @@ -120,7 +120,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter - id: analyze_color type: Analyze Color - description: Analyze the color of the solution in the container and output the RGB values + desc: Analyze the color of the solution in the container and output the RGB values devices: - lab_id: color_lab id: color_analyzer @@ -130,7 +130,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter - id: score_color type: Score Color - description: Score the color based on the RGB values + desc: Score the color based on the RGB values parameters: red: analyze_color.red green: analyze_color.green @@ -139,7 +139,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter - id: empty_container type: Empty Container - description: Empty the container and move it to the cleaning station + desc: Empty the container and move it to the cleaning station devices: - lab_id: color_lab id: robot_arm @@ -154,7 +154,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter - id: clean_container type: Clean Container - description: Clean the container by rinsing it with distilled water + desc: Clean the container by rinsing it with distilled water devices: - lab_id: color_lab id: cleaning_station @@ -164,7 +164,7 @@ Below is an example experiment YAML file for an experiment to optimize parameter - id: store_container type: Store Container - description: Store the container back in the container storage + desc: Store the container back in the container storage devices: - lab_id: color_lab id: robot_arm @@ -181,7 +181,7 @@ Let's dissect this file: .. 
code-block:: yaml type: color_mixing - description: Experiment to find optimal parameters to synthesize a desired color + desc: Experiment to find optimal parameters to synthesize a desired color labs: - color_lab @@ -197,7 +197,7 @@ Now let's look at the first task in the experiment: - id: retrieve_container type: Retrieve Container - description: Get a random available container from storage and move it to the color dispenser + desc: Get a random available container from storage and move it to the color dispenser devices: - lab_id: color_lab id: robot_arm @@ -228,7 +228,7 @@ Let's look at the next task: - id: dispense_colors type: Dispense Colors - description: Dispense a color from the color dispenser into the container + desc: Dispense a color from the color dispenser into the container devices: - lab_id: color_lab id: color_dispenser @@ -260,8 +260,6 @@ As an example, below is the optimizer file for the color mixing experiment: .. code-block:: python - from typing import Type, Tuple, Dict - from bofire.data_models.acquisition_functions.acquisition_function import qNEI from bofire.data_models.enum import SamplingMethodEnum from bofire.data_models.features.continuous import ContinuousOutput, ContinuousInput @@ -271,7 +269,7 @@ As an example, below is the optimizer file for the color mixing experiment: from eos.optimization.abstract_sequential_optimizer import AbstractSequentialOptimizer - def eos_create_campaign_optimizer() -> Tuple[Dict, Type[AbstractSequentialOptimizer]]: + def eos_create_campaign_optimizer() -> tuple[dict, type[AbstractSequentialOptimizer]]: constructor_args = { "inputs": [ ContinuousInput(key="dispense_colors.cyan_volume", bounds=(0, 5)), diff --git a/docs/user-guide/installation.rst b/docs/user-guide/installation.rst index 6fd7aa0..e36ff17 100644 --- a/docs/user-guide/installation.rst +++ b/docs/user-guide/installation.rst @@ -11,7 +11,7 @@ We recommend a central computer that is easily accessible. We strongly recommend that the laboratory has its own isolated network for security and performance reasons. See :doc:`infrastructure setup ` for more information. -EOS also requires a MongoDB database, a MinIO object storage server, and (for now) Budibase for the web UI. +EOS also requires a MongoDB database and a MinIO object storage server. We provide a Docker Compose file that can set up all of these services for you. 1. Install PDM diff --git a/docs/user-guide/jinja2_templating.rst b/docs/user-guide/jinja2_templating.rst index 2a58cd5..8048b26 100644 --- a/docs/user-guide/jinja2_templating.rst +++ b/docs/user-guide/jinja2_templating.rst @@ -67,7 +67,7 @@ In the example below, the task "mix_colors" is only included if the variable ``m {% if mix_colors %} - id: mix_colors type: Color Mixing - description: Mix the colors in the container + desc: Mix the colors in the container # ... rest of the task definition {% endif %} diff --git a/docs/user-guide/laboratories.rst b/docs/user-guide/laboratories.rst index ea9822a..0ba0b42 100644 --- a/docs/user-guide/laboratories.rst +++ b/docs/user-guide/laboratories.rst @@ -27,79 +27,79 @@ Below is an example laboratory YAML file for a solar cell fabrication lab: .. 
code-block:: yaml type: solar_cell_fabrication_lab - description: A laboratory for fabricating and characterizing perovskite solar cells + desc: A laboratory for fabricating and characterizing perovskite solar cells locations: glovebox: - description: Nitrogen-filled glovebox + desc: Nitrogen-filled glovebox metadata: map_coordinates: x: 10 y: 20 theta: 0 fume_hood: - description: Fume hood for solution preparation and coating + desc: Fume hood for solution preparation and coating annealing_station: - description: Hotplate for thermal annealing + desc: Hotplate for thermal annealing evaporation_chamber: - description: Thermal evaporation chamber for electrode deposition + desc: Thermal evaporation chamber for electrode deposition characterization_room: - description: Room for solar cell performance testing + desc: Room for solar cell performance testing computers: xrd_computer: - description: XRD system control and data analysis + desc: XRD system control and data analysis ip: 192.168.1.101 solar_sim_computer: - description: Solar simulator control and J-V measurements + desc: Solar simulator control and J-V measurements ip: 192.168.1.102 robot_computer: - description: Mobile manipulation robot control + desc: Mobile manipulation robot control ip: 192.168.1.103 devices: spin_coater: - description: Spin coater for depositing perovskite and transport layers + desc: Spin coater for depositing perovskite and transport layers type: spin_coater location: glovebox computer: eos_computer uv_ozone_cleaner: - description: UV-Ozone cleaner for substrate treatment + desc: UV-Ozone cleaner for substrate treatment type: uv_ozone_cleaner location: fume_hood computer: eos_computer thermal_evaporator: - description: Thermal evaporator for metal electrode deposition + desc: Thermal evaporator for metal electrode deposition type: thermal_evaporator location: evaporation_chamber computer: eos_computer - initialization_parameters: + init_parameters: max_temperature: 1000C materials: [Au, Ag, Al] solar_simulator: - description: Solar simulator for J-V curve measurements + desc: Solar simulator for J-V curve measurements type: solar_simulator location: characterization_room computer: solar_sim_computer - initialization_parameters: + init_parameters: spectrum: AM1.5G intensity: 100mW/cm2 xrd_system: - description: X-ray diffractometer for crystal structure analysis + desc: X-ray diffractometer for crystal structure analysis type: xrd location: characterization_room computer: xrd_computer mobile_robot: - description: Mobile manipulation robot for automated sample transfer + desc: Mobile manipulation robot for automated sample transfer type: mobile_robot location: characterization_room computer: robot_computer - initialization_parameters: + init_parameters: locations: - glovebox - fume_hood @@ -144,20 +144,20 @@ Defining locations is optional. 
locations: glovebox: - description: Nitrogen-filled glovebox + desc: Nitrogen-filled glovebox metadata: map_coordinates: x: 10 y: 20 theta: 0 fume_hood: - description: Fume hood for solution preparation and coating + desc: Fume hood for solution preparation and coating annealing_station: - description: Hotplate for thermal annealing + desc: Hotplate for thermal annealing evaporation_chamber: - description: Thermal evaporation chamber for electrode deposition + desc: Thermal evaporation chamber for electrode deposition characterization_room: - description: Room for solar cell performance testing + desc: Room for solar cell performance testing Computers (Optional) """""""""""""""""""" @@ -179,13 +179,13 @@ connected to eos_computer). computers: xrd_computer: - description: XRD system control and data analysis + desc: XRD system control and data analysis ip: 192.168.1.101 solar_sim_computer: - description: Solar simulator control and J-V measurements + desc: Solar simulator control and J-V measurements ip: 192.168.1.102 robot_computer: - description: Mobile manipulation robot control + desc: Mobile manipulation robot control ip: 192.168.1.103 Devices (Required) """""""""""""""""" @@ -197,23 +197,23 @@ Each device must have a unique name inside the lab and must be defined in the `` devices: spin_coater: - description: Spin coater for depositing perovskite and transport layers + desc: Spin coater for depositing perovskite and transport layers type: spin_coater location: glovebox computer: eos_computer uv_ozone_cleaner: - description: UV-Ozone cleaner for substrate treatment + desc: UV-Ozone cleaner for substrate treatment type: uv_ozone_cleaner location: fume_hood computer: eos_computer thermal_evaporator: - description: Thermal evaporator for metal electrode deposition + desc: Thermal evaporator for metal electrode deposition type: thermal_evaporator location: evaporation_chamber computer: eos_computer - initialization_parameters: + init_parameters: max_temperature: 1000C materials: [Au, Ag, Al] @@ -226,7 +226,7 @@ There can be multiple devices with different names of the same type. **computer**: The computer that controls the device. If not "eos_computer", the computer must be defined in the "computers" section. -**initialization_parameters** (optional): Parameters required to initialize the device. +**init_parameters** (optional): Parameters required to initialize the device. These parameters are defined in the device specification and can be overridden here. Containers (Optional) diff --git a/docs/user-guide/rest_api.rst b/docs/user-guide/rest_api.rst index 8ea6525..c699e1f 100644 --- a/docs/user-guide/rest_api.rst +++ b/docs/user-guide/rest_api.rst @@ -11,7 +11,7 @@ Example functions include: .. warning:: - Be **very careful** to who accesses REST API. + Be very careful about who can access the REST API. The REST API currently has no authentication. Only use it internally in its current state. @@ -20,7 +20,7 @@ Example functions include: .. warning:: EOS will likely have control over expensive (and potentially dangerous) hardware and unchecked REST API access could - have **severe** consequences. + have severe consequences. Documentation ------------- diff --git a/docs/user-guide/tasks.rst b/docs/user-guide/tasks.rst index b6f384f..6117eb4 100644 --- a/docs/user-guide/tasks.rst +++ b/docs/user-guide/tasks.rst @@ -26,19 +26,19 @@ Parameters are values that are input to a task or output from a task. Every parameter has a specific data type. EOS supports the following parameter types: -* **integer**: An integer number.
+* **int**: An integer. Equivalent to Python's ``int`` -* **decimal**: A decimal number. +* **float**: A floating-point number. Equivalent to Python's ``float`` -* **string**: A string (series of text characters). +* **str**: A string (a series of text characters). Equivalent to Python's ``str`` -* **boolean**: A true/false value. +* **bool**: A true/false value. Equivalent to Python's ``bool`` * **choice**: A value that must be one of a set of predefined choices. The choices can be any type. * **list**: A list of values of a specific type. Equivalent to Python's ``list``. -* **dictionary**: A dictionary of key-value pairs. +* **dict**: A dictionary of key-value pairs. Equivalent to Python's ``dict``. Tasks can have multiple parameters of different types. @@ -47,7 +47,7 @@ EOS will ensure that the parameters passed to a task are of the correct type and Containers ---------- Containers are referenced by a unique identifier called a **container ID**. A container ID is a string that uniquely identifies a container. Every container in EOS must have an ID, and these can be specified in the laboratory definition. Containers are treated as *global* objects and can move across labs. However, every container must have a "home" lab from which it originates. @@ -77,7 +77,7 @@ Task Implementation YAML File (task.yml) ~~~~~~~~~~~~~~~~~~~~ * Specifies the task type, description, and input/output parameters and containers * Acts as the interface contract (spec) for the task * This contract is used to validate tasks, and EOS enforces the contract statically and dynamically during execution * Useful as documentation for the task @@ -89,32 +89,32 @@ Below is an example task YAML file for a GC analysis task for GCs made by SRI In .. code-block:: yaml type: SRI GC Analysis - description: Perform gas chromatography (GC) analysis on a sample. + desc: Perform gas chromatography (GC) analysis on a sample. device_types: - sri_gas_chromatograph input_parameters: analysis_time: - type: integer + type: int unit: seconds value: 480 - description: How long to run the GC analysis + desc: How long to run the GC analysis output_parameters: known_substances: - type: dictionary - description: Peaks and peak areas of identified substances + type: dict + desc: Peaks and peak areas of identified substances unknown_substances: - type: dictionary - description: Peaks and peak areas of substances that could not be identified + type: dict + desc: Peaks and peak areas of substances that could not be identified The task specification makes clear that: * The task is of type "SRI GC Analysis" * The task requires a device of type "sri_gas_chromatograph". EOS will enforce this requirement. -* The task takes an input integer parameter ``analysis_time`` in seconds. +* The task takes an input ``int`` parameter ``analysis_time`` in seconds. It has a default value of 480, making this an optional parameter. * The task outputs two dictionaries: ``known_substances`` and ``unknown_substances``. @@ -129,8 +129,8 @@ Integer .. code-block:: yaml sample_rate: - type: integer - description: The number of samples per second + type: int + desc: The number of samples per second value: 44100 unit: Hz min: 8000 @@ -138,13 +138,13 @@ Integers must have a unit (can be n/a) and can also have a minimum and maximum value. -Decimal -""""""" +Float +""""" ..
code-block:: yaml threshold_voltage: - type: decimal - description: The voltage threshold for signal detection + type: float + desc: The voltage threshold for signal detection value: 2.5 unit: volts min: 0.0 @@ -157,8 +157,8 @@ String .. code-block:: yaml file_prefix: - type: string - description: Prefix for output file names + type: str + desc: Prefix for output file names value: "experiment_" Boolean @@ -167,7 +167,7 @@ Boolean auto_calibrate: type: boolean - description: Whether to perform auto-calibration before analysis + desc: Whether to perform auto-calibration before analysis value: true Booleans are true/false values. @@ -178,7 +178,7 @@ Choice column_type: type: choice - description: HPLC column type + desc: HPLC column type value: "C18" choices: - "C18" @@ -195,9 +195,9 @@ List channel_gains: type: list - description: Gain values for each input channel + desc: Gain values for each input channel value: [1.0, 1.2, 0.8, 1.1] - element_type: decimal + element_type: float length: 4 min: [0.5, 0.5, 0.5, 0.5] max: [2.0, 2.0, 2.0, 2.0] @@ -210,8 +210,8 @@ Dictionary .. code-block:: yaml buffer_composition: - type: dictionary - description: Composition of a buffer solution + type: dict + desc: Composition of a buffer solution value: pH: 7.4 base: "Tris" @@ -229,7 +229,6 @@ Python File (task.yml) ~~~~~~~~~~~~~~~~~~~~~~ * Implements the task * All task implementations must inherit from ``BaseTask`` -* The task class name must end with "Task" to be discovered by EOS :bdg-primary:`task.py` @@ -238,7 +237,7 @@ Python File (task.yml) from eos.tasks.base_task import BaseTask - class MagneticMixingTask(BaseTask): + class MagneticMixing(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/eos/campaigns/campaign_executor.py b/eos/campaigns/campaign_executor.py index cbe3445..bba514e 100644 --- a/eos/campaigns/campaign_executor.py +++ b/eos/campaigns/campaign_executor.py @@ -6,9 +6,9 @@ from eos.campaigns.campaign_manager import CampaignManager from eos.campaigns.campaign_optimizer_manager import CampaignOptimizerManager -from eos.campaigns.entities.campaign import CampaignStatus, Campaign, CampaignExecutionParameters +from eos.campaigns.entities.campaign import CampaignStatus, Campaign, CampaignDefinition from eos.campaigns.exceptions import EosCampaignExecutionError -from eos.experiments.entities.experiment import ExperimentStatus, ExperimentExecutionParameters +from eos.experiments.entities.experiment import ExperimentStatus, ExperimentDefinition from eos.experiments.exceptions import EosExperimentCancellationError, EosExperimentExecutionError from eos.experiments.experiment_executor_factory import ExperimentExecutorFactory from eos.logging.logger import log @@ -22,17 +22,16 @@ class CampaignExecutor: def __init__( self, - campaign_id: str, - experiment_type: str, - execution_parameters: CampaignExecutionParameters, + campaign_definition: CampaignDefinition, campaign_manager: CampaignManager, campaign_optimizer_manager: CampaignOptimizerManager, task_manager: TaskManager, experiment_executor_factory: ExperimentExecutorFactory, ): - self._campaign_id = campaign_id - self._experiment_type = experiment_type - self._execution_parameters = execution_parameters + self._campaign_definition = campaign_definition + + self._campaign_id = campaign_definition.id + self._experiment_type = campaign_definition.experiment_type self._campaign_manager = campaign_manager self._campaign_optimizer_manager = campaign_optimizer_manager @@ -54,7 +53,7 @@ async def _setup_optimizer(self) -> 
None: self._optimizer = self._campaign_optimizer_manager.create_campaign_optimizer_actor( self._experiment_type, self._campaign_id, - self._execution_parameters.optimizer_computer_ip, + self._campaign_definition.optimizer_computer_ip, ) self._optimizer_input_names, self._optimizer_output_names = ( await self._campaign_optimizer_manager.get_input_and_output_names(self._campaign_id) @@ -64,7 +63,7 @@ def cleanup(self) -> None: """ Clean up resources when the campaign executor is no longer needed. """ - if self._execution_parameters.do_optimization: + if self._campaign_definition.optimize: self._campaign_optimizer_manager.terminate_campaign_optimizer_actor(self._campaign_id) async def start_campaign(self) -> None: @@ -87,7 +86,8 @@ async def _handle_existing_campaign(self, campaign: Campaign) -> None: """ self._campaign_status = campaign.status - if not self._execution_parameters.resume: + if not self._campaign_definition.resume: + def _raise_error(status: str) -> None: raise EosCampaignExecutionError( f"Cannot start campaign '{self._campaign_id}' as it already exists and is '{status}'. " @@ -108,13 +108,9 @@ async def _create_new_campaign(self) -> None: """ Create a new campaign. """ - await self._campaign_manager.create_campaign( - campaign_id=self._campaign_id, - experiment_type=self._experiment_type, - execution_parameters=self._execution_parameters, - ) + await self._campaign_manager.create_campaign(self._campaign_definition) - if self._execution_parameters.do_optimization: + if self._campaign_definition.optimize: await self._setup_optimizer() async def _resume_campaign(self) -> None: @@ -123,7 +119,7 @@ async def _resume_campaign(self) -> None: """ await self._campaign_manager.delete_current_campaign_experiments(self._campaign_id) - if self._execution_parameters.do_optimization: + if self._campaign_definition.optimize: await self._setup_optimizer() await self._restore_optimizer_state() @@ -196,7 +192,7 @@ async def progress_campaign(self) -> bool: campaign = await self._campaign_manager.get_campaign(self._campaign_id) if self._is_campaign_completed(campaign): - if self._execution_parameters.do_optimization: + if self._campaign_definition.optimize: await self._compute_pareto_solutions() await self._campaign_manager.complete_campaign(self._campaign_id) return True @@ -220,7 +216,7 @@ async def _progress_experiments(self) -> None: if is_completed: completed_experiments.append(experiment_id) - if self._execution_parameters.do_optimization and completed_experiments: + if self._campaign_definition.optimize and completed_experiments: await self._process_completed_experiments(completed_experiments) for experiment_id in completed_experiments: @@ -268,23 +264,26 @@ async def _create_experiments(self, campaign: Campaign) -> None: experiment_dynamic_parameters = await self._get_experiment_parameters(iteration) - experiment_execution_parameters = ExperimentExecutionParameters() - experiment_executor = self._experiment_executor_factory.create( - new_experiment_id, self._experiment_type, experiment_execution_parameters + experiment_definition = ExperimentDefinition( + id=new_experiment_id, + type=self._experiment_type, + dynamic_parameters=experiment_dynamic_parameters, ) + experiment_executor = self._experiment_executor_factory.create(experiment_definition) + await self._campaign_manager.add_campaign_experiment(self._campaign_id, new_experiment_id) self._experiment_executors[new_experiment_id] = experiment_executor - await experiment_executor.start_experiment(experiment_dynamic_parameters) + await 
experiment_executor.start_experiment() async def _get_experiment_parameters(self, iteration: int) -> dict[str, Any]: """ Get parameters for a new experiment. """ - campaign_dynamic_parameters = self._execution_parameters.dynamic_parameters + campaign_dynamic_parameters = self._campaign_definition.dynamic_parameters if campaign_dynamic_parameters and len(campaign_dynamic_parameters) > iteration: return campaign_dynamic_parameters[iteration] - if self._execution_parameters.do_optimization: + if self._campaign_definition.optimize: log.info(f"CMP '{self._campaign_id}' - Sampling new parameters from the optimizer...") new_parameters = await self._optimizer.sample.remote(1) new_parameters = new_parameters.to_dict(orient="records")[0] @@ -300,8 +299,8 @@ def _can_create_more_experiments(self, campaign: Campaign) -> bool: Check if more experiments can be created. """ num_executors = len(self._experiment_executors) - max_concurrent = self._execution_parameters.max_concurrent_experiments - max_total = self._execution_parameters.max_experiments + max_concurrent = self._campaign_definition.max_concurrent_experiments + max_total = self._campaign_definition.max_experiments current_total = campaign.experiments_completed + num_executors return num_executors < max_concurrent and (max_total == 0 or current_total < max_total) @@ -310,7 +309,7 @@ def _is_campaign_completed(self, campaign: Campaign) -> bool: """ Check if the campaign is completed. """ - max_experiments = self._execution_parameters.max_experiments + max_experiments = self._campaign_definition.max_experiments return ( max_experiments > 0 and campaign.experiments_completed >= max_experiments diff --git a/eos/campaigns/campaign_executor_factory.py b/eos/campaigns/campaign_executor_factory.py index d9be256..efa91be 100644 --- a/eos/campaigns/campaign_executor_factory.py +++ b/eos/campaigns/campaign_executor_factory.py @@ -1,7 +1,7 @@ from eos.campaigns.campaign_executor import CampaignExecutor from eos.campaigns.campaign_manager import CampaignManager from eos.campaigns.campaign_optimizer_manager import CampaignOptimizerManager -from eos.campaigns.entities.campaign import CampaignExecutionParameters +from eos.campaigns.entities.campaign import CampaignDefinition from eos.configuration.configuration_manager import ConfigurationManager from eos.experiments.experiment_executor_factory import ExperimentExecutorFactory @@ -30,14 +30,10 @@ def __init__( def create( self, - campaign_id: str, - experiment_type: str, - execution_parameters: CampaignExecutionParameters, + campaign_definition: CampaignDefinition, ) -> CampaignExecutor: return CampaignExecutor( - campaign_id, - experiment_type, - execution_parameters, + campaign_definition, self._campaign_manager, self._campaign_optimizer_manager, self._task_manager, diff --git a/eos/campaigns/campaign_manager.py b/eos/campaigns/campaign_manager.py index 5349072..a60cef0 100644 --- a/eos/campaigns/campaign_manager.py +++ b/eos/campaigns/campaign_manager.py @@ -2,7 +2,7 @@ from datetime import datetime, timezone from typing import Any -from eos.campaigns.entities.campaign import Campaign, CampaignStatus, CampaignExecutionParameters +from eos.campaigns.entities.campaign import Campaign, CampaignStatus, CampaignDefinition from eos.campaigns.exceptions import EosCampaignStateError from eos.campaigns.repositories.campaign_repository import CampaignRepository from eos.configuration.configuration_manager import ConfigurationManager @@ -39,19 +39,12 @@ async def initialize(self, db_interface: AsyncMongoDbInterface) -> 
None: async def create_campaign( self, - campaign_id: str, - experiment_type: str, - execution_parameters: CampaignExecutionParameters, - metadata: dict[str, Any] | None = None, + definition: CampaignDefinition, ) -> None: - """ - Create a new campaign of a given experiment type with a unique id. + """Create a new campaign.""" + campaign_id = definition.id + experiment_type = definition.experiment_type - :param campaign_id: A unique id for the campaign. - :param experiment_type: The type of the experiment as defined in the configuration. - :param execution_parameters: Parameters for the execution of the campaign. - :param metadata: Additional metadata to be stored with the campaign. - """ if await self._campaigns.get_one(id=campaign_id): raise EosCampaignStateError(f"Campaign '{campaign_id}' already exists.") @@ -59,12 +52,7 @@ async def create_campaign( if not experiment_config: raise EosCampaignStateError(f"Experiment type '{experiment_type}' not found in the configuration.") - campaign = Campaign( - id=campaign_id, - experiment_type=experiment_type, - execution_parameters=execution_parameters, - metadata=metadata or {}, - ) + campaign = Campaign.from_definition(definition) await self._campaigns.create(campaign.model_dump()) log.info(f"Created campaign '{campaign_id}'.") diff --git a/eos/campaigns/entities/campaign.py b/eos/campaigns/entities/campaign.py index 2eda803..5ff154a 100644 --- a/eos/campaigns/entities/campaign.py +++ b/eos/campaigns/entities/campaign.py @@ -2,22 +2,30 @@ from enum import Enum from typing import Any -from pydantic import BaseModel, field_serializer, Field, model_validator +from pydantic import BaseModel, Field, field_serializer, model_validator -class CampaignExecutionParameters(BaseModel): +class CampaignDefinition(BaseModel): + """The definition of a campaign. 
Used for submission.""" + + id: str + experiment_type: str + max_experiments: int = Field(0, ge=0) max_concurrent_experiments: int = Field(1, ge=1) - do_optimization: bool + optimize: bool optimizer_computer_ip: str = "127.0.0.1" + dynamic_parameters: list[dict[str, dict[str, Any]]] | None = None + metadata: dict[str, Any] = Field(default_factory=dict) + resume: bool = False @model_validator(mode="after") - def validate_dynamic_parameters(self) -> None: - if not self.do_optimization: + def validate_dynamic_parameters(self) -> "CampaignDefinition": + if not self.optimize: if not self.dynamic_parameters: raise ValueError("Campaign dynamic parameters must be provided if optimization is not enabled.") if len(self.dynamic_parameters) != self.max_experiments: @@ -37,35 +45,40 @@ class CampaignStatus(Enum): FAILED = "FAILED" -class Campaign(BaseModel): - id: str - experiment_type: str - - execution_parameters: CampaignExecutionParameters +class Campaign(CampaignDefinition): + """The state of a campaign in the system.""" status: CampaignStatus = CampaignStatus.CREATED + experiments_completed: int = Field(0, ge=0) - current_experiment_ids: list[str] = [] + current_experiment_ids: list[str] = Field(default_factory=list) pareto_solutions: list[dict[str, Any]] | None = None - metadata: dict[str, Any] = {} - start_time: datetime | None = None end_time: datetime | None = None + created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) - created_at: datetime = datetime.now(tz=timezone.utc) + class Config: + arbitrary_types_allowed = True @field_serializer("status") def status_enum_to_string(self, v: CampaignStatus) -> str: return v.value + @classmethod + def from_definition(cls, definition: CampaignDefinition) -> "Campaign": + """Create a Campaign instance from a CampaignDefinition.""" + return cls(**definition.model_dump()) + class CampaignSample(BaseModel): + """A sample collected during campaign execution.""" + campaign_id: str experiment_id: str inputs: dict[str, Any] outputs: dict[str, Any] - created_at: datetime = datetime.now(tz=timezone.utc) + created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) diff --git a/eos/cli/orchestrator_cli.py b/eos/cli/orchestrator_cli.py index 779b5c7..a41a3cb 100644 --- a/eos/cli/orchestrator_cli.py +++ b/eos/cli/orchestrator_cli.py @@ -6,17 +6,18 @@ from contextlib import AbstractAsyncContextManager from pathlib import Path from typing import Annotated +import importlib.metadata import typer import uvicorn +import yaml from litestar import Litestar, Router from litestar.di import Provide from litestar.logging import LoggingConfig -from omegaconf import OmegaConf, DictConfig +from eos.configuration.entities.eos_config import EosConfig, WebApiConfig from eos.logging.logger import log, LogLevel from eos.orchestration.orchestrator import Orchestrator -from eos.persistence.service_credentials import ServiceCredentials from eos.web_api.orchestrator.controllers.campaign_controller import CampaignController from eos.web_api.orchestrator.controllers.experiment_controller import ExperimentController from eos.web_api.orchestrator.controllers.file_controller import FileController @@ -25,33 +26,15 @@ from eos.web_api.orchestrator.exception_handling import global_exception_handler -def load_config(config_file: str) -> DictConfig: - default_config = { - "user_dir": "./user", - "labs": [], - "experiments": [], - "log_level": "INFO", - "web_api": { - "host": "localhost", - "port": 8070, - }, - "db": { - "host": "localhost", - 
"port": 27017, - "username": None, - "password": None, - }, - "file_db": { - "host": "localhost", - "port": 9004, - "username": None, - "password": None, - }, - } - - if not Path(config_file).exists(): +def load_config(config_file: str) -> EosConfig: + config_file_path = Path(config_file) + if not config_file_path.exists(): raise FileNotFoundError(f"Config file '{config_file}' does not exist") - return OmegaConf.merge(OmegaConf.create(default_config), OmegaConf.load(config_file)) + + with Path.open(config_file_path) as f: + user_config = yaml.safe_load(f) or {} + + return EosConfig(**user_config) def parse_list_arg(arg: str | None) -> list[str]: @@ -93,19 +76,16 @@ def signal_handler(*_) -> None: log.info("EOS shut down.") -async def setup_orchestrator(config: DictConfig) -> Orchestrator: - db_credentials = ServiceCredentials(**config.db) - file_db_credentials = ServiceCredentials(**config.file_db) - - orchestrator = Orchestrator(config.user_dir, db_credentials, file_db_credentials) +async def setup_orchestrator(config: EosConfig) -> Orchestrator: + orchestrator = Orchestrator(str(config.user_dir), config.db, config.file_db) await orchestrator.initialize() - await orchestrator.load_labs(config.labs) - orchestrator.load_experiments(config.experiments) + await orchestrator.loading.load_labs(config.labs) + orchestrator.loading.load_experiments(config.experiments) return orchestrator -def setup_web_api(orchestrator: Orchestrator, config: DictConfig) -> uvicorn.Server: +def setup_web_api(orchestrator: Orchestrator, config: WebApiConfig) -> uvicorn.Server: litestar_logging_config = LoggingConfig( configure_root_logger=False, loggers={"litestar": {"level": "CRITICAL"}}, @@ -125,14 +105,14 @@ def setup_web_api(orchestrator: Orchestrator, config: DictConfig) -> uvicorn.Ser exception_handlers={Exception: global_exception_handler}, ) - uv_config = uvicorn.Config(web_api_app, host=config.web_api.host, port=config.web_api.port, log_level="critical") + uv_config = uvicorn.Config(web_api_app, host=config.host, port=config.port, log_level="critical") return uvicorn.Server(uv_config) -async def run_eos(config: DictConfig) -> None: +async def run_eos(config: EosConfig) -> None: orchestrator = await setup_orchestrator(config) - web_api_server = setup_web_api(orchestrator, config) + web_api_server = setup_web_api(orchestrator, config.web_api) log.info("EOS initialized.") @@ -162,21 +142,32 @@ def start_orchestrator( typer.echo(EOS_BANNER) file_config = load_config(config_file) - cli_config = { - "user_dir": user_dir, - "labs": parse_list_arg(labs) if labs else None, - "experiments": parse_list_arg(experiments) if experiments else None, - "log_level": log_level.value if log_level else None, - } - cli_config = {k: v for k, v in cli_config.items() if v is not None} - config = OmegaConf.merge(file_config, OmegaConf.create(cli_config)) + + cli_overrides = {} + if user_dir: + cli_overrides["user_dir"] = user_dir + parsed_labs = parse_list_arg(labs) + if parsed_labs: + cli_overrides["labs"] = parsed_labs + parsed_experiments = parse_list_arg(experiments) + if parsed_experiments: + cli_overrides["experiments"] = parsed_experiments + if log_level is not None: + cli_overrides["log_level"] = log_level.value + + if cli_overrides: + config_dict = file_config.model_dump() + config_dict.update(cli_overrides) + config = EosConfig(**config_dict) + else: + config = file_config log.set_level(config.log_level) asyncio.run(run_eos(config)) -EOS_BANNER = r"""The Experiment Orchestration System +EOS_BANNER = f"""The Experiment 
Orchestration System, {importlib.metadata.version("eos")} ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌ ▐░█▀▀▀▀▀▀▀▀▀ ▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀▀▀ diff --git a/eos/configuration/configuration_manager.py b/eos/configuration/configuration_manager.py index 291e00a..318724b 100644 --- a/eos/configuration/configuration_manager.py +++ b/eos/configuration/configuration_manager.py @@ -8,9 +8,9 @@ from eos.configuration.plugin_registries.campaign_optimizer_plugin_registry import CampaignOptimizerPluginRegistry from eos.configuration.plugin_registries.device_plugin_registry import DevicePluginRegistry from eos.configuration.plugin_registries.task_plugin_registry import TaskPluginRegistry -from eos.configuration.spec_registries.device_specification_registry import DeviceSpecificationRegistry -from eos.configuration.spec_registries.task_specification_registry import ( - TaskSpecificationRegistry, +from eos.configuration.spec_registries.device_spec_registry import DeviceSpecRegistry +from eos.configuration.spec_registries.task_spec_registry import ( + TaskSpecRegistry, ) from eos.configuration.validation.experiment_validator import ExperimentValidator from eos.configuration.validation.lab_validator import LabValidator @@ -33,17 +33,17 @@ def __init__(self, user_dir: str): self._user_dir = user_dir self._package_manager = PackageManager(user_dir) - self.labs: dict[str, LabConfig] = {} - self.experiments: dict[str, ExperimentConfig] = {} - task_configs, task_dirs_to_task_types = self._package_manager.read_task_configs() - self.task_specs = TaskSpecificationRegistry(task_configs, task_dirs_to_task_types) + self.task_specs = TaskSpecRegistry(task_configs, task_dirs_to_task_types) self.tasks = TaskPluginRegistry(self._package_manager) device_configs, device_dirs_to_device_types = self._package_manager.read_device_configs() - self.device_specs = DeviceSpecificationRegistry(device_configs, device_dirs_to_device_types) + self.device_specs = DeviceSpecRegistry(device_configs, device_dirs_to_device_types) self.devices = DevicePluginRegistry(self._package_manager) + self.labs: dict[str, LabConfig] = {} + self.experiments: dict[str, ExperimentConfig] = {} + self.campaign_optimizers = CampaignOptimizerPluginRegistry(self._package_manager) log.debug("Configuration manager initialized") @@ -181,7 +181,7 @@ def load_experiments(self, experiment_types: set[str]) -> None: Load multiple experiments to the configuration manager. :param experiment_types: A list of experiment names. Each name should match the name of the experiment's - configuration file in the experiments directory. + configuration file in the 'experiments' directory. 
""" for experiment_type in experiment_types: self.load_experiment(experiment_type) diff --git a/eos/configuration/entities/device_spec.py b/eos/configuration/entities/device_spec.py new file mode 100644 index 0000000..9bae0df --- /dev/null +++ b/eos/configuration/entities/device_spec.py @@ -0,0 +1,9 @@ +from typing import Any + +from pydantic import BaseModel + + +class DeviceSpec(BaseModel): + type: str + desc: str | None = None + init_parameters: dict[str, Any] | None = None diff --git a/eos/configuration/entities/device_specification.py b/eos/configuration/entities/device_specification.py deleted file mode 100644 index 612ceb4..0000000 --- a/eos/configuration/entities/device_specification.py +++ /dev/null @@ -1,9 +0,0 @@ -from dataclasses import dataclass -from typing import Any - - -@dataclass -class DeviceSpecification: - type: str - description: str | None = None - initialization_parameters: dict[str, Any] | None = None diff --git a/eos/configuration/entities/eos_config.py b/eos/configuration/entities/eos_config.py new file mode 100644 index 0000000..5586acf --- /dev/null +++ b/eos/configuration/entities/eos_config.py @@ -0,0 +1,38 @@ +from pydantic import BaseModel, Field, field_validator +from pathlib import Path + + +class WebApiConfig(BaseModel): + host: str = "localhost" + port: int = 8070 + + +class DbConfig(BaseModel): + host: str = "localhost" + port: int + username: str | None = None + password: str | None = None + + +class EosConfig(BaseModel): + user_dir: Path = Field(default=Path("./user")) + labs: set[str] = Field(default_factory=set) + experiments: set[str] = Field(default_factory=set) + + log_level: str = "INFO" + + web_api: WebApiConfig = Field(default_factory=WebApiConfig) + db: DbConfig = Field(default_factory=lambda: DbConfig(port=27017)) + file_db: DbConfig = Field(default_factory=lambda: DbConfig(port=9004)) + + @field_validator("user_dir") + def _validate_user_dir(cls, user_dir: Path) -> Path: + if not user_dir.name == "user": + raise ValueError( + f"EOS requires that the directory containing packages is named 'user'. The configured user_dir is " + f"currently named '{user_dir.name}', which is invalid." 
+ ) + return user_dir + + class Config: + validate_assignment = True diff --git a/eos/configuration/entities/experiment.py b/eos/configuration/entities/experiment.py index 48fbfc8..ab4988e 100644 --- a/eos/configuration/entities/experiment.py +++ b/eos/configuration/entities/experiment.py @@ -1,21 +1,21 @@ -from dataclasses import dataclass from typing import Any +from pydantic import BaseModel + from eos.configuration.entities.task import TaskConfig -@dataclass -class ExperimentContainerConfig: +class ExperimentContainerConfig(BaseModel): id: str - description: str | None = None + desc: str | None = None metadata: dict[str, Any] | None = None - tags: list[str] | None = None -@dataclass -class ExperimentConfig: +class ExperimentConfig(BaseModel): type: str - description: str + desc: str labs: list[str] + tasks: list[TaskConfig] + containers: list[ExperimentContainerConfig] | None = None diff --git a/eos/configuration/entities/lab.py b/eos/configuration/entities/lab.py index 8ebefec..2e57262 100644 --- a/eos/configuration/entities/lab.py +++ b/eos/configuration/entities/lab.py @@ -1,42 +1,39 @@ -from dataclasses import dataclass, field from typing import Any +from bofire.data_models.base import BaseModel +from pydantic import Field -@dataclass -class Location: - description: str + +class Location(BaseModel): + desc: str metadata: dict[str, Any] | None = None -@dataclass -class LabComputerConfig: +class LabComputerConfig(BaseModel): ip: str - description: str | None = None + desc: str | None = None -@dataclass -class LabDeviceConfig: +class LabDeviceConfig(BaseModel): type: str computer: str location: str | None = None - description: str | None = None - initialization_parameters: dict[str, Any] | None = None + desc: str | None = None + init_parameters: dict[str, Any] | None = None -@dataclass -class LabContainerConfig: +class LabContainerConfig(BaseModel): type: str location: str ids: list[str] - description: str | None = None + desc: str | None = None metadata: dict[str, Any] | None = None -@dataclass -class LabConfig: +class LabConfig(BaseModel): type: str - description: str + desc: str devices: dict[str, LabDeviceConfig] - locations: dict[str, Location] = field(default_factory=dict) - computers: dict[str, LabComputerConfig] = field(default_factory=dict) - containers: list[LabContainerConfig] = field(default_factory=list) + locations: dict[str, Location] = Field(default_factory=dict) + computers: dict[str, LabComputerConfig] = Field(default_factory=dict) + containers: list[LabContainerConfig] = Field(default_factory=list) diff --git a/eos/configuration/entities/parameters.py b/eos/configuration/entities/parameters.py deleted file mode 100644 index 7b80a20..0000000 --- a/eos/configuration/entities/parameters.py +++ /dev/null @@ -1,209 +0,0 @@ -from dataclasses import dataclass -from enum import Enum -from typing import Any, ClassVar - -from omegaconf import ListConfig - -from eos.configuration.exceptions import EosConfigurationError - -AllowedParameterTypes = int | float | bool | str | list | dict - - -def is_dynamic_parameter(parameter: AllowedParameterTypes) -> bool: - return isinstance(parameter, str) and parameter.lower() == "eos_dynamic" - - -class ParameterType(Enum): - integer = "integer" - decimal = "decimal" - string = "string" - boolean = "boolean" - choice = "choice" - list = "list" - dictionary = "dictionary" - - def python_type(self) -> type: - mapping = { - "integer": int, - "decimal": float, - "string": str, - "boolean": bool, - "choice": str, - "list": list, - "dictionary": 
dict, - } - return mapping[self.value] - - -@dataclass(kw_only=True) -class Parameter: - type: ParameterType - description: str - value: Any | None = None - - def __post_init__(self): - self._validate_type() - - def _validate_type(self) -> None: - try: - self.type = ParameterType(self.type) - except ValueError as e: - raise EosConfigurationError(f"Invalid task parameter type '{self.type}'") from e - - -@dataclass(kw_only=True) -class NumericParameter(Parameter): - unit: str - min: int | float | None = None - max: int | float | None = None - - def __post_init__(self): - super().__post_init__() - self._validate_unit() - self._validate_min_max() - self._validate_value_range() - - def _validate_unit(self) -> None: - if not self.unit: - raise EosConfigurationError("Task parameter type is numeric but no unit is specified.") - - def _validate_min_max(self) -> None: - if self.min is not None and self.max is not None and self.min >= self.max: - raise EosConfigurationError("Task parameter 'min' is greater than or equal to 'max'.") - - def _validate_value_range(self) -> None: - if self.value is None or is_dynamic_parameter(self.value): - return - - if not isinstance(self.value, int | float): - raise EosConfigurationError("Task parameter value is not numerical.") - if self.min is not None and self.value < self.min: - raise EosConfigurationError("Task parameter value is less than 'min'.") - if self.max is not None and self.value > self.max: - raise EosConfigurationError("Task parameter value is greater than 'max'.") - - -@dataclass(kw_only=True) -class BooleanParameter(Parameter): - def __post_init__(self): - super().__post_init__() - self._validate_value() - - def _validate_value(self) -> None: - if not isinstance(self.value, bool) and not is_dynamic_parameter(self.value): - raise EosConfigurationError( - f"Task parameter value '{self.value}' is not true/false but the declared type is 'boolean'." - ) - - -@dataclass(kw_only=True) -class ChoiceParameter(Parameter): - choices: list[str] - - def __post_init__(self): - super().__post_init__() - self._validate_choices() - - def _validate_choices(self) -> None: - if not self.choices: - raise EosConfigurationError("Task parameter choices are not specified when the type is 'choice'.") - - if ( - not self.value - or len(self.value) == 0 - or self.value not in self.choices - and not is_dynamic_parameter(self.value) - ): - raise EosConfigurationError( - f"Task parameter value '{self.value}' is not one of the choices {self.choices}." - ) - - -@dataclass(kw_only=True) -class ListParameter(Parameter): - element_type: ParameterType - length: int | None = None - min: list[int | float] | None = None - max: list[int | float] | None = None - - def __post_init__(self): - super().__post_init__() - self._validate_element_type() - self._validate_list_attributes() - self._validate_elements_within_bounds() - - def _validate_element_type(self) -> None: - if isinstance(self.element_type, str): - try: - self.element_type = ParameterType[self.element_type] - except KeyError as e: - raise EosConfigurationError(f"Invalid list parameter element type '{self.element_type}'") from e - if self.element_type == ParameterType.list: - raise EosConfigurationError("List parameter element type cannot be 'list'. 
Nested lists are not supported.") - - def _validate_list_attributes(self) -> None: - for attr_name in ["value", "min", "max"]: - attr_value = getattr(self, attr_name) - if attr_value is None: - continue - - if not isinstance(attr_value, list) and not isinstance(attr_value, ListConfig): - raise EosConfigurationError( - f"List parameter '{attr_name}' must be a list for 'list' type parameters.", - EosConfigurationError, - ) - if not all(isinstance(item, self.element_type.python_type()) for item in attr_value): - raise EosConfigurationError( - f"All elements of list parameter '{attr_name}' must be of the same type as specified " - f"by 'element_type'." - ) - if self.length is not None and len(attr_value) != self.length: - raise EosConfigurationError(f"List parameter '{attr_name}' length must be {self.length}.") - - def _validate_elements_within_bounds(self) -> None: - if self.value is None or is_dynamic_parameter(self.value) or self.min is None and self.max is None: - return - - if self.length is None and (self.min is not None or self.max is not None): - raise EosConfigurationError( - "List parameter 'min' and 'max' can only be specified when 'length' is specified." - ) - - _min = self.min or [float("-inf")] * self.length - _max = self.max or [float("inf")] * self.length - for i, val in enumerate(self.value): - if not _min[i] <= val <= _max[i]: - raise EosConfigurationError( - f"Element {i} of the list with value {val} is not within the the bounds [{_min[i]}, {_max[i]}]." - ) - - -@dataclass(kw_only=True) -class DictionaryParameter(Parameter): - pass - - -class ParameterFactory: - _TYPE_MAPPING: ClassVar = { - ParameterType.integer: NumericParameter, - ParameterType.decimal: NumericParameter, - ParameterType.string: Parameter, - ParameterType.boolean: BooleanParameter, - ParameterType.choice: ChoiceParameter, - ParameterType.list: ListParameter, - ParameterType.dictionary: DictionaryParameter, - } - - @staticmethod - def create_parameter(parameter_type: ParameterType | str, **kwargs) -> Parameter: - if isinstance(parameter_type, str): - parameter_type = ParameterType(parameter_type) - - parameter_class = ParameterFactory._TYPE_MAPPING.get(parameter_type) - if not parameter_class: - raise EosConfigurationError(f"Unsupported parameter type: {parameter_type}") - - if "type" not in kwargs: - kwargs["type"] = parameter_type - - return parameter_class(**kwargs) diff --git a/eos/configuration/entities/task.py b/eos/configuration/entities/task.py index eca8b1c..7458ce5 100644 --- a/eos/configuration/entities/task.py +++ b/eos/configuration/entities/task.py @@ -1,21 +1,21 @@ -from dataclasses import dataclass, field from typing import Any +from pydantic import BaseModel, Field -@dataclass -class TaskDeviceConfig: + +class TaskDeviceConfig(BaseModel): lab_id: str id: str -@dataclass -class TaskConfig: +class TaskConfig(BaseModel): id: str type: str - devices: list[TaskDeviceConfig] = field(default_factory=list) - containers: dict[str, str] = field(default_factory=dict) - parameters: dict[str, Any] = field(default_factory=dict) - dependencies: list[str] = field(default_factory=list) + desc: str | None = None + duration: int | None = None + + devices: list[TaskDeviceConfig] = Field(default_factory=list) + containers: dict[str, str] = Field(default_factory=dict) + parameters: dict[str, Any] = Field(default_factory=dict) - max_duration_seconds: int | None = None - description: str | None = None + dependencies: list[str] = Field(default_factory=list) diff --git a/eos/configuration/entities/task_parameters.py 
b/eos/configuration/entities/task_parameters.py new file mode 100644 index 0000000..f497b38 --- /dev/null +++ b/eos/configuration/entities/task_parameters.py @@ -0,0 +1,199 @@ +from enum import Enum +from typing import Any, ClassVar + +from pydantic import BaseModel, field_validator, model_validator, Field +from typing_extensions import Self + +from eos.configuration.validation.validation_utils import is_dynamic_parameter + + +class TaskParameterType(str, Enum): + """Enumeration of supported parameter types.""" + + INT = "int" + FLOAT = "float" + STR = "str" + BOOL = "bool" + CHOICE = "choice" + LIST = "list" + DICT = "dict" + + @property + def python_type(self) -> type: + """Returns the corresponding Python type for the parameter type.""" + return { + self.INT: int, + self.FLOAT: float, + self.STR: str, + self.BOOL: bool, + self.CHOICE: str, + self.LIST: list, + self.DICT: dict, + }[self] + + @property + def is_numeric(self) -> bool: + return self in (self.INT, self.FLOAT) + + +class TaskParameter(BaseModel): + """Base class for all task parameters.""" + + type: TaskParameterType + desc: str | None = None + value: Any | None = None + + class Config: + extra = "forbid" + + +class NumericTaskParameter(TaskParameter): + """Parameter type for numeric values (int or float).""" + + unit: str + min: int | float | None = None + max: int | float | None = None + + @field_validator("unit") + def validate_unit(cls, unit: str) -> str: + if not unit.strip(): + raise ValueError("Task numeric parameter requires a unit to be specified.") + return unit.strip() + + @model_validator(mode="after") + def _validate_bounds(self) -> Self: + if self.min is not None and self.max is not None and self.min >= self.max: + raise ValueError("Task parameter 'min' is greater than or equal to 'max'.") + + if self.value is not None and not is_dynamic_parameter(self.value): + if not isinstance(self.value, int | float): + raise ValueError("Task parameter value is not numerical.") + + if self.min is not None and self.value < self.min: + raise ValueError("Task parameter value is less than 'min'.") + if self.max is not None and self.value > self.max: + raise ValueError("Task parameter value is greater than 'max'.") + + return self + + +class StringTaskParameter(TaskParameter): + """Parameter type for string values.""" + + +class BooleanTaskParameter(TaskParameter): + """Parameter type for boolean values.""" + + @field_validator("value") + def _validate_boolean(cls, value: Any) -> Any: + if not isinstance(value, bool) and not is_dynamic_parameter(value): + raise ValueError( + f"Task parameter value '{value}' is declared as 'boolean' but its value is not true/false." 
+ ) + return value + + +class ChoiceTaskParameter(TaskParameter): + """Parameter type for choice values.""" + + choices: list[str] = Field(..., min_length=1) + + @model_validator(mode="after") + def _validate_choice(self) -> Self: + if not self.value or self.value not in self.choices and not is_dynamic_parameter(self.value): + raise ValueError(f"Task parameter value '{self.value}' is not one of the choices {self.choices}.") + return self + + +class ListTaskParameter(TaskParameter): + """Parameter type for list values.""" + + element_type: TaskParameterType + length: int | None = None + min: list[int | float] | None = None + max: list[int | float] | None = None + + @field_validator("element_type") + def _validate_element_type(cls, element_type: str | TaskParameterType) -> TaskParameterType: + if isinstance(element_type, str): + try: + element_type = TaskParameterType(element_type) + except ValueError as e: + raise ValueError(f"Invalid list parameter element type '{element_type}'") from e + + if element_type == TaskParameterType.LIST: + raise ValueError("Nested lists are not supported. List parameter element type cannot be 'list'.") + + return element_type + + @model_validator(mode="after") + def _validate_list(self) -> Self: + if is_dynamic_parameter(self.value): + return self + + for attr_name in ("value", "min", "max"): + attr_value = getattr(self, attr_name) + if attr_value is None: + continue + + if not isinstance(attr_value, list): + raise ValueError(f"List parameter '{attr_name}' must be a list for 'list' type parameters.") + + # Check element types + if not all(isinstance(item, self.element_type.python_type) for item in attr_value): + raise ValueError( + f"All elements of list parameter '{attr_name}' must be of the same type as specified " + f"by 'element_type'." + ) + + # Check length if specified + if self.length is not None and len(attr_value) != self.length: + raise ValueError(f"List parameter '{attr_name}' length must be {self.length}.") + + # Validate elements within bounds + if self.value is not None and (self.min is not None or self.max is not None): + + if self.length is None: + raise ValueError("List parameter 'min' and 'max' can only be specified when 'length' is specified.") + + bounds_min = self.min or [float("-inf")] * self.length + bounds_max = self.max or [float("inf")] * self.length + + for i, val in enumerate(self.value): + if not bounds_min[i] <= val <= bounds_max[i]: + raise ValueError( + f"Element {i} of the list with value {val} is not within the bounds " + f"[{bounds_min[i]}, {bounds_max[i]}]."
+ ) + + return self + + +class DictionaryTaskParameter(TaskParameter): + pass + + +class TaskParameterFactory: + _PARAMETER_CLASSES: ClassVar = { + TaskParameterType.INT: NumericTaskParameter, + TaskParameterType.FLOAT: NumericTaskParameter, + TaskParameterType.STR: StringTaskParameter, + TaskParameterType.BOOL: BooleanTaskParameter, + TaskParameterType.CHOICE: ChoiceTaskParameter, + TaskParameterType.LIST: ListTaskParameter, + TaskParameterType.DICT: DictionaryTaskParameter, + } + + @classmethod + def create(cls, parameter_type: TaskParameterType | str, **kwargs) -> TaskParameter: + """Create a task parameter instance of the specified type.""" + if isinstance(parameter_type, str): + parameter_type = TaskParameterType(parameter_type) + + parameter_class = cls._PARAMETER_CLASSES.get(parameter_type) + if not parameter_class: + raise ValueError(f"Unsupported parameter type: {parameter_type}") + + kwargs.setdefault("type", parameter_type) + + return parameter_class(**kwargs) diff --git a/eos/configuration/entities/task_spec.py b/eos/configuration/entities/task_spec.py new file mode 100644 index 0000000..54e36b2 --- /dev/null +++ b/eos/configuration/entities/task_spec.py @@ -0,0 +1,77 @@ +from typing import Any, Annotated + +from pydantic import BaseModel, Field, field_validator, model_validator +from typing_extensions import Self + +from eos.configuration.entities.task_parameters import ( + TaskParameterFactory, + TaskParameterType, +) + + +class TaskSpecContainerConfig(BaseModel): + type: str + + @field_validator("type") + def _validate_type_not_empty(cls, v: str) -> str: + if not v.strip(): + raise ValueError("Container 'type' field must be specified.") + return v + + +class TaskSpecOutputParameterConfig(BaseModel): + type: TaskParameterType + desc: str | None = None + unit: str | None = None + + @field_validator("type") + def _validate_parameter_type(cls, v: str) -> TaskParameterType: + try: + return TaskParameterType(v) + except ValueError as e: + raise ValueError(f"Invalid task output parameter type '{v}'") from e + + @model_validator(mode="after") + def _validate_unit(self) -> Self: + numeric_types = {TaskParameterType.INT, TaskParameterType.FLOAT} + is_numeric = self.type in numeric_types + has_unit = self.unit is not None and self.unit.strip() != "" + + if is_numeric and not has_unit: + raise ValueError("Task output parameter type is numeric but no unit is specified.") + if not is_numeric and has_unit: + raise ValueError("Task output parameter type is not numeric but a unit is specified.") + return self + + +ValidName = Annotated[str, Field(pattern=r"^[a-zA-Z0-9_.]*$")] + + +class TaskSpecConfig(BaseModel): + type: str + desc: str | None = None + device_types: list[str] | None = None + + input_containers: dict[ValidName, TaskSpecContainerConfig] = Field(default_factory=dict) + input_parameters: dict[ValidName, Any] = Field(default_factory=dict) + + output_containers: dict[ValidName, TaskSpecContainerConfig] = Field(default_factory=dict) + output_parameters: dict[ValidName, TaskSpecOutputParameterConfig] = Field(default_factory=dict) + + @model_validator(mode="after") + def _set_default_output_containers(self) -> Self: + """Set output containers to input containers if not specified""" + if not self.output_containers: + self.output_containers = self.input_containers.copy() + return self + + @field_validator("input_parameters") + def _validate_parameters(cls, input_parameters: dict) -> dict: + """Validate that all input parameters can be created""" + for param_name, param_config in 
input_parameters.items(): + try: + param_type = TaskParameterType(param_config["type"]) + input_parameters[param_name] = TaskParameterFactory.create(param_type, **param_config) + except (ValueError, KeyError) as e: + raise ValueError(f"Invalid parameter configuration: {e!s}") from e + return input_parameters diff --git a/eos/configuration/entities/task_specification.py b/eos/configuration/entities/task_specification.py deleted file mode 100644 index 7f5e232..0000000 --- a/eos/configuration/entities/task_specification.py +++ /dev/null @@ -1,110 +0,0 @@ -import re -from dataclasses import dataclass, field -from typing import Any - -from eos.configuration.entities.parameters import ( - ParameterFactory, - ParameterType, -) -from eos.configuration.exceptions import EosConfigurationError - - -@dataclass -class TaskSpecificationContainer: - type: str - - def __post_init__(self): - self._validate_type() - - def _validate_type(self) -> None: - if not self.type.strip(): - raise EosConfigurationError("Container 'type' field must be specified.") - - -@dataclass -class TaskSpecificationOutputParameter: - type: ParameterType - description: str - unit: str | None = None - - def __post_init__(self): - self._validate_type() - self._validate_unit_specified_if_type_numeric() - self._validate_unit_not_specified_if_type_not_numeric() - - def _validate_type(self) -> None: - try: - self.type = ParameterType(self.type) - except ValueError as e: - raise EosConfigurationError(f"Invalid task output parameter type '{self.type}'") from e - - def _validate_unit_specified_if_type_numeric(self) -> None: - if self.type not in [ParameterType.integer, ParameterType.decimal]: - return - if self.unit is None or self.unit.strip() == "": - raise EosConfigurationError("Task output parameter type is numeric but no unit is specified.") - - def _validate_unit_not_specified_if_type_not_numeric(self) -> None: - if self.type in [ParameterType.integer, ParameterType.decimal]: - return - if self.unit is not None: - raise EosConfigurationError("Task output parameter type is not numeric but a unit is specified.") - - -@dataclass -class TaskSpecification: - type: str - description: str - device_types: list[str] | None = None - - input_containers: dict[str, TaskSpecificationContainer] = field(default_factory=dict) - input_parameters: dict[str, Any] = field(default_factory=dict) - - output_parameters: dict[str, TaskSpecificationOutputParameter] = field(default_factory=dict) - output_containers: dict[str, TaskSpecificationContainer] = field(default_factory=dict) - - def __post_init__(self): - if not self.output_containers: - self.output_containers = self.input_containers.copy() - - self._validate_parameters() - self._validate_parameter_names() - self._validate_container_names() - - def _validate_parameters(self) -> None: - for parameter in self.input_parameters.values(): - _ = ParameterFactory.create_parameter(ParameterType(parameter["type"]), **parameter) - - def _validate_parameter_names(self) -> None: - valid_name_pattern = re.compile(r"^[a-zA-Z0-9_.]*$") - - for name in self.input_parameters: - if not valid_name_pattern.match(name): - raise EosConfigurationError( - f"Invalid task parameter name '{name}'. " - f"Only characters, numbers, dots, and underscores are allowed." - ) - - for name in self.output_parameters: - if not valid_name_pattern.match(name): - raise EosConfigurationError( - f"Invalid task parameter name '{name}'. " - f"Only characters, numbers, dots, and underscores are allowed." 
- ) - - def _validate_container_names(self) -> None: - valid_name_pattern = re.compile(r"^[a-zA-Z0-9_.]*$") - - for name in self.input_containers: - if not valid_name_pattern.match(name): - raise EosConfigurationError( - f"Invalid task input container name '{name}'. " - f"Only characters, numbers, dots, and underscores are allowed." - ) - - for name in self.output_containers: - if not valid_name_pattern.match(name): - raise EosConfigurationError( - f"Invalid task output container name '{name}'. " - f"Only characters, numbers, dots, and underscores are allowed." - ) diff --git a/eos/configuration/experiment_graph/experiment_graph.py b/eos/configuration/experiment_graph/experiment_graph.py index 2801796..e8b0ea1 100644 --- a/eos/configuration/experiment_graph/experiment_graph.py +++ b/eos/configuration/experiment_graph/experiment_graph.py @@ -5,10 +5,10 @@ from eos.configuration.entities.experiment import ExperimentConfig from eos.configuration.entities.task import TaskConfig -from eos.configuration.entities.task_specification import TaskSpecification +from eos.configuration.entities.task_spec import TaskSpecConfig from eos.configuration.exceptions import EosTaskGraphError from eos.configuration.experiment_graph.experiment_graph_builder import ExperimentGraphBuilder -from eos.configuration.spec_registries.task_specification_registry import TaskSpecificationRegistry +from eos.configuration.spec_registries.task_spec_registry import TaskSpecRegistry @dataclass @@ -20,7 +20,7 @@ class TaskNodeIO: class ExperimentGraph: def __init__(self, experiment_config: ExperimentConfig): self._experiment_config = experiment_config - self._task_specs = TaskSpecificationRegistry() + self._task_specs = TaskSpecRegistry() self._graph = ExperimentGraphBuilder(experiment_config).build_graph() @@ -49,9 +49,9 @@ def get_task_node(self, task_id: str) -> dict[str, Any]: return self._graph.nodes[task_id] def get_task_config(self, task_id: str) -> TaskConfig: - return TaskConfig(**self.get_task_node(task_id)["task_config"]) + return self.get_task_node(task_id)["task_config"].model_copy(deep=True) - def get_task_spec(self, task_id: str) -> TaskSpecification: + def get_task_spec(self, task_id: str) -> TaskSpecConfig: return self._task_specs.get_spec_by_type(self.get_task_node(task_id)["task_config"].type) def get_task_dependencies(self, task_id: str) -> list[str]: diff --git a/eos/configuration/experiment_graph/experiment_graph_builder.py b/eos/configuration/experiment_graph/experiment_graph_builder.py index ab7ea2b..c92fe26 100644 --- a/eos/configuration/experiment_graph/experiment_graph_builder.py +++ b/eos/configuration/experiment_graph/experiment_graph_builder.py @@ -1,8 +1,8 @@ import networkx as nx from eos.configuration.entities.experiment import ExperimentConfig -from eos.configuration.spec_registries.task_specification_registry import ( - TaskSpecificationRegistry, +from eos.configuration.spec_registries.task_spec_registry import ( + TaskSpecRegistry, ) from eos.configuration.validation import validation_utils @@ -14,7 +14,7 @@ class ExperimentGraphBuilder: def __init__(self, experiment_config: ExperimentConfig): self._experiment = experiment_config - self._task_specs = TaskSpecificationRegistry() + self._task_specs = TaskSpecRegistry() def build_graph(self) -> nx.DiGraph: graph = nx.DiGraph() diff --git a/eos/configuration/packages/entities.py b/eos/configuration/packages/entities.py index 5241b56..fba3393 100644 --- a/eos/configuration/packages/entities.py +++ b/eos/configuration/packages/entities.py @@ -11,12 
+11,12 @@ DEVICES_DIR, DEVICE_CONFIG_FILE_NAME, ) -from eos.configuration.entities.device_specification import DeviceSpecification +from eos.configuration.entities.device_spec import DeviceSpec from eos.configuration.entities.experiment import ExperimentConfig from eos.configuration.entities.lab import LabConfig -from eos.configuration.entities.task_specification import TaskSpecification +from eos.configuration.entities.task_spec import TaskSpecConfig -EntityConfigType = LabConfig | ExperimentConfig | TaskSpecification | DeviceSpecification +EntityConfigType = LabConfig | ExperimentConfig | TaskSpecConfig | DeviceSpec class EntityType(Enum): @@ -42,6 +42,6 @@ class EntityLocationInfo: ENTITY_INFO: dict[EntityType, EntityInfo] = { EntityType.LAB: EntityInfo(LABS_DIR, LAB_CONFIG_FILE_NAME, LabConfig), EntityType.EXPERIMENT: EntityInfo(EXPERIMENTS_DIR, EXPERIMENT_CONFIG_FILE_NAME, ExperimentConfig), - EntityType.TASK: EntityInfo(TASKS_DIR, TASK_CONFIG_FILE_NAME, TaskSpecification), - EntityType.DEVICE: EntityInfo(DEVICES_DIR, DEVICE_CONFIG_FILE_NAME, DeviceSpecification), + EntityType.TASK: EntityInfo(TASKS_DIR, TASK_CONFIG_FILE_NAME, TaskSpecConfig), + EntityType.DEVICE: EntityInfo(DEVICES_DIR, DEVICE_CONFIG_FILE_NAME, DeviceSpec), } diff --git a/eos/configuration/packages/entity_reader.py b/eos/configuration/packages/entity_reader.py index f6dc0e3..58b1d53 100644 --- a/eos/configuration/packages/entity_reader.py +++ b/eos/configuration/packages/entity_reader.py @@ -1,15 +1,17 @@ import os from pathlib import Path -from typing import Any +from typing import Any, TypeVar import jinja2 import yaml -from omegaconf import OmegaConf, ValidationError +from pydantic import BaseModel from eos.configuration.exceptions import EosConfigurationError from eos.configuration.packages.entities import EntityType, EntityConfigType, ENTITY_INFO from eos.logging.logger import log +T = TypeVar("T", bound=BaseModel) + class EntityReader: """ @@ -24,7 +26,7 @@ def read_entity(self, file_path: str, entity_type: EntityType) -> EntityConfigTy def read_all_entities( self, base_dir: str, entity_type: EntityType - ) -> tuple[dict[str, EntityConfigType], dict[str, str]]: + ) -> tuple[dict[str, EntityConfigType], dict[Path, str]]: entity_info = ENTITY_INFO[entity_type] configs = {} dirs_to_types = {} @@ -56,19 +58,13 @@ def read_all_entities( def _read_config(self, file_path: str, config_type: type[EntityConfigType], config_name: str) -> EntityConfigType: try: config_data = self._render_jinja_yaml(file_path) - - structured_config = OmegaConf.merge(OmegaConf.structured(config_type), OmegaConf.create(config_data)) - _ = OmegaConf.to_object(structured_config) - - return structured_config + return self._parse_yaml(yaml.dump(config_data), config_type) except OSError as e: raise EosConfigurationError(f"Error reading configuration file '{file_path}': {e!s}") from e - except ValidationError as e: - raise EosConfigurationError(f"Configuration is invalid: {e!s}") from e except jinja2.exceptions.TemplateError as e: - raise EosConfigurationError(f"Error in Jinja2 template processing for '{config_name}': {e!s}") from e + raise EosConfigurationError(f"Error in Jinja2 template for '{config_name.lower()}': {e!s}") from e except Exception as e: - raise EosConfigurationError(f"Error processing {config_name} configuration: {e!s}") from e + raise EosConfigurationError(f"Error processing {config_name.lower()} configuration: {e!s}") from e def _render_jinja_yaml(self, file_path: str) -> dict[str, Any]: """ @@ -95,3 +91,22 @@ def 
_render_jinja_yaml(self, file_path: str) -> dict[str, Any]: raise EosConfigurationError(f"Error parsing YAML in {file_path}: {e}") from e except jinja2.exceptions.TemplateError as e: raise EosConfigurationError(f"Error in Jinja2 template processing: {e}") from e + + @staticmethod + def _parse_yaml(yaml_string: str, model_class: type[T]) -> T: + """ + Parse a YAML string into a Pydantic model instance. + + Args: + yaml_string: YAML content as a string + model_class: The Pydantic model class to parse into + + Returns: + An instance of the provided Pydantic model class + + Raises: + ValidationError: If the YAML data doesn't match the model schema + yaml.YAMLError: If the YAML string is invalid + """ + yaml_data = yaml.safe_load(yaml_string) + return model_class.model_validate(yaml_data) diff --git a/eos/configuration/packages/package_manager.py b/eos/configuration/packages/package_manager.py index a1a523e..23a117f 100644 --- a/eos/configuration/packages/package_manager.py +++ b/eos/configuration/packages/package_manager.py @@ -1,9 +1,9 @@ from pathlib import Path -from eos.configuration.entities.device_specification import DeviceSpecification +from eos.configuration.entities.device_spec import DeviceSpec from eos.configuration.entities.experiment import ExperimentConfig from eos.configuration.entities.lab import LabConfig -from eos.configuration.entities.task_specification import TaskSpecification +from eos.configuration.entities.task_spec import TaskSpecConfig from eos.configuration.exceptions import EosMissingConfigurationError from eos.configuration.packages.entities import EntityType, EntityLocationInfo, ENTITY_INFO, EntityConfigType from eos.configuration.packages.entity_index import EntityIndex @@ -46,10 +46,10 @@ def read_lab_config(self, lab_name: str) -> LabConfig: def read_experiment_config(self, experiment_name: str) -> ExperimentConfig: return self._read_entity_config(experiment_name, EntityType.EXPERIMENT) - def read_task_configs(self) -> tuple[dict[str, TaskSpecification], dict[str, str]]: + def read_task_configs(self) -> tuple[dict[str, TaskSpecConfig], dict[str, str]]: return self._read_all_entity_configs(EntityType.TASK) - def read_device_configs(self) -> tuple[dict[str, DeviceSpecification], dict[str, str]]: + def read_device_configs(self) -> tuple[dict[str, DeviceSpec], dict[str, str]]: return self._read_all_entity_configs(EntityType.DEVICE) def _read_entity_config(self, entity_name: str, entity_type: EntityType) -> EntityConfigType: diff --git a/eos/configuration/plugin_registries/campaign_optimizer_plugin_registry.py b/eos/configuration/plugin_registries/campaign_optimizer_plugin_registry.py index 0ade16a..ff97987 100644 --- a/eos/configuration/plugin_registries/campaign_optimizer_plugin_registry.py +++ b/eos/configuration/plugin_registries/campaign_optimizer_plugin_registry.py @@ -14,10 +14,10 @@ from eos.logging.logger import log from eos.optimization.abstract_sequential_optimizer import AbstractSequentialOptimizer +CampaignOptimizerCreationFunction = Callable[[], tuple[dict[str, Any], type[AbstractSequentialOptimizer]]] -class CampaignOptimizerPluginRegistry( - PluginRegistry[Callable[[], tuple[dict[str, Any], type[AbstractSequentialOptimizer]]], Any] -): + +class CampaignOptimizerPluginRegistry(PluginRegistry[CampaignOptimizerCreationFunction, Any]): """ Responsible for dynamically loading campaign optimizers from all packages and providing references to them for later use. 
@@ -29,7 +29,6 @@ def __init__(self, package_manager: PackageManager): base_class=None, # Campaign optimizers don't have a base class config_file_name=None, # Campaign optimizers don't have a separate config file implementation_file_name=CAMPAIGN_OPTIMIZER_FILE_NAME, - class_suffix="", # Campaign optimizers don't use a class suffix not_found_exception_class=EosCampaignOptimizerImplementationClassNotFoundError, entity_type=EntityType.EXPERIMENT, ) @@ -50,24 +49,43 @@ def get_campaign_optimizer_creation_parameters( return optimizer_function() return None - def _load_single_plugin(self, package_name: str, dir_path: str, implementation_file: str) -> None: - module_name = Path(dir_path).name - spec = importlib.util.spec_from_file_location(module_name, implementation_file) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) + def _load_single_plugin(self, package_name: str, dir_path: str, implementation_path: str) -> None: + module = self._import_optimizer_module(dir_path, implementation_path) - if CAMPAIGN_OPTIMIZER_CREATION_FUNCTION_NAME in module.__dict__: - experiment_type = module_name - self._plugin_types[experiment_type] = module.__dict__[CAMPAIGN_OPTIMIZER_CREATION_FUNCTION_NAME] - self._plugin_modules[experiment_type] = implementation_file - log.info(f"Loaded campaign optimizer for experiment '{experiment_type}' from package '{package_name}'.") - else: + experiment_type = Path(dir_path).name + if not self._register_optimizer_if_valid(module, experiment_type, package_name, implementation_path): log.warning( f"Optimizer configuration function '{CAMPAIGN_OPTIMIZER_CREATION_FUNCTION_NAME}' not found in the " f"campaign optimizer file '{self._config.implementation_file_name}' of experiment " f"'{Path(dir_path).name}' in package '{package_name}'." ) + def _import_optimizer_module(self, dir_path: str, implementation_path: str) -> object | None: + """Import the optimizer module from the given path.""" + module_name = Path(dir_path).name + spec = importlib.util.spec_from_file_location(module_name, implementation_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + def _register_optimizer_if_valid( + self, + module: object, + experiment_type: str, + package_name: str, + implementation_path: str, + ) -> bool: + """Register the optimizer if its module contains the required creation function.""" + if CAMPAIGN_OPTIMIZER_CREATION_FUNCTION_NAME not in module.__dict__: + return False + + optimizer_creator = module.__dict__[CAMPAIGN_OPTIMIZER_CREATION_FUNCTION_NAME] + self._plugin_types[experiment_type] = optimizer_creator + self._plugin_modules[experiment_type] = implementation_path + + log.info(f"Loaded campaign optimizer for experiment '{experiment_type}' from package '{package_name}'.") + return True + def load_campaign_optimizer(self, experiment_type: str) -> None: """ Load the optimizer configuration function for the given experiment from the appropriate package. @@ -81,8 +99,7 @@ def load_campaign_optimizer(self, experiment_type: str) -> None: optimizer_file = ( self._package_manager.get_entity_dir(experiment_type, EntityType.EXPERIMENT) / CAMPAIGN_OPTIMIZER_FILE_NAME ) - - if not Path(optimizer_file).exists(): + if not optimizer_file.exists(): log.warning( f"No campaign optimizer found for experiment '{experiment_type}' in package " f"'{experiment_package.name}'." 
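With the `class_suffix` convention removed, the registries above discover implementations by inheritance: a module is imported from its file path and scanned for exactly one subclass of the configured base class. The following is a minimal, self-contained sketch of that `importlib` pattern, assuming a hypothetical `BasePlugin` base class and `load_implementation` helper (neither name is part of EOS):

```python
import importlib.util
import inspect
from pathlib import Path


class BasePlugin:
    """Hypothetical stand-in for EOS base classes such as BaseTask or BaseDevice."""


def load_implementation(implementation_path: Path, base_class: type) -> type:
    """Import a module from a file path and return its single implementation class."""
    # Build a module spec from the file and execute it, as the registries do.
    spec = importlib.util.spec_from_file_location(implementation_path.stem, implementation_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    # Keep classes in the module namespace that subclass the base class,
    # excluding the base class itself.
    candidates = [
        obj
        for obj in vars(module).values()
        if inspect.isclass(obj) and obj is not base_class and issubclass(obj, base_class)
    ]
    if len(candidates) != 1:
        raise RuntimeError(f"Expected exactly one implementation class, found {len(candidates)}.")
    return candidates[0]
```

Matching on `issubclass` rather than on a name suffix means a misspelled class name can no longer be silently skipped; the trade-off, enforced by the multiple-classes error in the diff below, is that each implementation file may define only one plugin class.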
diff --git a/eos/configuration/plugin_registries/device_plugin_registry.py b/eos/configuration/plugin_registries/device_plugin_registry.py index cbb2adc..61e30ac 100644 --- a/eos/configuration/plugin_registries/device_plugin_registry.py +++ b/eos/configuration/plugin_registries/device_plugin_registry.py @@ -3,22 +3,18 @@ from eos.configuration.packages.entities import EntityType from eos.configuration.packages.package_manager import PackageManager from eos.configuration.plugin_registries.plugin_registry import PluginRegistry, PluginRegistryConfig -from eos.configuration.spec_registries.device_specification_registry import DeviceSpecificationRegistry +from eos.configuration.spec_registries.device_spec_registry import DeviceSpecRegistry from eos.devices.base_device import BaseDevice -class DevicePluginRegistry(PluginRegistry[BaseDevice, DeviceSpecificationRegistry]): +class DevicePluginRegistry(PluginRegistry[BaseDevice, DeviceSpecRegistry]): def __init__(self, package_manager: PackageManager): config = PluginRegistryConfig( - spec_registry=DeviceSpecificationRegistry(), + spec_registry=DeviceSpecRegistry(), base_class=BaseDevice, config_file_name=DEVICE_CONFIG_FILE_NAME, implementation_file_name=DEVICE_IMPLEMENTATION_FILE_NAME, - class_suffix="Device", not_found_exception_class=EosDeviceImplementationClassNotFoundError, entity_type=EntityType.DEVICE, ) super().__init__(package_manager, config) - - def get_device_class_type(self, device_type: str) -> type[BaseDevice]: - return self.get_plugin_class_type(device_type) diff --git a/eos/configuration/plugin_registries/plugin_registry.py b/eos/configuration/plugin_registries/plugin_registry.py index f1110e8..f163568 100644 --- a/eos/configuration/plugin_registries/plugin_registry.py +++ b/eos/configuration/plugin_registries/plugin_registry.py @@ -1,16 +1,16 @@ -import importlib import inspect import os from dataclasses import dataclass from pathlib import Path from typing import Generic, TypeVar +from importlib import util as importlib_util from eos.configuration.packages.entities import EntityType from eos.configuration.packages.package_manager import PackageManager from eos.logging.batch_error_logger import batch_error, raise_batched_errors from eos.logging.logger import log -T = TypeVar("T") +T = TypeVar("T") # Plugin class type S = TypeVar("S") # Specification registry type @@ -18,17 +18,14 @@ class PluginRegistryConfig: spec_registry: S base_class: type[T] - config_file_name: str + config_file_name: str | None implementation_file_name: str - class_suffix: str not_found_exception_class: type[Exception] entity_type: EntityType class PluginRegistry(Generic[T, S]): - """ - A generic registry for dynamically discovering and loading plugin-like implementation classes. 
- """ + """A generic registry for dynamically discovering and loading plugin-like implementation classes.""" def __init__(self, package_manager: PackageManager, config: PluginRegistryConfig): self._package_manager = package_manager @@ -36,16 +33,33 @@ def __init__(self, package_manager: PackageManager, config: PluginRegistryConfig self._plugin_types: dict[str, type[T]] = {} self._plugin_modules: dict[str, str] = {} # Maps type_name to module path - self._load_plugin_modules() + self._initialize_registry() def get_plugin_class_type(self, type_name: str) -> type[T]: - """Get the plugin class type for the given type name.""" if type_name not in self._plugin_types: raise self._config.not_found_exception_class(f"Plugin implementation for '{type_name}' not found.") return self._plugin_types[type_name] - def _load_plugin_modules(self) -> None: + def reload_plugin(self, type_name: str) -> None: + """Reload a specific plugin by type name.""" + if type_name not in self._plugin_modules: + raise self._config.not_found_exception_class(f"Plugin '{type_name}' not found.") + + module_path = Path(self._plugin_modules[type_name]) + package_name = module_path.parent.parent.name + dir_path = module_path.parent + + self._load_single_plugin(package_name, dir_path, module_path) + log.debug(f"Reloaded plugin '{type_name}'") + + def reload_all_plugins(self) -> None: + """Reload all plugins in the registry.""" + self._initialize_registry() + log.debug("Reloaded all plugins") + + def _initialize_registry(self) -> None: + """Load all plugins from all packages.""" self._plugin_types.clear() self._plugin_modules.clear() @@ -55,71 +69,94 @@ def _load_plugin_modules(self) -> None: raise_batched_errors(root_exception_type=self._config.not_found_exception_class) def _load_package_plugins(self, package) -> None: + """Load all plugins from a specific package.""" directory = package.get_entity_dir(self._config.entity_type) - if not Path(directory).is_dir(): + if not directory.is_dir(): return for current_dir, _, files in os.walk(directory): if self._config.config_file_name not in files: continue - dir_path = Path(current_dir).relative_to(Path(directory)) - implementation_file = Path(current_dir) / self._config.implementation_file_name + current_dir_path = Path(current_dir) + dir_path = current_dir_path.relative_to(directory) + implementation_path = current_dir_path / self._config.implementation_file_name - self._load_single_plugin(package.name, dir_path, implementation_file) + self._load_single_plugin(package.name, dir_path, implementation_path) - def _load_single_plugin(self, package_name: str, dir_path: Path, implementation_file: Path) -> None: - if not implementation_file.exists(): + def _load_single_plugin(self, package_name: str, dir_path: Path, implementation_path: Path) -> None: + """Load a single plugin from a file.""" + if not implementation_path.exists(): batch_error( - f"Implementation file '{implementation_file}' for package '{package_name}' not found.", + f"Implementation file '{implementation_path}' for package '{package_name}' not found.", self._config.not_found_exception_class, ) return - module_name = Path(dir_path).name + module = self._import_plugin_module(implementation_path, dir_path.name) + if not module: + return + + implementation_class = self._extract_implementation_class(module, package_name) + if not implementation_class: + return + + self._register_plugin(implementation_class, package_name, dir_path, implementation_path) + def _import_plugin_module(self, implementation_path: Path, module_name: str) 
-> object | None: + """Import a module from a file.""" + try: - spec = importlib.util.spec_from_file_location(module_name, implementation_file) - module = importlib.util.module_from_spec(spec) + spec = importlib_util.spec_from_file_location(module_name, implementation_path) + module = importlib_util.module_from_spec(spec) spec.loader.exec_module(module) + return module except Exception as e: batch_error( - f"Failed to load module '{module_name}' from '{implementation_file}': {e!s}", + f"Failed to load module '{module_name}' from '{implementation_path}': {e!s}", self._config.not_found_exception_class, ) - return + return None + + def _extract_implementation_class(self, module: object, package_name: str) -> type[T] | None: + """Find and extract the implementation class in a module.""" + implementation_classes = self._find_implementation_classes(module) - found_implementation_class = False - for name, obj in module.__dict__.items(): - if inspect.isclass(obj) and obj is not self._config.base_class and name.endswith(self._config.class_suffix): - type_name = self._config.spec_registry.get_spec_by_dir(Path(package_name) / dir_path) - self._plugin_types[type_name] = obj - self._plugin_modules[type_name] = str(implementation_file) - found_implementation_class = True - log.debug( - f"Loaded {self._config.class_suffix.lower()} plugin '{name}' for type '{type_name}' from package " - f"'{package_name}'" - ) - break - - if not found_implementation_class: + if not implementation_classes: batch_error( - f"{self._config.class_suffix} implementation class for '{module_name}' in package '{package_name}'" - f" not found. Make sure that its name ends in '{self._config.class_suffix}'.", + f"No implementation class inheriting from '{self._config.base_class.__name__}' found in " + f"'{module}' in package '{package_name}'.", self._config.not_found_exception_class, ) + return None - if len(implementation_classes) > 1: + batch_error( + f"Multiple implementation classes found in '{module}' in package '{package_name}': " + f"{', '.join(c.__name__ for c in implementation_classes)}.
Each module should contain exactly one implementation class.", + self._config.not_found_exception_class, + ) + return None - self._load_single_plugin(package_name, dir_path, implementation_file) - log.info(f"Reloaded plugin '{type_name}'") + return implementation_classes[0] - def reload_all_plugins(self) -> None: - self._load_plugin_modules() - log.info("Reloaded all plugins") + def _find_implementation_classes(self, module: object) -> list[type[T]]: + return [ + obj + for _, obj in module.__dict__.items() + if ( + inspect.isclass(obj) and obj is not self._config.base_class and issubclass(obj, self._config.base_class) + ) + ] + + def _register_plugin( + self, implementation_class: type[T], package_name: str, dir_path: Path, implementation_file: Path + ) -> None: + """Register a loaded plugin in the registry.""" + type_name = self._config.spec_registry.get_spec_by_dir(Path(package_name) / dir_path) + self._plugin_types[type_name] = implementation_class + self._plugin_modules[type_name] = str(implementation_file) + + log.debug( + f"Loaded plugin class '{implementation_class.__name__}' " + f"for type '{type_name}' from package '{package_name}'" + ) diff --git a/eos/configuration/plugin_registries/task_plugin_registry.py b/eos/configuration/plugin_registries/task_plugin_registry.py index 1c58269..4134d11 100644 --- a/eos/configuration/plugin_registries/task_plugin_registry.py +++ b/eos/configuration/plugin_registries/task_plugin_registry.py @@ -3,22 +3,18 @@ from eos.configuration.packages.entities import EntityType from eos.configuration.packages.package_manager import PackageManager from eos.configuration.plugin_registries.plugin_registry import PluginRegistry, PluginRegistryConfig -from eos.configuration.spec_registries.task_specification_registry import TaskSpecificationRegistry +from eos.configuration.spec_registries.task_spec_registry import TaskSpecRegistry from eos.tasks.base_task import BaseTask -class TaskPluginRegistry(PluginRegistry[BaseTask, TaskSpecificationRegistry]): +class TaskPluginRegistry(PluginRegistry[BaseTask, TaskSpecRegistry]): def __init__(self, package_manager: PackageManager): config = PluginRegistryConfig( - spec_registry=TaskSpecificationRegistry(), + spec_registry=TaskSpecRegistry(), base_class=BaseTask, config_file_name=TASK_CONFIG_FILE_NAME, implementation_file_name=TASK_IMPLEMENTATION_FILE_NAME, - class_suffix="Task", not_found_exception_class=EosTaskImplementationClassNotFoundError, entity_type=EntityType.TASK, ) super().__init__(package_manager, config) - - def get_task_class_type(self, task_type: str) -> type[BaseTask]: - return self.get_plugin_class_type(task_type) diff --git a/eos/configuration/spec_registries/device_spec_registry.py b/eos/configuration/spec_registries/device_spec_registry.py new file mode 100644 index 0000000..c3e6b5e --- /dev/null +++ b/eos/configuration/spec_registries/device_spec_registry.py @@ -0,0 +1,9 @@ +from eos.configuration.entities.device_spec import DeviceSpec +from eos.configuration.entities.lab import LabDeviceConfig +from eos.configuration.spec_registries.spec_registry import SpecRegistry + + +class DeviceSpecRegistry(SpecRegistry[DeviceSpec, LabDeviceConfig]): + """ + The device specification registry stores the specifications for all devices that are available in EOS. 
+ """ diff --git a/eos/configuration/spec_registries/device_specification_registry.py b/eos/configuration/spec_registries/device_specification_registry.py deleted file mode 100644 index e69fdb6..0000000 --- a/eos/configuration/spec_registries/device_specification_registry.py +++ /dev/null @@ -1,9 +0,0 @@ -from eos.configuration.entities.device_specification import DeviceSpecification -from eos.configuration.entities.lab import LabDeviceConfig -from eos.configuration.spec_registries.specification_registry import SpecificationRegistry - - -class DeviceSpecificationRegistry(SpecificationRegistry[DeviceSpecification, LabDeviceConfig]): - """ - The device specification registry stores the specifications for all devices that are available in EOS. - """ diff --git a/eos/configuration/spec_registries/specification_registry.py b/eos/configuration/spec_registries/spec_registry.py similarity index 94% rename from eos/configuration/spec_registries/specification_registry.py rename to eos/configuration/spec_registries/spec_registry.py index d9ee45c..2fc481f 100644 --- a/eos/configuration/spec_registries/specification_registry.py +++ b/eos/configuration/spec_registries/spec_registry.py @@ -6,7 +6,7 @@ C = TypeVar("C") # Configuration type -class SpecificationRegistry(Generic[T, C], metaclass=Singleton): +class SpecRegistry(Generic[T, C], metaclass=Singleton): """ A generic registry for storing and retrieving specifications. """ diff --git a/eos/configuration/spec_registries/task_specification_registry.py b/eos/configuration/spec_registries/task_spec_registry.py similarity index 58% rename from eos/configuration/spec_registries/task_specification_registry.py rename to eos/configuration/spec_registries/task_spec_registry.py index 9b9c160..4082e44 100644 --- a/eos/configuration/spec_registries/task_specification_registry.py +++ b/eos/configuration/spec_registries/task_spec_registry.py @@ -1,23 +1,23 @@ from eos.configuration.entities.task import TaskConfig -from eos.configuration.entities.task_specification import TaskSpecification -from eos.configuration.spec_registries.specification_registry import SpecificationRegistry +from eos.configuration.entities.task_spec import TaskSpecConfig +from eos.configuration.spec_registries.spec_registry import SpecRegistry -class TaskSpecificationRegistry(SpecificationRegistry[TaskSpecification, TaskConfig]): +class TaskSpecRegistry(SpecRegistry[TaskSpecConfig, TaskConfig]): """ The task specification registry stores the specifications for all tasks that are available in EOS. 
""" def __init__( self, - task_specifications: dict[str, TaskSpecification], + task_specifications: dict[str, TaskSpecConfig], task_dirs_to_task_types: dict[str, str], ): updated_specs = self._update_output_containers(task_specifications) super().__init__(updated_specs, task_dirs_to_task_types) @staticmethod - def _update_output_containers(specs: dict[str, TaskSpecification]) -> dict[str, TaskSpecification]: + def _update_output_containers(specs: dict[str, TaskSpecConfig]) -> dict[str, TaskSpecConfig]: for spec in specs.values(): if not spec.output_containers: spec.output_containers = spec.input_containers.copy() diff --git a/eos/configuration/validation/container_registry.py b/eos/configuration/validation/experiment_container_registry.py similarity index 91% rename from eos/configuration/validation/container_registry.py rename to eos/configuration/validation/experiment_container_registry.py index f27d6c5..3ce8b4d 100644 --- a/eos/configuration/validation/container_registry.py +++ b/eos/configuration/validation/experiment_container_registry.py @@ -7,9 +7,9 @@ ) -class ContainerRegistry: +class ExperimentContainerRegistry: """ - The container registry stores information about the containers in labs. + The container registry stores information about the containers in the labs used by an experiment. """ def __init__(self, experiment_config: ExperimentConfig, lab_configs: list[LabConfig]): diff --git a/eos/configuration/validation/container_validator.py b/eos/configuration/validation/experiment_container_validator.py similarity index 85% rename from eos/configuration/validation/container_validator.py rename to eos/configuration/validation/experiment_container_validator.py index b026a79..e4fbe20 100644 --- a/eos/configuration/validation/container_validator.py +++ b/eos/configuration/validation/experiment_container_validator.py @@ -4,7 +4,7 @@ ) from eos.configuration.entities.lab import LabConfig from eos.configuration.exceptions import EosContainerConfigurationError -from eos.configuration.validation.container_registry import ContainerRegistry +from eos.configuration.validation.experiment_container_registry import ExperimentContainerRegistry class ExperimentContainerValidator: @@ -16,7 +16,7 @@ def __init__(self, experiment_config: ExperimentConfig, lab_configs: list[LabCon self._experiment_config = experiment_config self._lab_configs = lab_configs - self._container_registry = ContainerRegistry(experiment_config, lab_configs) + self._container_registry = ExperimentContainerRegistry(experiment_config, lab_configs) def validate(self) -> None: self._validate_containers() @@ -24,6 +24,7 @@ def validate(self) -> None: def _validate_containers(self) -> None: if not self._experiment_config.containers: return + for container in self._experiment_config.containers: self._validate_container_exists(container) diff --git a/eos/configuration/validation/experiment_validator.py b/eos/configuration/validation/experiment_validator.py index 15d99ce..f8bd735 100644 --- a/eos/configuration/validation/experiment_validator.py +++ b/eos/configuration/validation/experiment_validator.py @@ -1,7 +1,7 @@ from eos.configuration.entities.experiment import ExperimentConfig from eos.configuration.entities.lab import LabConfig from eos.configuration.exceptions import EosExperimentConfigurationError -from eos.configuration.validation.container_validator import ( +from eos.configuration.validation.experiment_container_validator import ( ExperimentContainerValidator, ) diff --git a/eos/configuration/validation/lab_validator.py 
b/eos/configuration/validation/lab_validator.py index 3f0db53..356b273 100644 --- a/eos/configuration/validation/lab_validator.py +++ b/eos/configuration/validation/lab_validator.py @@ -3,9 +3,9 @@ from eos.configuration.constants import LABS_DIR, EOS_COMPUTER_NAME from eos.configuration.entities.lab import LabConfig from eos.configuration.exceptions import EosLabConfigurationError -from eos.configuration.spec_registries.device_specification_registry import DeviceSpecificationRegistry -from eos.configuration.spec_registries.task_specification_registry import ( - TaskSpecificationRegistry, +from eos.configuration.spec_registries.device_spec_registry import DeviceSpecRegistry +from eos.configuration.spec_registries.task_spec_registry import ( + TaskSpecRegistry, ) from eos.logging.batch_error_logger import batch_error, raise_batched_errors @@ -19,8 +19,8 @@ class LabValidator: def __init__(self, config_dir: str, lab_config: LabConfig): self._lab_config = lab_config self._lab_config_dir = Path(config_dir) / LABS_DIR / lab_config.type.lower() - self._tasks = TaskSpecificationRegistry() - self._devices = DeviceSpecificationRegistry() + self._tasks = TaskSpecRegistry() + self._devices = DeviceSpecRegistry() def validate(self) -> None: self._validate_lab_folder_name_matches_lab_type() @@ -34,10 +34,9 @@ def _validate_locations(self) -> None: self._validate_container_locations() def _validate_lab_folder_name_matches_lab_type(self) -> None: - if Path(self._lab_config_dir).name != self._lab_config.type: + if self._lab_config_dir.name != self._lab_config.type: raise EosLabConfigurationError( - f"Lab folder name '{Path(self._lab_config_dir).name}' does not match lab type " - f"'{self._lab_config.type}'." + f"Lab folder name '{self._lab_config_dir.name}' does not match lab type '{self._lab_config.type}'." 
) def _validate_device_locations(self) -> None: @@ -90,8 +89,9 @@ def _validate_eos_computer_not_specified(self) -> None: raise_batched_errors(EosLabConfigurationError) def _validate_devices(self) -> None: + self._validate_device_types() self._validate_devices_have_computers() - self._validate_device_initialization_parameters() + self._validate_device_init_parameters() def _validate_devices_have_computers(self) -> None: for device_name, device in self._lab_config.devices.items(): @@ -104,19 +104,22 @@ def _validate_devices_have_computers(self) -> None: ) raise_batched_errors(EosLabConfigurationError) - def _validate_device_initialization_parameters(self) -> None: + def _validate_device_types(self) -> None: for device_name, device in self._lab_config.devices.items(): - device_spec = self._devices.get_spec_by_config(device) - if not device_spec: + if not self._devices.get_spec_by_config(device): batch_error( - f"No specification found for device type '{device.type}' of device '{device_name}'.", + f"Device type '{device.type}' of device '{device_name}' does not exist.", EosLabConfigurationError, ) - continue + raise_batched_errors(EosLabConfigurationError) + + def _validate_device_init_parameters(self) -> None: + for device_name, device in self._lab_config.devices.items(): + device_spec = self._devices.get_spec_by_config(device) - if device.initialization_parameters: - spec_params = device_spec.initialization_parameters or {} - for param_name in device.initialization_parameters: + if device.init_parameters: + spec_params = device_spec.init_parameters or {} + for param_name in device.init_parameters: if param_name not in spec_params: batch_error( f"Invalid initialization parameter '{param_name}' for device '{device_name}' " diff --git a/eos/configuration/validation/task_sequence/base_task_sequence_validator.py b/eos/configuration/validation/task_sequence/base_task_sequence_validator.py index 27b2551..8650229 100644 --- a/eos/configuration/validation/task_sequence/base_task_sequence_validator.py +++ b/eos/configuration/validation/task_sequence/base_task_sequence_validator.py @@ -5,8 +5,8 @@ ) from eos.configuration.entities.lab import LabConfig from eos.configuration.entities.task import TaskConfig -from eos.configuration.spec_registries.task_specification_registry import ( - TaskSpecificationRegistry, +from eos.configuration.spec_registries.task_spec_registry import ( + TaskSpecRegistry, ) @@ -18,7 +18,7 @@ def __init__( ): self._experiment_config = experiment_config self._lab_configs = lab_configs - self._tasks = TaskSpecificationRegistry() + self._tasks = TaskSpecRegistry() @abstractmethod def validate(self) -> None: diff --git a/eos/configuration/validation/task_sequence/task_input_container_validator.py b/eos/configuration/validation/task_sequence/task_input_container_validator.py index 1832e49..afbe168 100644 --- a/eos/configuration/validation/task_sequence/task_input_container_validator.py +++ b/eos/configuration/validation/task_sequence/task_input_container_validator.py @@ -1,9 +1,9 @@ from eos.configuration.entities.lab import LabContainerConfig from eos.configuration.entities.task import TaskConfig -from eos.configuration.entities.task_specification import TaskSpecification, TaskSpecificationContainer +from eos.configuration.entities.task_spec import TaskSpecConfig, TaskSpecContainerConfig from eos.configuration.exceptions import EosTaskValidationError from eos.configuration.validation import validation_utils -from eos.configuration.validation.container_registry import 
ContainerRegistry +from eos.configuration.validation.experiment_container_registry import ExperimentContainerRegistry from eos.logging.batch_error_logger import batch_error, raise_batched_errors @@ -15,8 +15,8 @@ class TaskInputContainerValidator: def __init__( self, task: TaskConfig, - task_spec: TaskSpecification, - container_registry: ContainerRegistry, + task_spec: TaskSpecConfig, + container_registry: ExperimentContainerRegistry, ): self._task_id = task.id self._input_containers = task.containers @@ -41,7 +41,7 @@ def _validate_input_container_requirements(self) -> None: self._validate_container_counts(required_containers, provided_containers) self._validate_container_types(required_containers, provided_containers) - def _get_required_containers(self) -> dict[str, TaskSpecificationContainer]: + def _get_required_containers(self) -> dict[str, TaskSpecContainerConfig]: """ Get the required containers as specified in the task specification. """ @@ -75,7 +75,7 @@ def _validate_container_exists(self, container_id: str) -> LabContainerConfig: return container def _validate_container_counts( - self, required: dict[str, TaskSpecificationContainer], provided: dict[str, str] + self, required: dict[str, TaskSpecContainerConfig], provided: dict[str, str] ) -> None: """ Validate that the total number of containers matches the requirements. @@ -86,9 +86,7 @@ def _validate_container_counts( EosTaskValidationError, ) - def _validate_container_types( - self, required: dict[str, TaskSpecificationContainer], provided: dict[str, str] - ) -> None: + def _validate_container_types(self, required: dict[str, TaskSpecContainerConfig], provided: dict[str, str]) -> None: """ Validate that the types of non-reference containers match the requirements. """ diff --git a/eos/configuration/validation/task_sequence/task_input_parameter_validator.py b/eos/configuration/validation/task_sequence/task_input_parameter_validator.py index 3ea6bf2..57d2f9c 100644 --- a/eos/configuration/validation/task_sequence/task_input_parameter_validator.py +++ b/eos/configuration/validation/task_sequence/task_input_parameter_validator.py @@ -1,11 +1,9 @@ import copy from typing import Any -from omegaconf import DictConfig, ListConfig, OmegaConf - -from eos.configuration.entities.parameters import ParameterType, ParameterFactory +from eos.configuration.entities.task_parameters import TaskParameterType, TaskParameterFactory from eos.configuration.entities.task import TaskConfig -from eos.configuration.entities.task_specification import TaskSpecification +from eos.configuration.entities.task_spec import TaskSpecConfig from eos.configuration.exceptions import ( EosTaskValidationError, EosConfigurationError, @@ -19,7 +17,7 @@ class TaskInputParameterValidator: Validates that the input parameters of a task conform to the task's specification. 
""" - def __init__(self, task: TaskConfig, task_spec: TaskSpecification): + def __init__(self, task: TaskConfig, task_spec: TaskSpecConfig): self._task_id = task.id self._input_parameters = task.parameters self._task_spec = task_spec @@ -90,10 +88,7 @@ def _validate_parameter_spec(self, parameter_name: str, parameter: Any) -> None: """ parameter_spec = copy.deepcopy(self._task_spec.input_parameters[parameter_name]) - if isinstance(parameter, ListConfig | DictConfig): - parameter = OmegaConf.to_object(parameter) - - if not isinstance(parameter, ParameterType(parameter_spec.type).python_type()): + if not isinstance(parameter, TaskParameterType(parameter_spec.type).python_type): batch_error( f"Parameter '{parameter_name}' in task '{self._task_id}' has incorrect type {type(parameter)}. " f"Expected type: '{parameter_spec.type}'.", @@ -101,11 +96,11 @@ def _validate_parameter_spec(self, parameter_name: str, parameter: Any) -> None: ) return - parameter_spec["value"] = parameter + parameter_spec.value = parameter try: - parameter_type = ParameterType(parameter_spec.type) - ParameterFactory.create_parameter(parameter_type, **parameter_spec) + parameter_type = TaskParameterType(parameter_spec.type) + TaskParameterFactory.create(parameter_type, **parameter_spec.model_dump()) except EosConfigurationError as e: batch_error( f"Parameter '{parameter_name}' in task '{self._task_id}' validation error: {e}", @@ -136,4 +131,4 @@ def _get_required_input_parameters(self) -> list[str]: """ Get all the required input parameters for the task. """ - return [param for param, spec in self._task_spec.input_parameters.items() if "value" not in spec] + return [param for param, spec in self._task_spec.input_parameters.items() if spec.value is None] diff --git a/eos/configuration/validation/task_sequence/task_sequence_input_container_validator.py b/eos/configuration/validation/task_sequence/task_sequence_input_container_validator.py index 84bf328..d19f0fb 100644 --- a/eos/configuration/validation/task_sequence/task_sequence_input_container_validator.py +++ b/eos/configuration/validation/task_sequence/task_sequence_input_container_validator.py @@ -3,7 +3,7 @@ from eos.configuration.entities.task import TaskConfig from eos.configuration.exceptions import EosTaskValidationError from eos.configuration.validation import validation_utils -from eos.configuration.validation.container_registry import ContainerRegistry +from eos.configuration.validation.experiment_container_registry import ExperimentContainerRegistry from eos.configuration.validation.task_sequence.base_task_sequence_validator import BaseTaskSequenceValidator from eos.configuration.validation.task_sequence.task_input_container_validator import TaskInputContainerValidator @@ -19,7 +19,7 @@ def __init__( lab_configs: list[LabConfig], ): super().__init__(experiment_config, lab_configs) - self._container_registry = ContainerRegistry(experiment_config, lab_configs) + self._container_registry = ExperimentContainerRegistry(experiment_config, lab_configs) def validate(self) -> None: for task in self._experiment_config.tasks: diff --git a/eos/configuration/validation/task_sequence/task_sequence_input_parameter_validator.py b/eos/configuration/validation/task_sequence/task_sequence_input_parameter_validator.py index 19fa902..2a67ffb 100644 --- a/eos/configuration/validation/task_sequence/task_sequence_input_parameter_validator.py +++ b/eos/configuration/validation/task_sequence/task_sequence_input_parameter_validator.py @@ -1,5 +1,5 @@ -from 
eos.configuration.entities.parameters import ( - ParameterType, +from eos.configuration.entities.task_parameters import ( + TaskParameterType, ) from eos.configuration.entities.task import TaskConfig from eos.configuration.exceptions import ( @@ -85,11 +85,11 @@ def _validate_parameter_reference( parameter_spec = task_spec.input_parameters[parameter_name] if ( - ParameterType(parameter_spec.type).python_type() - != ParameterType(referenced_parameter_spec.type).python_type() + TaskParameterType(parameter_spec.type).python_type + != TaskParameterType(referenced_parameter_spec.type).python_type ): raise EosTaskValidationError( f"Type mismatch for referenced parameter '{referenced_parameter}' in task '{task.id}'. " - f"The required parameter type is '{parameter_spec.type}' which does not match referenced the parameter " + f"The required parameter type is '{parameter_spec.type}' which does not match the referenced parameter " f"type '{referenced_parameter_spec.type.value}'." ) diff --git a/eos/configuration/validation/validation_utils.py b/eos/configuration/validation/validation_utils.py index 58d4c12..8578d1b 100644 --- a/eos/configuration/validation/validation_utils.py +++ b/eos/configuration/validation/validation_utils.py @@ -1,9 +1,7 @@ -from eos.configuration.entities.parameters import ( - AllowedParameterTypes, -) +from typing import Any -def is_parameter_reference(parameter: AllowedParameterTypes) -> bool: +def is_parameter_reference(parameter: Any) -> bool: return ( isinstance(parameter, str) and parameter.count(".") == 1 @@ -11,17 +9,10 @@ def is_parameter_reference(parameter: AllowedParameterTypes) -> bool: ) -def is_dynamic_parameter(parameter: AllowedParameterTypes) -> bool: +def is_dynamic_parameter(parameter: Any) -> bool: return isinstance(parameter, str) and parameter.lower() == "eos_dynamic" -def is_dynamic_container(container_id: str) -> bool: - """ - Check if the container ID is a dynamic container ID (eos_dynamic). - """ - return isinstance(container_id, str) and container_id.lower() == "eos_dynamic" - - def is_container_reference(container_id: str) -> bool: """ Check if the container ID is a reference. diff --git a/eos/containers/entities/container.py b/eos/containers/entities/container.py index 8e30d0d..62aeeb0 100644 --- a/eos/containers/entities/container.py +++ b/eos/containers/entities/container.py @@ -1,16 +1,16 @@ from typing import Any -from pydantic import BaseModel +from pydantic import BaseModel, Field class Container(BaseModel): id: str - type: str - lab: str + type: str | None = None + lab: str | None = None - location: str + location: str | None = None - metadata: dict[str, Any] = {} + metadata: dict[str, Any] = Field(default_factory=dict) class Config: arbitrary_types_allowed = True diff --git a/eos/devices/base_device.py b/eos/devices/base_device.py index 7974810..261a3bb 100644 --- a/eos/devices/base_device.py +++ b/eos/devices/base_device.py @@ -48,13 +48,13 @@ def __init__( self._lab_id = lab_id self._device_type = device_type self._status = DeviceStatus.DISABLED - self._initialization_parameters = {} + self._init_parameters = {} self._lock = asyncio.Lock() register_async_exit_callback(self.cleanup) - async def initialize(self, initialization_parameters: dict[str, Any]) -> None: + async def initialize(self, init_parameters: dict[str, Any]) -> None: """ Initialize the device. After calling this method, the device is ready to be used for tasks and the status is IDLE. 
@@ -64,9 +64,9 @@ async def initialize(self, initialization_parameters: dict[str, Any]) -> None: raise EosDeviceInitializationError(f"Device {self._device_id} is already initialized.") try: - await self._initialize(initialization_parameters) + await self._initialize(init_parameters) self._status = DeviceStatus.IDLE - self._initialization_parameters = initialization_parameters + self._init_parameters = init_parameters except Exception as e: self._status = DeviceStatus.ERROR raise EosDeviceInitializationError( @@ -105,7 +105,7 @@ async def enable(self) -> None: Enable the device. The status should be IDLE after calling this method. """ if self._status == DeviceStatus.DISABLED: - await self.initialize(self._initialization_parameters) + await self.initialize(self._init_parameters) async def disable(self) -> None: """ @@ -129,8 +129,8 @@ def get_lab_id(self) -> str: def get_device_type(self) -> str: return self._device_type - def get_initialization_parameters(self) -> dict[str, Any]: - return self._initialization_parameters + def get_init_parameters(self) -> dict[str, Any]: + return self._init_parameters @property def id(self) -> str: @@ -149,8 +149,8 @@ def status(self) -> DeviceStatus: return self._status @property - def initialization_parameters(self) -> dict[str, Any]: - return self._initialization_parameters + def init_parameters(self) -> dict[str, Any]: + return self._init_parameters @abstractmethod async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: diff --git a/eos/devices/device_manager.py b/eos/devices/device_manager.py index 4aa1773..367b737 100644 --- a/eos/devices/device_manager.py +++ b/eos/devices/device_manager.py @@ -3,7 +3,6 @@ from typing import Any import ray -from omegaconf import OmegaConf from ray.actor import ActorHandle from eos.configuration.configuration_manager import ConfigurationManager @@ -115,7 +114,6 @@ async def cleanup_device(actor_id: str) -> None: log.debug(f"Cleaned up devices for lab(s): {', '.join(lab_ids)}") else: await self._devices.delete_all() - log.info("All devices have been cleaned up.") async def _create_devices_for_lab(self, lab_id: str) -> None: lab_config = self._configuration_manager.labs[lab_id] @@ -171,14 +169,10 @@ async def _create_device_actor(self, device: Device) -> None: self._device_actor_computer_ips[device_actor_id] = computer_ip spec_initialization_parameters = ( - self._configuration_manager.device_specs.get_spec_by_type(device.type).initialization_parameters or {} + self._configuration_manager.device_specs.get_spec_by_type(device.type).init_parameters or {} ) - if spec_initialization_parameters: - spec_initialization_parameters = OmegaConf.to_object(spec_initialization_parameters) - device_config_initialization_parameters = device_config.initialization_parameters or {} - if device_config_initialization_parameters: - device_config_initialization_parameters = OmegaConf.to_object(device_config_initialization_parameters) + device_config_initialization_parameters = device_config.init_parameters or {} initialization_parameters: dict[str, Any] = { **spec_initialization_parameters, @@ -189,7 +183,7 @@ async def _create_device_actor(self, device: Device) -> None: {"eos-core": 0.0001} if computer_ip in ["localhost", "127.0.0.1"] else {f"node:{computer_ip}": 0.0001} ) - device_class = ray.remote(self._device_plugin_registry.get_device_class_type(device.type)) + device_class = ray.remote(self._device_plugin_registry.get_plugin_class_type(device.type)) self._device_actor_handles[device_actor_id] = 
device_class.options( name=device_actor_id, num_cpus=0, diff --git a/eos/devices/entities/device.py b/eos/devices/entities/device.py index 409da9c..d655793 100644 --- a/eos/devices/entities/device.py +++ b/eos/devices/entities/device.py @@ -17,7 +17,7 @@ class Device(BaseModel): computer: str location: str | None = None status: DeviceStatus = DeviceStatus.ACTIVE - metadata: dict[str, Any] = {} + metadata: dict[str, Any] = Field(default_factory=dict) actor_handle: ActorHandle | None = Field(exclude=True, default=None) diff --git a/eos/devices/repositories/device_repository.py b/eos/devices/repositories/device_repository.py index 38213e4..3003dc9 100644 --- a/eos/devices/repositories/device_repository.py +++ b/eos/devices/repositories/device_repository.py @@ -22,6 +22,7 @@ async def delete_devices_by_lab_ids( Delete all devices associated with the given lab IDs in a single operation. :param lab_ids: List of lab_ids for which to delete devices. + :param session: The database client session. :return: The result of the delete operation. """ return await self._collection.delete_many({"lab_id": {"$in": lab_ids}}, session=session) @@ -33,6 +34,7 @@ async def get_devices_by_lab_ids( Get all devices associated with the given lab IDs in a single operation. :param lab_ids: List of lab_ids for which to fetch devices. + :param session: The database client session. :return: A dictionary with lab_ids as keys and lists of devices as values. """ cursor = self._collection.find({"lab_id": {"$in": lab_ids}}, session=session) @@ -52,6 +54,7 @@ async def bulk_upsert( Perform a bulk upsert operation for multiple devices. :param devices: List of device dictionaries to upsert. + :param session: The database client session. :return: The result of the bulk write operation. """ operations = [ diff --git a/eos/experiments/entities/experiment.py b/eos/experiments/entities/experiment.py index a73f800..685e12b 100644 --- a/eos/experiments/entities/experiment.py +++ b/eos/experiments/entities/experiment.py @@ -2,7 +2,7 @@ from enum import Enum from typing import Any -from pydantic import BaseModel, field_serializer +from pydantic import BaseModel, Field, field_serializer class ExperimentStatus(Enum): @@ -14,31 +14,31 @@ class ExperimentStatus(Enum): FAILED = "FAILED" -class ExperimentExecutionParameters(BaseModel): - resume: bool = False - +class ExperimentDefinition(BaseModel): + """The definition of an experiment. 
Used for submission.""" -class Experiment(BaseModel): id: str type: str - execution_parameters: ExperimentExecutionParameters + owner: str | None = None - status: ExperimentStatus = ExperimentStatus.CREATED + dynamic_parameters: dict[str, dict[str, Any]] = Field(default_factory=dict) + metadata: dict[str, Any] | None = None + + resume: bool = False - labs: list[str] = [] - running_tasks: list[str] = [] - completed_tasks: list[str] = [] +class Experiment(ExperimentDefinition): + """The state of an experiment in the system.""" - dynamic_parameters: dict[str, dict[str, Any]] = {} + status: ExperimentStatus = ExperimentStatus.CREATED - metadata: dict[str, Any] = {} + running_tasks: list[str] = Field(default_factory=list) + completed_tasks: list[str] = Field(default_factory=list) start_time: datetime | None = None end_time: datetime | None = None - - created_at: datetime = datetime.now(tz=timezone.utc) + created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) class Config: arbitrary_types_allowed = True @@ -46,3 +46,8 @@ class Config: @field_serializer("status") def status_enum_to_string(self, v: ExperimentStatus) -> str: return v.value + + @classmethod + def from_definition(cls, definition: ExperimentDefinition) -> "Experiment": + """Create an Experiment instance from an ExperimentDefinition.""" + return cls(**definition.model_dump()) diff --git a/eos/experiments/experiment_executor.py b/eos/experiments/experiment_executor.py index 942815c..7ed4d8e 100644 --- a/eos/experiments/experiment_executor.py +++ b/eos/experiments/experiment_executor.py @@ -4,7 +4,7 @@ from eos.configuration.experiment_graph.experiment_graph import ExperimentGraph from eos.configuration.validation import validation_utils from eos.containers.container_manager import ContainerManager -from eos.experiments.entities.experiment import ExperimentStatus, ExperimentExecutionParameters, Experiment +from eos.experiments.entities.experiment import ExperimentStatus, Experiment, ExperimentDefinition from eos.experiments.exceptions import ( EosExperimentExecutionError, EosExperimentTaskExecutionError, @@ -14,8 +14,7 @@ from eos.logging.logger import log from eos.scheduling.abstract_scheduler import AbstractScheduler from eos.scheduling.entities.scheduled_task import ScheduledTask -from eos.tasks.entities.task import TaskOutput -from eos.tasks.entities.task_execution_parameters import TaskExecutionParameters +from eos.tasks.entities.task import TaskOutput, TaskDefinition from eos.tasks.exceptions import EosTaskExecutionError, EosTaskCancellationError from eos.tasks.task_executor import TaskExecutor from eos.tasks.task_input_resolver import TaskInputResolver @@ -27,9 +26,7 @@ class ExperimentExecutor: def __init__( self, - experiment_id: str, - experiment_type: str, - execution_parameters: ExperimentExecutionParameters, + experiment_definition: ExperimentDefinition, experiment_graph: ExperimentGraph, experiment_manager: ExperimentManager, task_manager: TaskManager, @@ -37,9 +34,9 @@ def __init__( task_executor: TaskExecutor, scheduler: AbstractScheduler, ): - self._experiment_id = experiment_id - self._experiment_type = experiment_type - self._execution_parameters = execution_parameters + self._experiment_definition = experiment_definition + self._experiment_id = experiment_definition.id + self._experiment_type = experiment_definition.type self._experiment_graph = experiment_graph self._experiment_manager = experiment_manager @@ -49,15 +46,11 @@ def __init__( self._scheduler = scheduler 
self._task_input_resolver = TaskInputResolver(task_manager, experiment_manager) - self._current_task_execution_parameters: dict[str, TaskExecutionParameters] = {} + self._current_task_definitions: dict[str, TaskDefinition] = {} self._task_output_futures: dict[str, asyncio.Task] = {} self._experiment_status = None - async def start_experiment( - self, - dynamic_parameters: dict[str, dict[str, Any]] | None = None, - metadata: dict[str, Any] | None = None, - ) -> None: + async def start_experiment(self) -> None: """ Start the experiment and register the executor with the scheduler. """ @@ -65,7 +58,7 @@ async def start_experiment( if experiment: await self._handle_existing_experiment(experiment) else: - await self._create_new_experiment(dynamic_parameters, metadata) + await self._create_new_experiment() self._scheduler.register_experiment( experiment_id=self._experiment_id, @@ -76,7 +69,9 @@ async def start_experiment( await self._experiment_manager.start_experiment(self._experiment_id) self._experiment_status = ExperimentStatus.RUNNING - log.info(f"{'Resumed' if self._execution_parameters.resume else 'Started'} experiment '{self._experiment_id}'.") + log.info( + f"{'Resumed' if self._experiment_definition.resume else 'Started'} experiment '{self._experiment_id}'." + ) async def _handle_existing_experiment(self, experiment: Experiment) -> None: """ @@ -84,7 +79,7 @@ async def _handle_existing_experiment(self, experiment: Experiment) -> None: """ self._experiment_status = experiment.status - if not self._execution_parameters.resume: + if not self._experiment_definition.resume: def _raise_error(status: str) -> None: raise EosExperimentExecutionError( @@ -152,29 +147,21 @@ async def _resume_experiment(self) -> None: await self._experiment_manager.delete_non_completed_tasks(self._experiment_id) log.info(f"Experiment '{self._experiment_id}' resumed.") - async def _create_new_experiment( - self, dynamic_parameters: dict[str, dict[str, Any]], metadata: dict[str, Any] - ) -> None: + async def _create_new_experiment(self) -> None: """ - Create a new experiment with the given parameters. + Create a new experiment. """ - dynamic_parameters = dynamic_parameters or {} + dynamic_parameters = self._experiment_definition.dynamic_parameters or {} self._validate_dynamic_parameters(dynamic_parameters) - await self._experiment_manager.create_experiment( - experiment_id=self._experiment_id, - experiment_type=self._experiment_type, - execution_parameters=self._execution_parameters, - dynamic_parameters=dynamic_parameters, - metadata=metadata, - ) + await self._experiment_manager.create_experiment(self._experiment_definition) async def _cancel_running_tasks(self) -> None: """ Cancel all running tasks in the experiment. 
""" cancellation_futures = [ - self._task_executor.request_task_cancellation(params.experiment_id, params.task_config.id) - for params in self._current_task_execution_parameters.values() + self._task_executor.request_task_cancellation(task_definition.experiment_id, task_definition.id) + for task_definition in self._current_task_definitions.values() ] try: await asyncio.wait_for(asyncio.gather(*cancellation_futures), timeout=30) @@ -196,6 +183,7 @@ async def _complete_experiment(self) -> None: self._experiment_manager.complete_experiment(self._experiment_id), ) self._experiment_status = ExperimentStatus.COMPLETED + log.info(f"Completed experiment '{self._experiment_id}'.") async def _fail_experiment(self) -> None: """ @@ -233,7 +221,7 @@ async def _process_task_output(self, task_id: str) -> None: ) from e finally: del self._task_output_futures[task_id] - del self._current_task_execution_parameters[task_id] + del self._current_task_definitions[task_id] async def _update_containers(self, output_containers: dict[str, Any]) -> None: """ @@ -253,8 +241,6 @@ async def _add_task_output( Add task output to the task manager. """ task_output = TaskOutput( - experiment_id=self._experiment_id, - task_id=task_id, parameters=output_parameters, containers=output_containers, file_names=list(output_files.keys()), @@ -269,7 +255,7 @@ async def _execute_tasks(self) -> None: """ new_scheduled_tasks = await self._scheduler.request_tasks(self._experiment_id) for scheduled_task in new_scheduled_tasks: - if scheduled_task.id not in self._current_task_execution_parameters: + if scheduled_task.id not in self._current_task_definitions: await self._execute_task(scheduled_task) async def _execute_task(self, scheduled_task: ScheduledTask) -> None: @@ -278,16 +264,12 @@ async def _execute_task(self, scheduled_task: ScheduledTask) -> None: """ task_config = self._experiment_graph.get_task_config(scheduled_task.id) task_config = await self._task_input_resolver.resolve_task_inputs(self._experiment_id, task_config) - task_execution_parameters = TaskExecutionParameters( - task_id=scheduled_task.id, - experiment_id=self._experiment_id, - devices=scheduled_task.devices, - task_config=task_config, - ) + task_definition = TaskDefinition.from_config(task_config, self._experiment_id) + self._task_output_futures[scheduled_task.id] = asyncio.create_task( - self._task_executor.request_task_execution(task_execution_parameters, scheduled_task) + self._task_executor.request_task_execution(task_definition, scheduled_task) ) - self._current_task_execution_parameters[scheduled_task.id] = task_execution_parameters + self._current_task_definitions[scheduled_task.id] = task_definition def _validate_dynamic_parameters(self, dynamic_parameters: dict[str, dict[str, Any]]) -> None: """ diff --git a/eos/experiments/experiment_executor_factory.py b/eos/experiments/experiment_executor_factory.py index c2f3f37..fba84bd 100644 --- a/eos/experiments/experiment_executor_factory.py +++ b/eos/experiments/experiment_executor_factory.py @@ -1,7 +1,7 @@ from eos.configuration.configuration_manager import ConfigurationManager from eos.configuration.experiment_graph.experiment_graph import ExperimentGraph from eos.containers.container_manager import ContainerManager -from eos.experiments.entities.experiment import ExperimentExecutionParameters +from eos.experiments.entities.experiment import ExperimentDefinition from eos.experiments.experiment_executor import ExperimentExecutor from eos.experiments.experiment_manager import ExperimentManager from 
eos.scheduling.abstract_scheduler import AbstractScheduler @@ -30,16 +30,12 @@ def __init__( self._task_executor = task_executor self._scheduler = scheduler - def create( - self, experiment_id: str, experiment_type: str, execution_parameters: ExperimentExecutionParameters - ) -> ExperimentExecutor: - experiment_config = self._configuration_manager.experiments.get(experiment_type) + def create(self, experiment_definition: ExperimentDefinition) -> ExperimentExecutor: + experiment_config = self._configuration_manager.experiments.get(experiment_definition.type) experiment_graph = ExperimentGraph(experiment_config) return ExperimentExecutor( - experiment_id=experiment_id, - experiment_type=experiment_type, - execution_parameters=execution_parameters, + experiment_definition=experiment_definition, experiment_graph=experiment_graph, experiment_manager=self._experiment_manager, task_manager=self._task_manager, diff --git a/eos/experiments/experiment_manager.py b/eos/experiments/experiment_manager.py index 467de27..3852dde 100644 --- a/eos/experiments/experiment_manager.py +++ b/eos/experiments/experiment_manager.py @@ -3,7 +3,7 @@ from typing import Any from eos.configuration.configuration_manager import ConfigurationManager -from eos.experiments.entities.experiment import Experiment, ExperimentStatus, ExperimentExecutionParameters +from eos.experiments.entities.experiment import Experiment, ExperimentStatus, ExperimentDefinition from eos.experiments.exceptions import EosExperimentStateError from eos.experiments.repositories.experiment_repository import ExperimentRepository from eos.logging.logger import log @@ -32,21 +32,14 @@ async def initialize(self, db_interface: AsyncMongoDbInterface) -> None: async def create_experiment( self, - experiment_id: str, - experiment_type: str, - execution_parameters: ExperimentExecutionParameters | None = None, - dynamic_parameters: dict[str, dict[str, Any]] | None = None, - metadata: dict[str, Any] | None = None, + definition: ExperimentDefinition, ) -> None: """ - Create a new experiment of a given type with a unique id. - - :param experiment_id: A unique id for the experiment. - :param experiment_type: The type of the experiment as defined in the configuration. - :param dynamic_parameters: Dictionary of the dynamic parameters per task and their provided values. - :param execution_parameters: Parameters for the execution of the experiment. - :param metadata: Additional metadata to be stored with the experiment. + Create a new experiment from a definition. 
""" + experiment_id = definition.id + experiment_type = definition.type + if await self._experiments.exists(id=experiment_id): raise EosExperimentStateError(f"Experiment '{experiment_id}' already exists.") @@ -54,16 +47,7 @@ async def create_experiment( if not experiment_config: raise EosExperimentStateError(f"Experiment type '{experiment_type}' not found in the configuration.") - labs = experiment_config.labs - - experiment = Experiment( - id=experiment_id, - type=experiment_type, - execution_parameters=execution_parameters or ExperimentExecutionParameters(), - labs=labs, - dynamic_parameters=dynamic_parameters or {}, - metadata=metadata or {}, - ) + experiment = Experiment.from_definition(definition) await self._experiments.create(experiment.model_dump()) log.info(f"Created experiment '{experiment_id}'.") diff --git a/eos/orchestration/modules/__init__.py b/eos/orchestration/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/eos/orchestration/modules/campaign_module.py b/eos/orchestration/modules/campaign_module.py new file mode 100644 index 0000000..59c0560 --- /dev/null +++ b/eos/orchestration/modules/campaign_module.py @@ -0,0 +1,144 @@ +import asyncio +import traceback + +from eos.campaigns.campaign_executor import CampaignExecutor +from eos.campaigns.campaign_executor_factory import CampaignExecutorFactory +from eos.campaigns.campaign_manager import CampaignManager +from eos.campaigns.entities.campaign import Campaign, CampaignStatus, CampaignDefinition +from eos.campaigns.exceptions import EosCampaignExecutionError +from eos.configuration.configuration_manager import ConfigurationManager +from eos.logging.logger import log +from eos.orchestration.exceptions import EosExperimentDoesNotExistError + + +class CampaignModule: + """ + Top-level campaign functionality integration. + Exposes an interface for submission, monitoring, and cancellation of campaigns. + """ + + def __init__( + self, + configuration_manager: ConfigurationManager, + campaign_manager: CampaignManager, + campaign_executor_factory: CampaignExecutorFactory, + ): + self._configuration_manager = configuration_manager + self._campaign_manager = campaign_manager + self._campaign_executor_factory = campaign_executor_factory + + self._campaign_submission_lock = asyncio.Lock() + self._submitted_campaigns: dict[str, CampaignExecutor] = {} + self._campaign_cancellation_queue = asyncio.Queue(maxsize=100) + + async def get_campaign(self, campaign_id: str) -> Campaign | None: + """Get a campaign by its unique identifier.""" + return await self._campaign_manager.get_campaign(campaign_id) + + async def submit_campaign( + self, + campaign_definition: CampaignDefinition, + ) -> None: + """Submit a new campaign for execution.""" + campaign_id = campaign_definition.id + experiment_type = campaign_definition.experiment_type + + self._validate_experiment_type(experiment_type) + + async with self._campaign_submission_lock: + if campaign_id in self._submitted_campaigns: + log.warning(f"Campaign '{campaign_id}' is already submitted. 
Ignoring new submission.")
+                return
+
+            campaign_executor = self._campaign_executor_factory.create(campaign_definition)
+
+            self._submitted_campaigns[campaign_id] = campaign_executor
+            try:
+                await campaign_executor.start_campaign()
+            except EosCampaignExecutionError:
+                log.error(f"Failed to submit campaign '{campaign_id}': {traceback.format_exc()}")
+                del self._submitted_campaigns[campaign_id]
+                return
+
+        log.info(f"Submitted campaign '{campaign_id}'.")
+
+    async def cancel_campaign(self, campaign_id: str) -> None:
+        """Cancel a campaign that is currently being executed."""
+        if campaign_id in self._submitted_campaigns:
+            await self._campaign_cancellation_queue.put(campaign_id)
+            log.info(f"Queued campaign '{campaign_id}' for cancellation.")
+
+    async def fail_running_campaigns(self) -> None:
+        """Fail all running campaigns."""
+        running_campaigns = await self._campaign_manager.get_campaigns(status=CampaignStatus.RUNNING.value)
+
+        for campaign in running_campaigns:
+            await self._campaign_manager.fail_campaign(campaign.id)
+
+        if running_campaigns:
+            log.warning(
+                "All running campaigns have been marked as failed. Please review the state of the system and re-submit "
+                "with resume=True."
+            )
+
+    async def process_campaigns(self) -> None:
+        """Try to make progress on all submitted campaigns."""
+        if not self._submitted_campaigns:
+            return
+
+        results = await asyncio.gather(
+            *(self._process_single_campaign(cid, executor) for cid, executor in self._submitted_campaigns.items()),
+            return_exceptions=True,
+        )
+
+        completed_campaigns: list[str] = []
+        failed_campaigns: list[str] = []
+
+        for campaign_id, completed, failed in results:
+            if completed:
+                completed_campaigns.append(campaign_id)
+            elif failed:
+                failed_campaigns.append(campaign_id)
+
+        for campaign_id in completed_campaigns:
+            log.info(f"Completed campaign '{campaign_id}'.")
+            self._submitted_campaigns[campaign_id].cleanup()
+            del self._submitted_campaigns[campaign_id]
+
+        for campaign_id in failed_campaigns:
+            log.error(f"Failed campaign '{campaign_id}'.")
+            self._submitted_campaigns[campaign_id].cleanup()
+            del self._submitted_campaigns[campaign_id]
+
+    async def _process_single_campaign(
+        self, campaign_id: str, campaign_executor: CampaignExecutor
+    ) -> tuple[str, bool, bool]:
+        try:
+            completed = await campaign_executor.progress_campaign()
+            return campaign_id, completed, False
+        except EosCampaignExecutionError:
+            log.error(f"Error in campaign '{campaign_id}': {traceback.format_exc()}")
+            return campaign_id, False, True
+
+    async def process_campaign_cancellations(self) -> None:
+        """Try to cancel all campaigns that are queued for cancellation."""
+        campaign_ids = []
+        while not self._campaign_cancellation_queue.empty():
+            campaign_ids.append(await self._campaign_cancellation_queue.get())
+
+        if not campaign_ids:
+            return
+
+        log.warning(f"Attempting to cancel campaigns: {campaign_ids}")
+        await asyncio.gather(*[self._submitted_campaigns[camp_id].cancel_campaign() for camp_id in campaign_ids])
+
+        for campaign_id in campaign_ids:
+            self._submitted_campaigns[campaign_id].cleanup()
+            del self._submitted_campaigns[campaign_id]
+
+        log.warning(f"Cancelled campaigns: {campaign_ids}")
+
+    def _validate_experiment_type(self, experiment_type: str) -> None:
+        if experiment_type not in self._configuration_manager.experiments:
+            log.error(f"Cannot submit experiment of type '{experiment_type}' as it does not exist.")
+            raise EosExperimentDoesNotExistError
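For orientation, a minimal sketch of driving the new ``CampaignModule``. The ``CampaignDefinition`` fields ``max_experiments``, ``max_concurrent_experiments``, ``optimize``, and ``optimizer_computer_ip`` are assumptions mirroring the campaign submission payload; only ``id`` and ``experiment_type`` are confirmed by this diff.

.. code-block:: python

    from eos.campaigns.entities.campaign import CampaignDefinition
    from eos.orchestration.modules.campaign_module import CampaignModule


    async def run_campaign(campaigns: CampaignModule) -> None:
        # Field names below mirror the REST submission payload and are
        # assumptions, not an authoritative reference.
        definition = CampaignDefinition(
            id="mix_colors",
            experiment_type="color_mixing_1",
            max_experiments=150,
            max_concurrent_experiments=1,
            optimize=True,
            optimizer_computer_ip="127.0.0.1",
            resume=False,
        )
        await campaigns.submit_campaign(definition)

        # The orchestrator's event loop is expected to call these
        # periodically to advance submitted campaigns and apply any
        # queued cancellations.
        await campaigns.process_campaigns()
        await campaigns.process_campaign_cancellations()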
diff --git a/eos/orchestration/modules/experiment_module.py b/eos/orchestration/modules/experiment_module.py
new file mode 100644
index 0000000..794800a
--- /dev/null
+++ b/eos/orchestration/modules/experiment_module.py
@@ -0,0 +1,165 @@
+import asyncio
+import traceback
+from typing import Any, TYPE_CHECKING
+
+from eos.configuration.configuration_manager import ConfigurationManager
+from eos.configuration.validation import validation_utils
+from eos.experiments.entities.experiment import Experiment, ExperimentStatus, ExperimentDefinition
+from eos.experiments.exceptions import EosExperimentExecutionError
+from eos.experiments.experiment_executor_factory import ExperimentExecutorFactory
+from eos.experiments.experiment_manager import ExperimentManager
+from eos.logging.logger import log
+from eos.orchestration.exceptions import EosExperimentDoesNotExistError
+
+if TYPE_CHECKING:
+    from eos.experiments.experiment_executor import ExperimentExecutor
+
+
+class ExperimentModule:
+    """
+    Top-level experiment functionality integration.
+    Exposes an interface for submission, monitoring, and cancellation of experiments.
+    """
+
+    def __init__(
+        self,
+        configuration_manager: ConfigurationManager,
+        experiment_manager: ExperimentManager,
+        experiment_executor_factory: ExperimentExecutorFactory,
+    ):
+        self._configuration_manager = configuration_manager
+        self._experiment_manager = experiment_manager
+        self._experiment_executor_factory = experiment_executor_factory
+
+        self._experiment_submission_lock = asyncio.Lock()
+        self._submitted_experiments: dict[str, ExperimentExecutor] = {}
+        self._experiment_cancellation_queue = asyncio.Queue(maxsize=100)
+
+    async def get_experiment(self, experiment_id: str) -> Experiment | None:
+        """Get an experiment by its unique identifier."""
+        return await self._experiment_manager.get_experiment(experiment_id)
+
+    async def submit_experiment(
+        self,
+        experiment_definition: ExperimentDefinition,
+    ) -> None:
+        """Submit a new experiment for execution. The experiment will be executed asynchronously."""
+        experiment_id = experiment_definition.id
+        experiment_type = experiment_definition.type
+
+        self._validate_experiment_type(experiment_type)
+
+        async with self._experiment_submission_lock:
+            if experiment_id in self._submitted_experiments:
+                log.warning(f"Experiment '{experiment_id}' is already submitted. Ignoring new submission.")
+                return
+
+            experiment_executor = self._experiment_executor_factory.create(experiment_definition)
+
+            self._submitted_experiments[experiment_id] = experiment_executor
+            try:
+                await experiment_executor.start_experiment()
+            except EosExperimentExecutionError:
+                log.error(f"Failed to submit experiment '{experiment_id}': {traceback.format_exc()}")
+                del self._submitted_experiments[experiment_id]
+                return
+
+        log.info(f"Submitted experiment '{experiment_id}'.")
+
+    async def cancel_experiment(self, experiment_id: str) -> None:
+        """
+        Cancel an experiment that is currently being executed.
+
+        :param experiment_id: The unique identifier of the experiment.
+        """
+        if experiment_id in self._submitted_experiments:
+            await self._experiment_cancellation_queue.put(experiment_id)
+
+    async def fail_running_experiments(self) -> None:
+        """Fail all running experiments."""
+        running_experiments = await self._experiment_manager.get_experiments(status=ExperimentStatus.RUNNING.value)
+
+        for experiment in running_experiments:
+            await self._experiment_manager.fail_experiment(experiment.id)
+
+        if running_experiments:
+            log.warning(
+                "All running experiments have been marked as failed. Please review the state of the system and "
+                "re-submit with resume=True."
+            )
+
+    async def get_experiment_types(self) -> list[str]:
+        """
+        Get a list of all experiment types that are defined in the configuration.
+        """
+        return list(self._configuration_manager.experiments.keys())
+
+    async def get_experiment_dynamic_params_template(self, experiment_type: str) -> dict[str, Any]:
+        """
+        Get the dynamic parameters template for a given experiment type.
+
+        :param experiment_type: The type of the experiment.
+        :return: The dynamic parameter template.
+        """
+        experiment_config = self._configuration_manager.experiments[experiment_type]
+        dynamic_parameters = {}
+
+        for task in experiment_config.tasks:
+            task_dynamic_parameters = {
+                name: "PLACEHOLDER"
+                for name, value in task.parameters.items()
+                if validation_utils.is_dynamic_parameter(value)
+            }
+            if task_dynamic_parameters:
+                dynamic_parameters[task.id] = task_dynamic_parameters
+
+        return dynamic_parameters
+
+    async def process_experiments(self) -> None:
+        """Try to make progress on all submitted experiments."""
+        if not self._submitted_experiments:
+            return
+
+        completed_experiments = []
+        failed_experiments = []
+
+        for experiment_id, experiment_executor in self._submitted_experiments.items():
+            try:
+                completed = await experiment_executor.progress_experiment()
+
+                if completed:
+                    completed_experiments.append(experiment_id)
+            except EosExperimentExecutionError:
+                log.error(f"Error in experiment '{experiment_id}': {traceback.format_exc()}")
+                failed_experiments.append(experiment_id)
+
+        for experiment_id in completed_experiments:
+            log.info(f"Completed experiment '{experiment_id}'.")
+            del self._submitted_experiments[experiment_id]
+
+        for experiment_id in failed_experiments:
+            log.error(f"Failed experiment '{experiment_id}'.")
+            del self._submitted_experiments[experiment_id]
+
+    async def process_experiment_cancellations(self) -> None:
+        """Try to cancel all experiments that are queued for cancellation."""
+        experiment_ids = []
+        while not self._experiment_cancellation_queue.empty():
+            experiment_ids.append(await self._experiment_cancellation_queue.get())
+
+        if not experiment_ids:
+            return
+
+        log.warning(f"Attempting to cancel experiments: {experiment_ids}")
+        cancellation_tasks = [self._submitted_experiments[exp_id].cancel_experiment() for exp_id in experiment_ids]
+        await asyncio.gather(*cancellation_tasks)
+
+        for exp_id in experiment_ids:
+            del self._submitted_experiments[exp_id]
+
+        log.warning(f"Cancelled experiments: {experiment_ids}")
+
+    def _validate_experiment_type(self, experiment_type: str) -> None:
+        if experiment_type not in self._configuration_manager.experiments:
+            log.error(f"Cannot submit experiment of type '{experiment_type}' as it does not exist.")
+            raise EosExperimentDoesNotExistError
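The definition-based flow above replaces the old keyword-argument submission: an ``ExperimentDefinition`` carries the payload and ``Experiment.from_definition`` lifts it into persisted state. A small sketch using only names from this diff (the ids and dynamic parameter values are illustrative):

.. code-block:: python

    from eos.experiments.entities.experiment import (
        Experiment,
        ExperimentDefinition,
        ExperimentStatus,
    )

    # Illustrative ids and dynamic parameters; real names come from the
    # loaded experiment configuration.
    definition = ExperimentDefinition(
        id="color_mixing_a1",
        type="color_mixing_1",
        dynamic_parameters={"mix_colors": {"mixing_time": 30}},
        resume=False,
    )

    # ExperimentManager.create_experiment now persists exactly this state,
    # and ExperimentModule.submit_experiment(definition) builds the executor.
    experiment = Experiment.from_definition(definition)
    assert experiment.status == ExperimentStatus.CREATED
    assert experiment.running_tasks == [] and experiment.completed_tasks == []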
diff --git a/eos/orchestration/modules/lab_module.py b/eos/orchestration/modules/lab_module.py
new file mode 100644
index 0000000..d290ae5
--- /dev/null
+++ b/eos/orchestration/modules/lab_module.py
@@ -0,0 +1,50 @@
+from eos.configuration.configuration_manager import ConfigurationManager
+from eos.configuration.entities.lab import LabDeviceConfig
+
+
+class LabModule:
+    """
+    Top-level lab functionality integration.
+    Exposes an interface for querying lab state.
+ """ + + def __init__( + self, + configuration_manager: ConfigurationManager, + ): + self._configuration_manager = configuration_manager + + async def get_lab_devices( + self, lab_types: set[str] | None = None, task_type: str | None = None + ) -> dict[str, dict[str, LabDeviceConfig]]: + """ + Get the devices that are available in the given labs or for a specific task type. + + :param lab_types: The lab types. If None, all labs will be considered. + :param task_type: The task type. If provided, only devices supporting this task type will be returned. + :return: A dictionary of lab types and the devices available in each lab. + """ + lab_devices = {} + + if not lab_types or not any(lab_type.strip() for lab_type in lab_types): + lab_types = set(self._configuration_manager.labs.keys()) + + task_device_types = set() + if task_type: + task_spec = self._configuration_manager.task_specs.get_spec_by_type(task_type) + task_device_types = set(task_spec.device_types) if task_spec.device_types else set() + + for lab_type in lab_types: + lab = self._configuration_manager.labs.get(lab_type) + if not lab: + continue + + if task_device_types: + devices = {name: device for name, device in lab.devices.items() if device.type in task_device_types} + else: + devices = lab.devices + + if devices: + lab_devices[lab_type] = devices + + return lab_devices diff --git a/eos/orchestration/modules/loading_module.py b/eos/orchestration/modules/loading_module.py new file mode 100644 index 0000000..4ce8442 --- /dev/null +++ b/eos/orchestration/modules/loading_module.py @@ -0,0 +1,183 @@ +import traceback + +from eos.configuration.configuration_manager import ConfigurationManager +from eos.configuration.exceptions import EosConfigurationError +from eos.containers.container_manager import ContainerManager +from eos.devices.device_manager import DeviceManager +from eos.experiments.entities.experiment import ExperimentStatus +from eos.experiments.experiment_manager import ExperimentManager +from asyncio import Lock as AsyncLock + +from eos.logging.logger import log +from eos.orchestration.exceptions import EosExperimentTypeInUseError + + +class LoadingModule: + """Responsible for loading/unloading entities such as labs, experiments, etc.""" + + def __init__( + self, + configuration_manager: ConfigurationManager, + device_manager: DeviceManager, + container_manager: ContainerManager, + experiment_manager: ExperimentManager, + ): + self._configuration_manager = configuration_manager + self._device_manager = device_manager + self._container_manager = container_manager + self._experiment_manager = experiment_manager + + self._loading_lock = AsyncLock() + + async def load_labs(self, labs: set[str]) -> None: + """ + Load one or more labs into the orchestrator. + """ + self._configuration_manager.load_labs(labs) + await self._device_manager.update_devices(loaded_labs=labs) + await self._container_manager.update_containers(loaded_labs=labs) + + async def unload_labs(self, labs: set[str]) -> None: + """ + Unload one or more labs from the orchestrator. + """ + self._configuration_manager.unload_labs(labs) + await self._device_manager.update_devices(unloaded_labs=labs) + await self._container_manager.update_containers(unloaded_labs=labs) + + async def reload_labs(self, lab_types: set[str]) -> None: + """ + Reload one or more labs in the orchestrator. 
+ """ + async with self._loading_lock: + experiments_to_reload = set() + for lab_type in lab_types: + existing_experiments = await self._experiment_manager.get_experiments( + status=ExperimentStatus.RUNNING.value + ) + + for experiment in existing_experiments: + experiment_config = self._configuration_manager.experiments[experiment.type] + if lab_type in experiment_config.labs: + log.error(f"Cannot reload lab type '{lab_type}' as there are running experiments that use it.") + raise EosExperimentTypeInUseError + + # Determine experiments to reload for this lab type + for experiment_type, experiment_config in self._configuration_manager.experiments.items(): + if lab_type in experiment_config.labs: + experiments_to_reload.add(experiment_type) + try: + await self.unload_labs(lab_types) + await self.load_labs(lab_types) + self.load_experiments(experiments_to_reload) + except EosConfigurationError: + log.error(f"Error reloading labs: {traceback.format_exc()}") + raise + + async def update_loaded_labs(self, lab_types: set[str]) -> None: + """ + Update the loaded labs with new configurations. + """ + async with self._loading_lock: + currently_loaded = set(self._configuration_manager.labs.keys()) + + if currently_loaded == lab_types: + return + + to_unload = currently_loaded - lab_types + to_load = lab_types - currently_loaded + + for lab_type in to_unload: + existing_experiments = await self._experiment_manager.get_experiments( + status=ExperimentStatus.RUNNING.value + ) + + for experiment in existing_experiments: + experiment_config = self._configuration_manager.experiments[experiment.type] + if lab_type in experiment_config.labs: + log.error(f"Cannot unload lab type '{lab_type}' as there are running experiments that use it.") + raise EosExperimentTypeInUseError + + try: + await self.unload_labs(to_unload) + await self.load_labs(to_load) + except EosConfigurationError: + log.error(f"Error updating loaded labs: {traceback.format_exc()}") + raise + + async def list_loaded_labs(self) -> dict[str, bool]: + """ + Return a dictionary of lab types and a boolean indicating whether they are loaded. + """ + return self._configuration_manager.get_lab_loaded_statuses() + + def load_experiments(self, experiment_types: set[str]) -> None: + """ + Load one or more experiments into the orchestrator. + """ + self._configuration_manager.load_experiments(experiment_types) + + def unload_experiments(self, experiment_types: set[str]) -> None: + """ + Unload one or more experiments from the orchestrator. + """ + self._configuration_manager.unload_experiments(experiment_types) + + async def reload_experiments(self, experiment_types: set[str]) -> None: + """ + Reload one or more experiments in the orchestrator. + """ + async with self._loading_lock: + for experiment_type in experiment_types: + existing_experiments = await self._experiment_manager.get_experiments( + status=ExperimentStatus.RUNNING.value, type=experiment_type + ) + if existing_experiments: + log.error( + f"Cannot reload experiment type '{experiment_type}' as there are running experiments of this " + f"type." + ) + raise EosExperimentTypeInUseError + try: + self.unload_experiments(experiment_types) + self.load_experiments(experiment_types) + except EosConfigurationError: + log.error(f"Error reloading experiments: {traceback.format_exc()}") + raise + + async def update_loaded_experiments(self, experiment_types: set[str]) -> None: + """ + Update the loaded experiments with new configurations. 
+ """ + async with self._loading_lock: + currently_loaded = set(self._configuration_manager.experiments.keys()) + + if currently_loaded == experiment_types: + return + + to_unload = currently_loaded - experiment_types + to_load = experiment_types - currently_loaded + + for experiment_type in to_unload: + existing_experiments = await self._experiment_manager.get_experiments( + status=ExperimentStatus.RUNNING.value, type=experiment_type + ) + if existing_experiments: + log.error( + f"Cannot unload experiment type '{experiment_type}' as there are running experiments of this " + f"type." + ) + raise EosExperimentTypeInUseError + + try: + self.unload_experiments(to_unload) + self.load_experiments(to_load) + except EosConfigurationError: + log.error(f"Error updating loaded experiments: {traceback.format_exc()}") + raise + + async def get_loaded(self) -> dict[str, bool]: + """ + Return a dictionary of experiment types and a boolean indicating whether they are loaded. + """ + return self._configuration_manager.get_experiment_loaded_statuses() diff --git a/eos/orchestration/modules/result_module.py b/eos/orchestration/modules/result_module.py new file mode 100644 index 0000000..46a65ac --- /dev/null +++ b/eos/orchestration/modules/result_module.py @@ -0,0 +1,27 @@ +from collections.abc import AsyncIterable + +from eos.tasks.task_manager import TaskManager + + +class ResultModule: + """ + Top-level result querying integration. + Exposes an interface for querying results, such as downloading task output files. + """ + + def __init__(self, task_manager: TaskManager): + self._task_manager = task_manager + + def download_task_output_file( + self, experiment_id: str, task_id: str, file_name: str, chunk_size: int = 3 * 1024 * 1024 + ) -> AsyncIterable[bytes]: + """ + Stream the contents of a task output file in chunks. + """ + return self._task_manager.stream_task_output_file(experiment_id, task_id, file_name, chunk_size) + + async def list_task_output_files(self, experiment_id: str, task_id: str) -> list[str]: + """ + Get a list of all output files for a given task. + """ + return self._task_manager.list_task_output_files(experiment_id, task_id) diff --git a/eos/orchestration/modules/task_module.py b/eos/orchestration/modules/task_module.py new file mode 100644 index 0000000..65f8f95 --- /dev/null +++ b/eos/orchestration/modules/task_module.py @@ -0,0 +1,90 @@ +from eos.configuration.configuration_manager import ConfigurationManager +from eos.configuration.entities.task_spec import TaskSpecConfig +from eos.logging.logger import log +from eos.tasks.entities.task import Task, TaskStatus, TaskDefinition +from eos.tasks.exceptions import EosTaskCancellationError +from eos.tasks.on_demand_task_executor import OnDemandTaskExecutor +from eos.tasks.task_executor import TaskExecutor +from eos.tasks.task_manager import TaskManager + + +class TaskModule: + """ + Top-level task functionality integration. + Exposes an interface for submission, monitoring, and cancellation of tasks. + """ + + def __init__( + self, + configuration_manager: ConfigurationManager, + task_manager: TaskManager, + task_executor: TaskExecutor, + on_demand_task_executor: OnDemandTaskExecutor, + ): + self._configuration_manager = configuration_manager + self._task_manager = task_manager + self._task_executor = task_executor + self._on_demand_task_executor = on_demand_task_executor + + async def get_task(self, experiment_id: str, task_id: str) -> Task: + """ + Get a task by its unique identifier. 
+
+        :param experiment_id: The unique identifier of the experiment.
+        :param task_id: The unique identifier of the task.
+        :return: The task entity.
+        """
+        return await self._task_manager.get_task(experiment_id, task_id)
+
+    def submit_task(
+        self,
+        task_definition: TaskDefinition,
+    ) -> None:
+        """
+        Submit a new task for execution.
+
+        :param task_definition: The task definition.
+        :return: None. The task is executed asynchronously by the on-demand task executor.
+        """
+        self._on_demand_task_executor.submit_task(task_definition)
+
+    async def cancel_task(self, task_id: str, experiment_id: str = "on_demand") -> None:
+        """
+        Cancel a task that is currently being executed.
+
+        :param task_id: The unique identifier of the task.
+        :param experiment_id: The unique identifier of the experiment.
+        """
+        try:
+            if experiment_id == "on_demand":
+                await self._on_demand_task_executor.request_task_cancellation(task_id)
+            else:
+                await self._task_executor.request_task_cancellation(experiment_id, task_id)
+        except EosTaskCancellationError:
+            log.error(f"Failed to cancel task '{task_id}'.")
+
+    async def fail_running_tasks(self) -> None:
+        """Fail all running tasks."""
+        running_tasks = await self._task_manager.get_tasks(status=TaskStatus.RUNNING.value)
+        for task in running_tasks:
+            await self._task_manager.fail_task(task.experiment_id, task.id)
+            log.warning(f"EXP '{task.experiment_id}' - Failed task '{task.id}'.")
+
+        if running_tasks:
+            log.warning("All running tasks have been marked as failed. Please review the state of the system.")
+
+    async def get_task_types(self) -> list[str]:
+        """Get a list of all task types that are defined in the configuration."""
+        return [task.type for task in self._configuration_manager.task_specs.get_all_specs().values()]
+
+    async def get_task_spec(self, task_type: str) -> TaskSpecConfig | None:
+        """Get the task specification for a given task type."""
+        task_spec = self._configuration_manager.task_specs.get_spec_by_type(task_type)
+        if not task_spec:
+            log.error(f"Task type '{task_type}' does not exist.")
+
+        return task_spec
+
+    async def process_on_demand_tasks(self) -> None:
+        """Try to make progress on all on-demand tasks."""
+        await self._on_demand_task_executor.process_tasks()
diff --git a/eos/orchestration/orchestrator.py b/eos/orchestration/orchestrator.py
index f1af601..590ebac 100644
--- a/eos/orchestration/orchestrator.py
+++ b/eos/orchestration/orchestrator.py
@@ -1,52 +1,34 @@
 import asyncio
-import traceback
-from asyncio import Lock as AsyncLock
-from collections.abc import AsyncIterable
-from typing import Any, TYPE_CHECKING
-
 import ray
 
 from eos.campaigns.campaign_executor_factory import CampaignExecutorFactory
 from eos.campaigns.campaign_manager import CampaignManager
 from eos.campaigns.campaign_optimizer_manager import CampaignOptimizerManager
-from eos.campaigns.entities.campaign import CampaignStatus, CampaignExecutionParameters, Campaign
-from eos.campaigns.exceptions import EosCampaignExecutionError
 from eos.configuration.configuration_manager import ConfigurationManager
-from eos.configuration.entities.lab import LabDeviceConfig
-from eos.configuration.entities.task import TaskConfig
-from eos.configuration.entities.task_specification import TaskSpecification
-from eos.configuration.exceptions import EosConfigurationError
-from eos.configuration.validation import validation_utils
+from eos.configuration.entities.eos_config import DbConfig
 from eos.containers.container_manager import ContainerManager
 from eos.devices.device_manager import DeviceManager
-from
eos.experiments.entities.experiment import ExperimentStatus, Experiment, ExperimentExecutionParameters -from eos.experiments.exceptions import EosExperimentExecutionError from eos.experiments.experiment_executor_factory import ExperimentExecutorFactory from eos.experiments.experiment_manager import ExperimentManager from eos.logging.logger import log from eos.monitoring.graceful_termination_monitor import GracefulTerminationMonitor -from eos.orchestration.exceptions import ( - EosExperimentTypeInUseError, - EosExperimentDoesNotExistError, -) +from eos.orchestration.modules.campaign_module import CampaignModule +from eos.orchestration.modules.experiment_module import ExperimentModule +from eos.orchestration.modules.lab_module import LabModule +from eos.orchestration.modules.loading_module import LoadingModule +from eos.orchestration.modules.result_module import ResultModule +from eos.orchestration.modules.task_module import TaskModule from eos.persistence.async_mongodb_interface import AsyncMongoDbInterface from eos.persistence.file_db_interface import FileDbInterface -from eos.persistence.service_credentials import ServiceCredentials from eos.resource_allocation.resource_allocation_manager import ( ResourceAllocationManager, ) from eos.scheduling.greedy_scheduler import GreedyScheduler -from eos.tasks.entities.task import Task, TaskStatus -from eos.tasks.exceptions import EosTaskCancellationError from eos.tasks.on_demand_task_executor import OnDemandTaskExecutor from eos.tasks.task_executor import TaskExecutor from eos.tasks.task_manager import TaskManager from eos.utils.singleton import Singleton -if TYPE_CHECKING: - from eos.campaigns.campaign_executor import CampaignExecutor - from eos.experiments.experiment_executor import ExperimentExecutor - class Orchestrator(metaclass=Singleton): """ @@ -56,8 +38,8 @@ class Orchestrator(metaclass=Singleton): def __init__( self, user_dir: str, - db_credentials: ServiceCredentials, - file_db_credentials: ServiceCredentials, + db_credentials: DbConfig, + file_db_credentials: DbConfig, ): self._user_dir = user_dir self._db_credentials = db_credentials @@ -82,15 +64,12 @@ def __init__( self._experiment_executor_factory: ExperimentExecutorFactory | None = None self._campaign_executor_factory: CampaignExecutorFactory | None = None - self._campaign_submission_lock = AsyncLock() - self._submitted_campaigns: dict[str, CampaignExecutor] = {} - self._experiment_submission_lock = AsyncLock() - self._submitted_experiments: dict[str, ExperimentExecutor] = {} - - self._campaign_cancellation_queue = asyncio.Queue(maxsize=100) - self._experiment_cancellation_queue = asyncio.Queue(maxsize=100) - - self._loading_lock = AsyncLock() + self._loading: LoadingModule | None = None + self._labs: LabModule | None = None + self._results: ResultModule | None = None + self._tasks: TaskModule | None = None + self._experiments: ExperimentModule | None = None + self._campaigns: CampaignModule | None = None async def initialize(self) -> None: """ @@ -102,7 +81,7 @@ async def initialize(self) -> None: log.info("Initializing EOS...") log.info("Initializing Ray cluster...") ray.init(namespace="eos", resources={"eos-core": 1000}) - log.info("Ray initialized.") + log.info("Ray cluster initialized.") # Configuration ########################################### self._configuration_manager = ConfigurationManager(self._user_dir) @@ -170,44 +149,25 @@ async def initialize(self) -> None: self._experiment_executor_factory, ) - await self._fail_all_running_work() - - self._initialized = True + # 
Orchestrator Modules ####################################### + self._loading = LoadingModule( + self._configuration_manager, self._device_manager, self._container_manager, self._experiment_manager + ) + self._labs = LabModule(self._configuration_manager) + self._results = ResultModule(self._task_manager) + self._tasks = TaskModule( + self._configuration_manager, self._task_manager, self._task_executor, self._on_demand_task_executor + ) + self._experiments = ExperimentModule( + self._configuration_manager, self._experiment_manager, self._experiment_executor_factory + ) + self._campaigns = CampaignModule( + self._configuration_manager, self._campaign_manager, self._campaign_executor_factory + ) - async def _fail_all_running_work(self) -> None: - """ - When the orchestrator starts, fail all running tasks, experiments, and campaigns. - This is for safety, as if the orchestrator was terminated while there was running work then the state of the - system may be unknown. We want to force manual review of the state of the system and explicitly require - re-submission of any work that was running. - """ - running_tasks = await self._task_manager.get_tasks(status=TaskStatus.RUNNING.value) - for task in running_tasks: - await self._task_manager.fail_task(task.experiment_id, task.id) - log.warning(f"EXP '{task.experiment_id}' - Failed task '{task.id}'.") - - running_experiments = await self._experiment_manager.get_experiments(status=ExperimentStatus.RUNNING.value) - for experiment in running_experiments: - await self._experiment_manager.fail_experiment(experiment.id) - - running_campaigns = await self._campaign_manager.get_campaigns(status=CampaignStatus.RUNNING.value) - for campaign in running_campaigns: - await self._campaign_manager.fail_campaign(campaign.id) - - if running_tasks: - log.warning("All running tasks have been marked as failed. Please review the state of the system.") - - if running_experiments: - log.warning( - "All running experiments have been marked as failed. Please review the state of the system and " - "re-submit with resume=True." - ) + await self._fail_running_work() - if running_campaigns: - log.warning( - "All running campaigns have been marked as failed. Please review the state of the system and re-submit " - "with resume=True." - ) + self._initialized = True async def terminate(self) -> None: """ @@ -216,404 +176,13 @@ async def terminate(self) -> None: """ if not self._initialized: return - log.info("Cleaning up device actors...") + log.info("Cleaning up devices...") await self._device_manager.cleanup_device_actors() log.info("Shutting down Ray cluster...") ray.shutdown() await self._graceful_termination_monitor.set_terminated_gracefully() self._initialized = False - async def load_labs(self, labs: set[str]) -> None: - """ - Load one or more labs into the orchestrator. - """ - self._configuration_manager.load_labs(labs) - await self._device_manager.update_devices(loaded_labs=labs) - await self._container_manager.update_containers(loaded_labs=labs) - - async def unload_labs(self, labs: set[str]) -> None: - """ - Unload one or more labs from the orchestrator. - """ - self._configuration_manager.unload_labs(labs) - await self._device_manager.update_devices(unloaded_labs=labs) - await self._container_manager.update_containers(unloaded_labs=labs) - - async def reload_labs(self, lab_types: set[str]) -> None: - """ - Reload one or more labs in the orchestrator. 
- """ - async with self._loading_lock: - experiments_to_reload = set() - for lab_type in lab_types: - existing_experiments = await self._experiment_manager.get_experiments( - status=ExperimentStatus.RUNNING.value - ) - - for experiment in existing_experiments: - experiment_config = self._configuration_manager.experiments[experiment.type] - if lab_type in experiment_config.labs: - log.error(f"Cannot reload lab type '{lab_type}' as there are running experiments that use it.") - raise EosExperimentTypeInUseError - - # Determine experiments to reload for this lab type - for experiment_type, experiment_config in self._configuration_manager.experiments.items(): - if lab_type in experiment_config.labs: - experiments_to_reload.add(experiment_type) - try: - await self.unload_labs(lab_types) - await self.load_labs(lab_types) - self.load_experiments(experiments_to_reload) - except EosConfigurationError: - log.error(f"Error reloading labs: {traceback.format_exc()}") - raise - - async def update_loaded_labs(self, lab_types: set[str]) -> None: - """ - Update the loaded labs with new configurations. - """ - async with self._loading_lock: - currently_loaded = set(self._configuration_manager.labs.keys()) - - if currently_loaded == lab_types: - return - - to_unload = currently_loaded - lab_types - to_load = lab_types - currently_loaded - - for lab_type in to_unload: - existing_experiments = await self._experiment_manager.get_experiments( - status=ExperimentStatus.RUNNING.value - ) - - for experiment in existing_experiments: - experiment_config = self._configuration_manager.experiments[experiment.type] - if lab_type in experiment_config.labs: - log.error(f"Cannot unload lab type '{lab_type}' as there are running experiments that use it.") - raise EosExperimentTypeInUseError - - try: - await self.unload_labs(to_unload) - await self.load_labs(to_load) - except EosConfigurationError: - log.error(f"Error updating loaded labs: {traceback.format_exc()}") - raise - - async def get_lab_loaded_statuses(self) -> dict[str, bool]: - """ - Return a dictionary of lab types and a boolean indicating whether they are loaded. - """ - return self._configuration_manager.get_lab_loaded_statuses() - - def load_experiments(self, experiment_types: set[str]) -> None: - """ - Load one or more experiments into the orchestrator. - """ - self._configuration_manager.load_experiments(experiment_types) - - def unload_experiments(self, experiment_types: set[str]) -> None: - """ - Unload one or more experiments from the orchestrator. - """ - self._configuration_manager.unload_experiments(experiment_types) - - async def reload_experiments(self, experiment_types: set[str]) -> None: - """ - Reload one or more experiments in the orchestrator. - """ - async with self._loading_lock: - for experiment_type in experiment_types: - existing_experiments = await self._experiment_manager.get_experiments( - status=ExperimentStatus.RUNNING.value, type=experiment_type - ) - if existing_experiments: - log.error( - f"Cannot reload experiment type '{experiment_type}' as there are running experiments of this " - f"type." - ) - raise EosExperimentTypeInUseError - try: - self.unload_experiments(experiment_types) - self.load_experiments(experiment_types) - except EosConfigurationError: - log.error(f"Error reloading experiments: {traceback.format_exc()}") - raise - - async def update_loaded_experiments(self, experiment_types: set[str]) -> None: - """ - Update the loaded experiments with new configurations. 
- """ - async with self._loading_lock: - currently_loaded = set(self._configuration_manager.experiments.keys()) - - if currently_loaded == experiment_types: - return - - to_unload = currently_loaded - experiment_types - to_load = experiment_types - currently_loaded - - for experiment_type in to_unload: - existing_experiments = await self._experiment_manager.get_experiments( - status=ExperimentStatus.RUNNING.value, type=experiment_type - ) - if existing_experiments: - log.error( - f"Cannot unload experiment type '{experiment_type}' as there are running experiments of this " - f"type." - ) - raise EosExperimentTypeInUseError - - try: - self.unload_experiments(to_unload) - self.load_experiments(to_load) - except EosConfigurationError: - log.error(f"Error updating loaded experiments: {traceback.format_exc()}") - raise - - async def get_experiment_loaded_statuses(self) -> dict[str, bool]: - """ - Return a dictionary of experiment types and a boolean indicating whether they are loaded. - """ - return self._configuration_manager.get_experiment_loaded_statuses() - - async def get_lab_devices( - self, lab_types: set[str] | None = None, task_type: str | None = None - ) -> dict[str, dict[str, LabDeviceConfig]]: - """ - Get the devices that are available in the given labs or for a specific task type. - - :param lab_types: The lab types. If None, all labs will be considered. - :param task_type: The task type. If provided, only devices supporting this task type will be returned. - :return: A dictionary of lab types and the devices available in each lab. - """ - lab_devices = {} - - if not lab_types or not any(lab_type.strip() for lab_type in lab_types): - lab_types = set(self._configuration_manager.labs.keys()) - - task_device_types = set() - if task_type: - task_spec = self._configuration_manager.task_specs.get_spec_by_type(task_type) - task_device_types = set(task_spec.device_types) if task_spec.device_types else set() - - for lab_type in lab_types: - lab = self._configuration_manager.labs.get(lab_type) - if not lab: - continue - - if task_device_types: - devices = {name: device for name, device in lab.devices.items() if device.type in task_device_types} - else: - devices = lab.devices - - if devices: - lab_devices[lab_type] = devices - - return lab_devices - - async def get_task(self, experiment_id: str, task_id: str) -> Task: - """ - Get a task by its unique identifier. - - :param experiment_id: The unique identifier of the experiment. - :param task_id: The unique identifier of the task. - :return: The task entity. - """ - return await self._task_manager.get_task(experiment_id, task_id) - - def submit_task( - self, - task_config: TaskConfig, - resource_allocation_priority: int = 1, - resource_allocation_timeout: int = 180, - ) -> None: - """ - Submit a new task for execution. By default, tasks submitted in this way have maximum resource allocation - priority and a timeout of 180 seconds. - - :param task_config: The task configuration. This is the same data as defined in an experiment configuration. - :param resource_allocation_priority: The priority of the task in acquiring resources. - :param resource_allocation_timeout: The maximum seconds to wait for resources to be allocated before raising an - error. - :return: The output of the task. 
- """ - self._on_demand_task_executor.submit_task( - task_config, resource_allocation_priority, resource_allocation_timeout - ) - - async def cancel_task(self, task_id: str, experiment_id: str = "on_demand") -> None: - """ - Cancel a task that is currently being executed. - - :param task_id: The unique identifier of the task. - :param experiment_id: The unique identifier of the experiment. - """ - try: - if experiment_id == "on_demand": - await self._on_demand_task_executor.request_task_cancellation(task_id) - else: - await self._task_executor.request_task_cancellation(experiment_id, task_id) - except EosTaskCancellationError: - log.error(f"Failed to cancel task '{task_id}'.") - - async def get_task_types(self) -> list[str]: - """ - Get a list of all task types that are defined in the configuration. - """ - return [task.type for task in self._configuration_manager.task_specs.get_all_specs().values()] - - async def get_task_spec(self, task_type: str) -> TaskSpecification | None: - """ - Get the task specification for a given task type. - """ - task_spec = self._configuration_manager.task_specs.get_spec_by_type(task_type) - if not task_spec: - log.error(f"Task type '{task_type}' does not exist.") - - return task_spec - - def stream_task_output_file( - self, experiment_id: str, task_id: str, file_name: str, chunk_size: int = 3 * 1024 * 1024 - ) -> AsyncIterable[bytes]: - """ - Stream the contents of a task output file in chunks. - """ - return self._task_manager.stream_task_output_file(experiment_id, task_id, file_name, chunk_size) - - async def list_task_output_files(self, experiment_id: str, task_id: str) -> list[str]: - """ - Get a list of all output files for a given task. - """ - return self._task_manager.list_task_output_files(experiment_id, task_id) - - async def get_experiment(self, experiment_id: str) -> Experiment | None: - """ - Get an experiment by its unique identifier. - - :param experiment_id: The unique identifier of the experiment. - :return: The experiment entity. - """ - return await self._experiment_manager.get_experiment(experiment_id) - - async def submit_experiment( - self, - experiment_id: str, - experiment_type: str, - execution_parameters: ExperimentExecutionParameters, - dynamic_parameters: dict[str, dict[str, Any]], - metadata: dict[str, Any] | None = None, - ) -> None: - """ - Submit a new experiment for execution. The experiment will be executed asynchronously. - - :param experiment_id: The unique identifier of the experiment. - :param experiment_type: The type of the experiment. Must have a configuration defined in the - configuration manager. - :param execution_parameters: The execution parameters for the experiment. - :param dynamic_parameters: The dynamic parameters for the experiment. - :param metadata: Any additional metadata. - """ - self._validate_experiment_type_exists(experiment_type) - - async with self._experiment_submission_lock: - if experiment_id in self._submitted_experiments: - log.warning(f"Experiment '{experiment_id}' is already submitted. 
Ignoring new submission.") - return - - experiment_executor = self._experiment_executor_factory.create( - experiment_id, experiment_type, execution_parameters - ) - - try: - await experiment_executor.start_experiment(dynamic_parameters, metadata) - self._submitted_experiments[experiment_id] = experiment_executor - except EosExperimentExecutionError: - log.error(f"Failed to submit experiment '{experiment_id}': {traceback.format_exc()}") - del self._submitted_experiments[experiment_id] - return - - log.info(f"Submitted experiment '{experiment_id}'.") - - async def cancel_experiment(self, experiment_id: str) -> None: - """ - Cancel an experiment that is currently being executed. - - :param experiment_id: The unique identifier of the experiment. - """ - if experiment_id in self._submitted_experiments: - await self._experiment_cancellation_queue.put(experiment_id) - - async def get_experiment_types(self) -> list[str]: - """ - Get a list of all experiment types that are defined in the configuration. - """ - return list(self._configuration_manager.experiments.keys()) - - async def get_experiment_dynamic_params_template(self, experiment_type: str) -> dict[str, Any]: - """ - Get the dynamic parameters template for a given experiment type. - - :param experiment_type: The type of the experiment. - :return: The dynamic parameter template. - """ - experiment_config = self._configuration_manager.experiments[experiment_type] - dynamic_parameters = {} - - for task in experiment_config.tasks: - task_dynamic_parameters = {} - for parameter_name, parameter_value in task.parameters.items(): - if validation_utils.is_dynamic_parameter(parameter_value): - task_dynamic_parameters[parameter_name] = "PLACEHOLDER" - if task_dynamic_parameters: - dynamic_parameters[task.id] = task_dynamic_parameters - - return dynamic_parameters - - async def get_campaign(self, campaign_id: str) -> Campaign | None: - """ - Get a campaign by its unique identifier. - - :param campaign_id: The unique identifier of the campaign. - :return: The campaign entity. - """ - return await self._campaign_manager.get_campaign(campaign_id) - - async def submit_campaign( - self, - campaign_id: str, - experiment_type: str, - campaign_execution_parameters: CampaignExecutionParameters, - ) -> None: - self._validate_experiment_type_exists(experiment_type) - - async with self._campaign_submission_lock: - if campaign_id in self._submitted_campaigns: - log.warning(f"Campaign '{campaign_id}' is already submitted. Ignoring new submission.") - return - - campaign_executor = self._campaign_executor_factory.create( - campaign_id, experiment_type, campaign_execution_parameters - ) - - try: - await campaign_executor.start_campaign() - self._submitted_campaigns[campaign_id] = campaign_executor - except EosCampaignExecutionError: - log.error(f"Failed to submit campaign '{campaign_id}': {traceback.format_exc()}") - del self._submitted_campaigns[campaign_id] - return - - log.info(f"Submitted campaign '{campaign_id}'.") - - async def cancel_campaign(self, campaign_id: str) -> None: - """ - Cancel a campaign that is currently being executed. - - :param campaign_id: The unique identifier of the campaign. - """ - if campaign_id in self._submitted_campaigns: - await self._campaign_cancellation_queue.put(campaign_id) - async def spin(self, rate_hz: int = 5) -> None: """ Spin the orchestrator at a given rate in Hz. Process submitted work. @@ -621,108 +190,50 @@ async def spin(self, rate_hz: int = 5) -> None: :param rate_hz: The processing rate in Hz. 
This is the rate in which the orchestrator updates.
         """
         while True:
-            await self._process_experiment_cancellations()
-            await self._process_campaign_cancellations()
+            await self._experiments.process_experiment_cancellations()
+            await self._campaigns.process_campaign_cancellations()
 
             await asyncio.gather(
-                self._process_on_demand_tasks(),
-                self._process_experiments(),
-                self._process_campaigns(),
+                self._tasks.process_on_demand_tasks(),
+                self._experiments.process_experiments(),
+                self._campaigns.process_campaigns(),
             )
 
             await self._resource_allocation_manager.process_active_requests()
 
             await asyncio.sleep(1 / rate_hz)
 
-    async def _process_experiment_cancellations(self) -> None:
-        experiment_ids = []
-        while not self._experiment_cancellation_queue.empty():
-            experiment_ids.append(await self._experiment_cancellation_queue.get())
-
-        if experiment_ids:
-            log.warning(f"Attempting to cancel experiments: {experiment_ids}")
-            experiment_cancel_tasks = [
-                self._submitted_experiments[exp_id].cancel_experiment() for exp_id in experiment_ids
-            ]
-            await asyncio.gather(*experiment_cancel_tasks)
-
-            for exp_id in experiment_ids:
-                del self._submitted_experiments[exp_id]
-            log.warning(f"Cancelled experiments: {experiment_ids}")
-
-    async def _process_campaign_cancellations(self) -> None:
-        campaign_ids = []
-        while not self._campaign_cancellation_queue.empty():
-            campaign_ids.append(await self._campaign_cancellation_queue.get())
-
-        if campaign_ids:
-            log.warning(f"Attempting to cancel campaigns: {campaign_ids}")
-            campaign_cancel_tasks = [self._submitted_campaigns[camp_id].cancel_campaign() for camp_id in campaign_ids]
-            await asyncio.gather(*campaign_cancel_tasks)
-
-            for campaign_id in campaign_ids:
-                self._submitted_campaigns[campaign_id].cleanup()
-                del self._submitted_campaigns[campaign_id]
-            log.warning(f"Cancelled campaigns: {campaign_ids}")
-
-    async def _process_experiments(self) -> None:
-        to_remove_completed = []
-        to_remove_failed = []
-
-        for experiment_id, experiment_executor in self._submitted_experiments.items():
-            try:
-                completed = await experiment_executor.progress_experiment()
-
-                if completed:
-                    to_remove_completed.append(experiment_id)
-            except EosExperimentExecutionError:
-                log.error(f"Error in experiment '{experiment_id}': {traceback.format_exc()}")
-                to_remove_failed.append(experiment_id)
-
-        for experiment_id in to_remove_completed:
-            log.info(f"Completed experiment '{experiment_id}'.")
-            del self._submitted_experiments[experiment_id]
-
-        for experiment_id in to_remove_failed:
-            log.error(f"Failed experiment '{experiment_id}'.")
-            del self._submitted_experiments[experiment_id]
-
-    async def _process_campaigns(self) -> None:
-        async def process_single_campaign(campaign_id: str, campaign_executor) -> tuple[str, bool, bool]:
-            try:
-                completed = await campaign_executor.progress_campaign()
-                return campaign_id, completed, False
-            except EosCampaignExecutionError:
-                log.error(f"Error in campaign '{campaign_id}': {traceback.format_exc()}")
-                return campaign_id, False, True
-
-        results = await asyncio.gather(
-            *(process_single_campaign(cid, executor) for cid, executor in self._submitted_campaigns.items()),
-        )
+    async def _fail_running_work(self) -> None:
+        """
+        When the orchestrator starts, fail all running tasks, experiments, and campaigns.
+        This is a safety measure: if the orchestrator was terminated while work was running, the state of the
+        system may be unknown. 
We want to force manual review of the state of the system and explicitly require + re-submission of any work that was running. + """ + await self._tasks.fail_running_tasks() + await self._experiments.fail_running_experiments() + await self._campaigns.fail_running_campaigns() - to_remove_completed: list[str] = [] - to_remove_failed: list[str] = [] + @property + def loading(self) -> LoadingModule: + return self._loading - for campaign_id, completed, failed in results: - if completed: - to_remove_completed.append(campaign_id) - elif failed: - to_remove_failed.append(campaign_id) + @property + def labs(self) -> LabModule: + return self._labs - for campaign_id in to_remove_completed: - log.info(f"Completed campaign '{campaign_id}'.") - self._submitted_campaigns[campaign_id].cleanup() - del self._submitted_campaigns[campaign_id] + @property + def results(self) -> ResultModule: + return self._results - for campaign_id in to_remove_failed: - log.error(f"Failed campaign '{campaign_id}'.") - self._submitted_campaigns[campaign_id].cleanup() - del self._submitted_campaigns[campaign_id] + @property + def tasks(self) -> TaskModule: + return self._tasks - async def _process_on_demand_tasks(self) -> None: - await self._on_demand_task_executor.process_tasks() + @property + def experiments(self) -> ExperimentModule: + return self._experiments - def _validate_experiment_type_exists(self, experiment_type: str) -> None: - if experiment_type not in self._configuration_manager.experiments: - log.error(f"Cannot submit experiment of type '{experiment_type}' as it does not exist.") - raise EosExperimentDoesNotExistError + @property + def campaigns(self) -> CampaignModule: + return self._campaigns diff --git a/eos/persistence/async_mongodb_interface.py b/eos/persistence/async_mongodb_interface.py index 662bb06..9ab7b90 100644 --- a/eos/persistence/async_mongodb_interface.py +++ b/eos/persistence/async_mongodb_interface.py @@ -1,9 +1,9 @@ from motor.core import AgnosticDatabase from motor.motor_asyncio import AsyncIOMotorClient +from eos.configuration.entities.eos_config import DbConfig from eos.logging.logger import log from eos.persistence.async_mongodb_session_factory import AsyncMongoDbSessionFactory -from eos.persistence.service_credentials import ServiceCredentials class AsyncMongoDbInterface: @@ -13,7 +13,7 @@ class AsyncMongoDbInterface: def __init__( self, - db_credentials: ServiceCredentials, + db_credentials: DbConfig, db_name: str = "eos", ): self._db_credentials = db_credentials diff --git a/eos/persistence/file_db_interface.py b/eos/persistence/file_db_interface.py index 9203f95..7bd7279 100644 --- a/eos/persistence/file_db_interface.py +++ b/eos/persistence/file_db_interface.py @@ -3,9 +3,9 @@ from minio import Minio, S3Error +from eos.configuration.entities.eos_config import DbConfig from eos.logging.logger import log from eos.persistence.exceptions import EosFileDbError -from eos.persistence.service_credentials import ServiceCredentials class FileDbInterface: @@ -13,7 +13,7 @@ class FileDbInterface: Provides access to a MinIO server for storing and retrieving files. 
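+    The MinIO endpoint (host:port) comes from the DbConfig passed to the constructor.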
""" - def __init__(self, file_db_credentials: ServiceCredentials, bucket_name: str = "eos"): + def __init__(self, file_db_credentials: DbConfig, bucket_name: str = "eos"): endpoint = f"{file_db_credentials.host}:{file_db_credentials.port}" self._client = Minio( diff --git a/eos/persistence/mongodb_async_repository.py b/eos/persistence/mongodb_async_repository.py index 3cb80ad..f2a9798 100644 --- a/eos/persistence/mongodb_async_repository.py +++ b/eos/persistence/mongodb_async_repository.py @@ -77,8 +77,9 @@ async def get_all(self, session: AgnosticClientSession | None = None, **kwargs) """ return await self._collection.find(kwargs, session=session).to_list(None) - async def update_one(self, updated_entity: dict[str, Any], session: AgnosticClientSession | None = None, - **kwargs) -> UpdateResult: + async def update_one( + self, updated_entity: dict[str, Any], session: AgnosticClientSession | None = None, **kwargs + ) -> UpdateResult: """ Update an entity in the collection. diff --git a/eos/persistence/service_credentials.py b/eos/persistence/service_credentials.py deleted file mode 100644 index 97c0687..0000000 --- a/eos/persistence/service_credentials.py +++ /dev/null @@ -1,9 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class ServiceCredentials: - host: str - port: int - username: str - password: str diff --git a/eos/resource_allocation/container_allocator.py b/eos/resource_allocation/container_allocator.py index 15925c4..f86348f 100644 --- a/eos/resource_allocation/container_allocator.py +++ b/eos/resource_allocation/container_allocator.py @@ -45,7 +45,6 @@ async def allocate(self, container_id: str, owner: str, experiment_id: str | Non id=container_id, owner=owner, container_type=container_config["type"], - lab=container_config["lab"], experiment_id=experiment_id, ) await self._allocations.create(allocation.model_dump()) diff --git a/eos/resource_allocation/entities/resource_request.py b/eos/resource_allocation/entities/resource_request.py index b29e823..f29e78b 100644 --- a/eos/resource_allocation/entities/resource_request.py +++ b/eos/resource_allocation/entities/resource_request.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timezone from enum import Enum from bson import ObjectId @@ -22,7 +22,7 @@ def resource_type_enum_to_string(self, v: ResourceType) -> str: class ResourceAllocationRequest(BaseModel): requester: str - resources: list[Resource] = [] + resources: list[Resource] = Field(default_factory=list) experiment_id: str | None = None reason: str | None = None priority: int = Field(default=100, gt=0) @@ -49,7 +49,7 @@ class ActiveResourceAllocationRequest(BaseModel): id: ObjectId = Field(default_factory=ObjectId, alias="_id") request: ResourceAllocationRequest status: ResourceRequestAllocationStatus = ResourceRequestAllocationStatus.PENDING - created_at: datetime = Field(default_factory=datetime.utcnow) + created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) allocated_at: datetime | None = None class Config: diff --git a/eos/resource_allocation/repositories/resource_request_repository.py b/eos/resource_allocation/repositories/resource_request_repository.py index 077969a..ee9c53c 100644 --- a/eos/resource_allocation/repositories/resource_request_repository.py +++ b/eos/resource_allocation/repositories/resource_request_repository.py @@ -12,13 +12,16 @@ class ResourceRequestRepository(MongoDbAsyncRepository): def __init__(self, db_interface: AsyncMongoDbInterface): super().__init__("resource_requests", 
db_interface) - async def get_requests_prioritized(self, status: ResourceRequestAllocationStatus, - session: AgnosticClientSession | None = None) -> list[dict]: - return await self._collection.find({"status": status.value}, session=session).sort("request.priority", - 1).to_list() + async def get_requests_prioritized( + self, status: ResourceRequestAllocationStatus, session: AgnosticClientSession | None = None + ) -> list[dict]: + return ( + await self._collection.find({"status": status.value}, session=session).sort("request.priority", 1).to_list() + ) - async def get_existing_request(self, request: ResourceAllocationRequest, - session: AgnosticClientSession | None = None) -> dict: + async def get_existing_request( + self, request: ResourceAllocationRequest, session: AgnosticClientSession | None = None + ) -> dict: query = { "request.resources": [r.model_dump() for r in request.resources], "request.requester": request.requester, diff --git a/eos/resource_allocation/resource_allocation_manager.py b/eos/resource_allocation/resource_allocation_manager.py index 7387786..59dccee 100644 --- a/eos/resource_allocation/resource_allocation_manager.py +++ b/eos/resource_allocation/resource_allocation_manager.py @@ -261,9 +261,7 @@ async def _delete_all_allocations(self) -> None: """ Delete all device and container allocations. """ - await asyncio.gather( - self._device_allocator.deallocate_all(), self._container_allocator.deallocate_all() - ) + await asyncio.gather(self._device_allocator.deallocate_all(), self._container_allocator.deallocate_all()) @property def device_allocator(self) -> DeviceAllocator: diff --git a/eos/scheduling/greedy_scheduler.py b/eos/scheduling/greedy_scheduler.py index 4a079af..24fc1e1 100644 --- a/eos/scheduling/greedy_scheduler.py +++ b/eos/scheduling/greedy_scheduler.py @@ -101,11 +101,13 @@ async def request_tasks(self, experiment_id: str) -> list[ScheduledTask]: pending_tasks = [task_id for task_id in all_tasks if task_id not in completed_tasks] # Release resources for completed tasks - await asyncio.gather(*[ - self._release_task_resources(experiment_id, task_id) - for task_id in completed_tasks - if task_id in self._allocated_resources.get(experiment_id, {}) - ]) + await asyncio.gather( + *[ + self._release_task_resources(experiment_id, task_id) + for task_id in completed_tasks + if task_id in self._allocated_resources.get(experiment_id, {}) + ] + ) scheduled_tasks = [] for task_id in pending_tasks: @@ -140,8 +142,8 @@ async def request_tasks(self, experiment_id: str) -> list[ScheduledTask]: ) except EosSchedulerResourceAllocationError: log.warning( - f"Timed out in allocating resources for task '{task_id}' in experiment '{experiment_id}. " - f"Will retry.'" + f"Timed out in allocating resources for task '{task_id}' in experiment '{experiment_id}'. " + f"Will retry." 
) continue diff --git a/eos/tasks/entities/task.py b/eos/tasks/entities/task.py index c414bb0..a141274 100644 --- a/eos/tasks/entities/task.py +++ b/eos/tasks/entities/task.py @@ -1,11 +1,10 @@ from datetime import datetime, timezone from enum import Enum -from typing import Any, ClassVar +from typing import Any -from omegaconf import ListConfig, DictConfig, OmegaConf -from pydantic import BaseModel, field_serializer +from pydantic import BaseModel, Field, field_serializer -from eos.configuration.entities.task import TaskDeviceConfig +from eos.configuration.entities.task import TaskDeviceConfig, TaskConfig from eos.containers.entities.container import Container @@ -17,10 +16,6 @@ class TaskStatus(Enum): CANCELLED = "CANCELLED" -class TaskContainer(BaseModel): - id: str - - class TaskInput(BaseModel): parameters: dict[str, Any] | None = None containers: dict[str, Container] | None = None @@ -28,59 +23,80 @@ class TaskInput(BaseModel): class Config: arbitrary_types_allowed = True - @field_serializer("parameters") - def serialize_parameters(self, parameters: dict[str, Any] | None, _info) -> Any: - if parameters is None: - return None - return omegaconf_serializer(parameters) - class TaskOutput(BaseModel): parameters: dict[str, Any] | None = None containers: dict[str, Container] | None = None file_names: list[str] | None = None - @field_serializer("parameters") - def serialize_parameters(self, parameters: dict[str, Any] | None, _info) -> Any: - if parameters is None: - return None - return omegaconf_serializer(parameters) - -def omegaconf_serializer(obj: Any) -> Any: - if isinstance(obj, ListConfig | DictConfig): - return OmegaConf.to_object(obj) - if isinstance(obj, dict): - return {k: omegaconf_serializer(v) for k, v in obj.items()} - if isinstance(obj, list): - return [omegaconf_serializer(v) for v in obj] - return obj +class TaskDefinition(BaseModel): + """The definition of a task. 
Used for submission.""" - -class Task(BaseModel): id: str type: str - experiment_id: str - - devices: list[TaskDeviceConfig] = [] - input: TaskInput = TaskInput() - output: TaskOutput = TaskInput() + experiment_id: str = "on_demand" + + devices: list[TaskDeviceConfig] = Field(default_factory=list) + input: TaskInput = Field(default_factory=TaskInput) + + resource_allocation_priority: int = Field(90, ge=0) + resource_allocation_timeout: int = Field(600, ge=0) # sec + + metadata: dict[str, Any] = Field(default_factory=dict) + + @classmethod + def from_config(cls, config: TaskConfig, experiment_id: str) -> "TaskDefinition": + """Create a TaskDefinition from a TaskConfig.""" + input_params = TaskInput( + parameters=config.parameters, + containers={ + container_name: Container(id=container_id) for container_name, container_id in config.containers.items() + }, + ) + + return cls( + id=config.id, + type=config.type, + experiment_id=experiment_id, + devices=config.devices, + input=input_params, + ) + + def to_config(self) -> TaskConfig: + """Convert a TaskDefinition to a TaskConfig.""" + containers = {} + if self.input.containers: + containers = {container_name: container.id for container_name, container in self.input.containers.items()} + + return TaskConfig( + id=self.id, + type=self.type, + devices=self.devices, + containers=containers, + parameters=self.input.parameters or {}, + dependencies=[], + ) + + +class Task(TaskDefinition): + """The state of a task in the system.""" status: TaskStatus = TaskStatus.CREATED + output: TaskOutput = TaskOutput() - metadata: dict[str, Any] = {} start_time: datetime | None = None end_time: datetime | None = None - - created_at: datetime = datetime.now(tz=timezone.utc) + created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc)) class Config: arbitrary_types_allowed = True - json_encoders: ClassVar = { - ListConfig: lambda v: omegaconf_serializer(v), - DictConfig: lambda v: omegaconf_serializer(v), - } @field_serializer("status") def status_enum_to_string(self, v: TaskStatus) -> str: return v.value + + @classmethod + def from_definition(cls, definition: TaskDefinition) -> "Task": + """Create a Task instance from a TaskDefinition.""" + return cls(**definition.model_dump()) diff --git a/eos/tasks/entities/task_execution_parameters.py b/eos/tasks/entities/task_execution_parameters.py deleted file mode 100644 index 7d148c6..0000000 --- a/eos/tasks/entities/task_execution_parameters.py +++ /dev/null @@ -1,10 +0,0 @@ -from pydantic import BaseModel, Field - -from eos.configuration.entities.task import TaskConfig - - -class TaskExecutionParameters(BaseModel): - experiment_id: str - task_config: TaskConfig - resource_allocation_priority: int = Field(120, ge=0) - resource_allocation_timeout: int = Field(30, ge=0) diff --git a/eos/tasks/on_demand_task_executor.py b/eos/tasks/on_demand_task_executor.py index 72e1f26..284bac4 100644 --- a/eos/tasks/on_demand_task_executor.py +++ b/eos/tasks/on_demand_task_executor.py @@ -2,12 +2,10 @@ import traceback from typing import Any -from eos.configuration.entities.task import TaskConfig from eos.containers.container_manager import ContainerManager from eos.containers.entities.container import Container from eos.logging.logger import log -from eos.tasks.entities.task import TaskOutput -from eos.tasks.entities.task_execution_parameters import TaskExecutionParameters +from eos.tasks.entities.task import TaskOutput, TaskDefinition from eos.tasks.exceptions import EosTaskExecutionError, EosTaskValidationError, 
EosTaskStateError from eos.tasks.task_executor import TaskExecutor from eos.tasks.task_manager import TaskManager @@ -29,25 +27,12 @@ def __init__(self, task_executor: TaskExecutor, task_manager: TaskManager, conta log.debug("On-demand task executor initialized.") - def submit_task( - self, - task_config: TaskConfig, - resource_allocation_priority: int = 90, - resource_allocation_timeout: int = 3600, - ) -> None: + def submit_task(self, task_definition: TaskDefinition) -> None: """Submit an on-demand task for execution.""" - task_id = task_config.id - task_execution_parameters = TaskExecutionParameters( - experiment_id=self.EXPERIMENT_ID, - task_config=task_config, - resource_allocation_priority=resource_allocation_priority, - resource_allocation_timeout=resource_allocation_timeout, - ) - - self._task_futures[task_id] = asyncio.create_task( - self._task_executor.request_task_execution(task_execution_parameters) + self._task_futures[task_definition.id] = asyncio.create_task( + self._task_executor.request_task_execution(task_definition) ) - log.info(f"Submitted on-demand task '{task_id}'.") + log.info(f"Submitted on-demand task '{task_definition.id}'.") async def request_task_cancellation(self, task_id: str) -> None: """Request cancellation of an on-demand task.""" @@ -93,8 +78,6 @@ async def _process_task_output( ) task_output = TaskOutput( - experiment_id=self.EXPERIMENT_ID, - task_id=task_id, parameters=output_parameters, containers=output_containers, file_names=list(output_files.keys()), diff --git a/eos/tasks/task_executor.py b/eos/tasks/task_executor.py index 5179adf..8860394 100644 --- a/eos/tasks/task_executor.py +++ b/eos/tasks/task_executor.py @@ -3,10 +3,10 @@ from typing import Any import ray -from omegaconf import OmegaConf from ray import ObjectRef from eos.configuration.configuration_manager import ConfigurationManager +from eos.configuration.entities.task import TaskConfig from eos.containers.container_manager import ContainerManager from eos.containers.entities.container import Container from eos.devices.device_actor_wrapper_registry import DeviceActorReference, DeviceActorWrapperRegistry @@ -22,13 +22,13 @@ from eos.resource_allocation.resource_allocation_manager import ResourceAllocationManager from eos.scheduling.entities.scheduled_task import ScheduledTask from eos.tasks.base_task import BaseTask -from eos.tasks.entities.task import TaskStatus -from eos.tasks.entities.task_execution_parameters import TaskExecutionParameters +from eos.tasks.entities.task import TaskStatus, TaskDefinition from eos.tasks.exceptions import ( EosTaskResourceAllocationError, EosTaskExecutionError, EosTaskValidationError, - EosTaskExistsError, EosTaskCancellationError, + EosTaskExistsError, + EosTaskCancellationError, ) from eos.tasks.task_input_parameter_caster import TaskInputParameterCaster from eos.tasks.task_manager import TaskManager @@ -66,13 +66,15 @@ def __init__( log.debug("Task executor initialized.") async def request_task_execution( - self, task_parameters: TaskExecutionParameters, scheduled_task: ScheduledTask | None = None + self, + task_definition: TaskDefinition, + scheduled_task: ScheduledTask | None = None, ) -> BaseTask.OutputType | None: """ Request the execution of a task. Resources will first be requested to be allocated (if not pre-allocated) and then the task will be executed. - :param task_parameters: Parameters for task execution + :param task_definition: The task definition (e.g., user submission) :param scheduled_task: Scheduled task information, if applicable. 
This is populated by the EOS scheduler. :return: Output of the executed task @@ -80,23 +82,24 @@ async def request_task_execution( :raises EosTaskValidationError: If the task fails validation :raises EosTaskResourceAllocationError: If resource allocation fails """ - context = TaskExecutionContext(task_parameters.experiment_id, task_parameters.task_config.id) + context = TaskExecutionContext(task_definition.experiment_id, task_definition.id) task_key = (context.experiment_id, context.task_id) self._active_tasks[task_key] = context try: - containers = await self._prepare_containers(task_parameters) - await self._initialize_task(task_parameters, containers) + task_config = task_definition.to_config() + self._task_validator.validate(task_config) - self._task_validator.validate(task_parameters.task_config) + task_definition.input.containers = await self._prepare_containers(task_config) + await self._initialize_task(task_definition) context.active_resource_request = ( scheduled_task.allocated_resources if scheduled_task - else await self._allocate_resources(task_parameters) + else await self._allocate_resources(task_definition) ) - context.task_ref = await self._execute_task(task_parameters, containers) + context.task_ref = await self._execute_task(task_definition) return await context.task_ref except EosTaskExistsError as e: raise EosTaskExecutionError( @@ -139,7 +142,8 @@ async def request_task_cancellation(self, experiment_id: str, task_id: str) -> N context = self._active_tasks.get(task_key) if not context: raise EosTaskCancellationError( - f"Cannot cancel task '{task_id}' in experiment '{experiment_id}' as it does not exist.") + f"Cannot cancel task '{task_id}' in experiment '{experiment_id}' as it does not exist." + ) if context.task_ref: ray.cancel(context.task_ref, recursive=True) @@ -149,21 +153,19 @@ async def request_task_cancellation(self, experiment_id: str, task_id: str) -> N await self._resource_allocation_manager.process_active_requests() await self._task_manager.cancel_task(experiment_id, task_id) - del self._active_tasks[task_key] + self._active_tasks.pop(task_key, None) log.warning(f"EXP '{experiment_id}' - Cancelled task '{task_id}'.") - async def _prepare_containers(self, execution_parameters: TaskExecutionParameters) -> dict[str, Container]: - containers = execution_parameters.task_config.containers + async def _prepare_containers(self, task_config: TaskConfig) -> dict[str, Container]: + containers = task_config.containers fetched_containers = await asyncio.gather( *[self._container_manager.get_container(container_id) for container_id in containers.values()] ) return dict(zip(containers.keys(), fetched_containers, strict=True)) - async def _initialize_task( - self, execution_parameters: TaskExecutionParameters, containers: dict[str, Container] - ) -> None: - experiment_id, task_id = execution_parameters.experiment_id, execution_parameters.task_config.id + async def _initialize_task(self, task_definition: TaskDefinition) -> None: + experiment_id, task_id = task_definition.experiment_id, task_definition.id log.debug(f"Execution of task '{task_id}' for experiment '{experiment_id}' has been requested") task = await self._task_manager.get_task(experiment_id, task_id) @@ -172,22 +174,13 @@ async def _initialize_task( await self.request_task_cancellation(experiment_id, task_id) await self._task_manager.delete_task(experiment_id, task_id) - await self._task_manager.create_task( - experiment_id=experiment_id, - task_id=task_id, - task_type=execution_parameters.task_config.type, - 
devices=execution_parameters.task_config.devices, - parameters=execution_parameters.task_config.parameters, - containers=containers, - ) + await self._task_manager.create_task(task_definition) - async def _allocate_resources( - self, execution_parameters: TaskExecutionParameters - ) -> ActiveResourceAllocationRequest: - resource_request = self._create_resource_request(execution_parameters) - return await self._request_resources(resource_request, execution_parameters.resource_allocation_timeout) + async def _allocate_resources(self, task_definition: TaskDefinition) -> ActiveResourceAllocationRequest: + resource_request = self._create_resource_request(task_definition) + return await self._request_resources(resource_request, task_definition.resource_allocation_timeout) - def _get_device_actor_references(self, task_parameters: TaskExecutionParameters) -> list[DeviceActorReference]: + def _get_device_actor_references(self, task_definition: TaskDefinition) -> list[DeviceActorReference]: return [ DeviceActorReference( id=device.id, @@ -195,24 +188,18 @@ def _get_device_actor_references(self, task_parameters: TaskExecutionParameters) type=self._configuration_manager.labs[device.lab_id].devices[device.id].type, actor_handle=self._device_manager.get_device_actor(device.lab_id, device.id), ) - for device in task_parameters.task_config.devices + for device in task_definition.devices ] async def _execute_task( self, - task_execution_parameters: TaskExecutionParameters, - containers: dict[str, Container], + task_definition: TaskDefinition, ) -> ObjectRef: - experiment_id, task_id = task_execution_parameters.experiment_id, task_execution_parameters.task_config.id - device_actor_references = self._get_device_actor_references(task_execution_parameters) - task_class_type = self._task_plugin_registry.get_task_class_type(task_execution_parameters.task_config.type) - parameters = task_execution_parameters.task_config.parameters - if not isinstance(parameters, dict): - parameters = OmegaConf.to_object(parameters) - - parameters = self._task_input_parameter_caster.cast_input_parameters( - task_id, task_execution_parameters.task_config.type, parameters - ) + experiment_id, task_id = task_definition.experiment_id, task_definition.id + device_actor_references = self._get_device_actor_references(task_definition) + task_class_type = self._task_plugin_registry.get_plugin_class_type(task_definition.type) + + input_parameters = self._task_input_parameter_caster.cast_input_parameters(task_definition) @ray.remote(num_cpus=0) def _ray_execute_task( @@ -233,34 +220,31 @@ def _ray_execute_task( experiment_id, task_id, device_actor_references, - parameters, - containers, + input_parameters, + task_definition.input.containers, ) @staticmethod def _create_resource_request( - task_parameters: TaskExecutionParameters, + task_definition: TaskDefinition, ) -> ResourceAllocationRequest: - task_id, experiment_id = task_parameters.task_config.id, task_parameters.experiment_id - resource_allocation_priority = task_parameters.resource_allocation_priority - request = ResourceAllocationRequest( - requester=task_id, - experiment_id=experiment_id, - reason=f"Resources required for task '{task_id}'", - priority=resource_allocation_priority, + requester=task_definition.id, + experiment_id=task_definition.experiment_id, + reason=f"Resources required for task '{task_definition.id}'", + priority=task_definition.resource_allocation_priority, ) - for device in task_parameters.task_config.devices: + for device in task_definition.devices: 
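+            # Each device the task requires is requested as a DEVICE resource in its lab.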
request.add_resource(device.id, device.lab_id, ResourceType.DEVICE) - for container_id in task_parameters.task_config.containers.values(): - request.add_resource(container_id, "", ResourceType.CONTAINER) + for container in task_definition.input.containers.values(): + request.add_resource(container.id, "", ResourceType.CONTAINER) return request async def _request_resources( - self, resource_request: ResourceAllocationRequest, timeout: int = 30 + self, resource_request: ResourceAllocationRequest, timeout: int = 600 ) -> ActiveResourceAllocationRequest: allocation_event = asyncio.Event() active_request = None diff --git a/eos/tasks/task_input_parameter_caster.py b/eos/tasks/task_input_parameter_caster.py index 008e092..436caa8 100644 --- a/eos/tasks/task_input_parameter_caster.py +++ b/eos/tasks/task_input_parameter_caster.py @@ -1,29 +1,32 @@ from typing import Any -from eos.configuration.entities.parameters import ParameterType +from eos.configuration.entities.task_parameters import TaskParameterType from eos.configuration.exceptions import EosTaskValidationError -from eos.configuration.spec_registries.task_specification_registry import TaskSpecificationRegistry +from eos.configuration.spec_registries.task_spec_registry import TaskSpecRegistry +from eos.tasks.entities.task import TaskDefinition class TaskInputParameterCaster: def __init__(self): - self.task_spec_registry = TaskSpecificationRegistry() + self.task_spec_registry = TaskSpecRegistry() - def cast_input_parameters(self, task_id: str, task_type: str, input_parameters: dict[str, Any]) -> dict[str, Any]: + def cast_input_parameters(self, task_definition: TaskDefinition) -> dict[str, Any]: """ Cast input parameters of a task to the expected Python types. - :param task_id: The ID of the task. - :param task_type: The type of the task. - :param input_parameters: The input parameters of the task. + :param task_definition: The task definition. :return: The input parameters cast to the expected Python types. 
""" + task_id = task_definition.id + task_type = task_definition.type + input_parameters = task_definition.input.parameters + task_spec = self.task_spec_registry.get_spec_by_type(task_type) for parameter_name, parameter in input_parameters.items(): try: - parameter_type = ParameterType(task_spec.input_parameters[parameter_name].type) - input_parameters[parameter_name] = parameter_type.python_type()(parameter) + parameter_type = TaskParameterType(task_spec.input_parameters[parameter_name].type) + input_parameters[parameter_name] = parameter_type.python_type(parameter) except TypeError as e: raise EosTaskValidationError( f"Failed to cast input parameter '{parameter_name}' of task '{task_id}' of type \ diff --git a/eos/tasks/task_input_parameter_validator.py b/eos/tasks/task_input_parameter_validator.py index 97f7dae..96d16c6 100644 --- a/eos/tasks/task_input_parameter_validator.py +++ b/eos/tasks/task_input_parameter_validator.py @@ -1,11 +1,9 @@ import copy from typing import Any -from omegaconf import ListConfig, OmegaConf, DictConfig - -from eos.configuration.entities.parameters import ParameterType, ParameterFactory +from eos.configuration.entities.task_parameters import TaskParameterType, TaskParameterFactory from eos.configuration.entities.task import TaskConfig -from eos.configuration.entities.task_specification import TaskSpecification +from eos.configuration.entities.task_spec import TaskSpecConfig from eos.configuration.exceptions import EosConfigurationError from eos.configuration.validation import validation_utils from eos.logging.batch_error_logger import batch_error, raise_batched_errors @@ -17,7 +15,7 @@ class TaskInputParameterValidator: Validates that the input parameters of a task conform to the task's specification. """ - def __init__(self, task: TaskConfig, task_spec: TaskSpecification): + def __init__(self, task: TaskConfig, task_spec: TaskSpecConfig): self._task_id = task.id self._input_parameters = task.parameters self._task_spec = task_spec @@ -69,8 +67,8 @@ def _validate_parameter_spec(self, parameter_name: str, parameter: Any) -> None: parameter_spec = copy.deepcopy(self._task_spec.input_parameters[parameter_name]) try: - parameter = self._convert_value_type(parameter, ParameterType(parameter_spec.type)) - except Exception: + parameter = self._convert_value_type(parameter, TaskParameterType(parameter_spec.type)) + except ValueError: batch_error( f"Parameter '{parameter_name}' in task '{self._task_id}' has incorrect type {type(parameter)}. 
" f"Expected type: '{parameter_spec.type}'.", @@ -78,11 +76,11 @@ def _validate_parameter_spec(self, parameter_name: str, parameter: Any) -> None: ) return - parameter_spec["value"] = parameter + parameter_spec.value = parameter try: - parameter_type = ParameterType(parameter_spec.type) - ParameterFactory.create_parameter(parameter_type, **parameter_spec) + parameter_type = TaskParameterType(parameter_spec.type) + TaskParameterFactory.create(parameter_type, **parameter_spec.model_dump()) except EosConfigurationError as e: batch_error( f"Parameter '{parameter_name}' in task '{self._task_id}' validation error: {e}", @@ -90,25 +88,23 @@ def _validate_parameter_spec(self, parameter_name: str, parameter: Any) -> None: ) @staticmethod - def _convert_value_type(value: Any, expected_type: ParameterType) -> Any: + def _convert_value_type(value: Any, expected_type: TaskParameterType) -> Any: result = None - if isinstance(value, expected_type.python_type()): + if isinstance(value, expected_type.python_type): result = value - elif isinstance(value, ListConfig | DictConfig): - value = OmegaConf.to_object(value) if result is None: conversion_map = { - ParameterType.integer: int, - ParameterType.decimal: float, - ParameterType.string: str, - ParameterType.choice: str, + TaskParameterType.INT: int, + TaskParameterType.FLOAT: float, + TaskParameterType.STR: str, + TaskParameterType.CHOICE: str, } if expected_type in conversion_map: result = conversion_map[expected_type](value) - elif expected_type == ParameterType.boolean: + elif expected_type == TaskParameterType.BOOL: if isinstance(value, bool): result = value elif isinstance(value, str): @@ -117,9 +113,9 @@ def _convert_value_type(value: Any, expected_type: ParameterType) -> Any: result = True elif v == "false": result = False - elif expected_type == ParameterType.list and isinstance(value, list): + elif expected_type == TaskParameterType.LIST and isinstance(value, list): result = list(value) - elif expected_type == ParameterType.dictionary and isinstance(value, dict): + elif expected_type == TaskParameterType.DICT and isinstance(value, dict): result = value if result is None: @@ -151,4 +147,4 @@ def _get_required_input_parameters(self) -> list[str]: """ Get all the required input parameters for the task. 
""" - return [param for param, spec in self._task_spec.input_parameters.items() if "value" not in spec] + return [param for param, spec in self._task_spec.input_parameters.items() if spec.value is None] diff --git a/eos/tasks/task_manager.py b/eos/tasks/task_manager.py index 611cba8..ca1f0f6 100644 --- a/eos/tasks/task_manager.py +++ b/eos/tasks/task_manager.py @@ -4,13 +4,11 @@ from typing import Any from eos.configuration.configuration_manager import ConfigurationManager -from eos.configuration.entities.task import TaskDeviceConfig -from eos.containers.entities.container import Container from eos.experiments.repositories.experiment_repository import ExperimentRepository from eos.logging.logger import log from eos.persistence.async_mongodb_interface import AsyncMongoDbInterface from eos.persistence.file_db_interface import FileDbInterface -from eos.tasks.entities.task import Task, TaskStatus, TaskInput, TaskOutput +from eos.tasks.entities.task import Task, TaskStatus, TaskOutput, TaskDefinition from eos.tasks.exceptions import EosTaskStateError, EosTaskExistsError from eos.tasks.repositories.task_repository import TaskRepository @@ -41,27 +39,16 @@ async def initialize(self, db_interface: AsyncMongoDbInterface) -> None: log.debug("Task manager initialized.") - async def create_task( - self, - experiment_id: str, - task_id: str, - task_type: str, - devices: list[TaskDeviceConfig], - parameters: dict[str, Any] | None = None, - containers: dict[str, Container] | None = None, - metadata: dict[str, Any] | None = None, - ) -> None: + async def create_task(self, task_definition: TaskDefinition) -> None: """ Create a new task instance for a specific task type that is associated with an experiment. - :param experiment_id: The id of the experiment. - :param task_id: The id of the task in the experiment task sequence. - :param task_type: The type of the task as defined in the configuration. - :param devices: The devices required for the task. - :param parameters: The input parameters for the task. - :param containers: The input containers for the task. - :param metadata: Additional metadata to be stored with the task. + :param task_definition: The task definition. """ + task_id = task_definition.id + experiment_id = task_definition.experiment_id + task_type = task_definition.type + if await self._tasks.exists(experiment_id=experiment_id, id=task_id): raise EosTaskExistsError(f"Cannot create task '{task_id}' as a task with that ID already exists.") @@ -69,16 +56,7 @@ async def create_task( if not task_spec: raise EosTaskStateError(f"Task type '{task_type}' does not exist.") - task_input = TaskInput(parameters=parameters or {}, containers=containers or {}) - - task = Task( - id=task_id, - type=task_type, - experiment_id=experiment_id, - devices=[TaskDeviceConfig(id=device.id, lab_id=device.lab_id) for device in devices], - input=task_input, - metadata=metadata or {}, - ) + task = Task.from_definition(task_definition) await self._tasks.create(task.model_dump()) async def delete_task(self, experiment_id: str, task_id: str) -> None: @@ -131,7 +109,7 @@ async def fail_task(self, experiment_id: str, task_id: str) -> None: async def cancel_task(self, experiment_id: str, task_id: str) -> None: """ Remove a task from the running tasks list and do not add it to the executed tasks list. Update the task status - to cancelled. + to be cancelled. 
""" await self._validate_task_exists(experiment_id, task_id) diff --git a/eos/tasks/task_validator.py b/eos/tasks/task_validator.py index 0e34b97..76f06b3 100644 --- a/eos/tasks/task_validator.py +++ b/eos/tasks/task_validator.py @@ -1,18 +1,18 @@ from eos.configuration.entities.task import TaskConfig -from eos.configuration.entities.task_specification import TaskSpecification -from eos.configuration.spec_registries.task_specification_registry import TaskSpecificationRegistry +from eos.configuration.entities.task_spec import TaskSpecConfig +from eos.configuration.spec_registries.task_spec_registry import TaskSpecRegistry from eos.tasks.task_input_parameter_validator import TaskInputParameterValidator class TaskValidator: def __init__(self): - self.task_spec_registry = TaskSpecificationRegistry() + self.task_spec_registry = TaskSpecRegistry() def validate(self, task_config: TaskConfig) -> None: - task_spec = self.task_spec_registry.get_spec_by_config(task_config) + task_spec = self.task_spec_registry.get_spec_by_type(task_config.type) self._validate_parameters(task_config, task_spec) @staticmethod - def _validate_parameters(task_config: TaskConfig, task_spec: TaskSpecification) -> None: + def _validate_parameters(task_config: TaskConfig, task_spec: TaskSpecConfig) -> None: validator = TaskInputParameterValidator(task_config, task_spec) validator.validate_input_parameters() diff --git a/eos/web_api/common/entities.py b/eos/web_api/common/entities.py index fe3328e..8129191 100644 --- a/eos/web_api/common/entities.py +++ b/eos/web_api/common/entities.py @@ -1,38 +1,14 @@ -from typing import Any - from pydantic import BaseModel -from eos.campaigns.entities.campaign import CampaignExecutionParameters -from eos.configuration.entities.task import TaskConfig -from eos.experiments.entities.experiment import ExperimentExecutionParameters - - -class SubmitTaskRequest(BaseModel): - task_config: TaskConfig - resource_allocation_priority: int = 1 - resource_allocation_timeout: int = 180 - class TaskTypesResponse(BaseModel): task_types: list[str] | str -class SubmitExperimentRequest(BaseModel): - experiment_id: str - experiment_type: str - experiment_execution_parameters: ExperimentExecutionParameters - dynamic_parameters: dict[str, dict[str, Any]] - metadata: dict[str, Any] = {} - - class ExperimentTypes(BaseModel): experiment_types: list[str] | str -class ExperimentLoadedStatusesResponse(BaseModel): - experiment_loaded_statuses: dict[str, bool] - - class ExperimentTypesResponse(BaseModel): experiment_types: list[str] @@ -41,15 +17,5 @@ class ExperimentDynamicParamsTemplateResponse(BaseModel): dynamic_params_template: str -class SubmitCampaignRequest(BaseModel): - campaign_id: str - experiment_type: str - campaign_execution_parameters: CampaignExecutionParameters - - class LabTypes(BaseModel): lab_types: list[str] | str - - -class LabLoadedStatusesResponse(BaseModel): - lab_loaded_statuses: dict[str, bool] diff --git a/eos/web_api/orchestrator/controllers/campaign_controller.py b/eos/web_api/orchestrator/controllers/campaign_controller.py index 20312d4..619bfb5 100644 --- a/eos/web_api/orchestrator/controllers/campaign_controller.py +++ b/eos/web_api/orchestrator/controllers/campaign_controller.py @@ -2,8 +2,8 @@ from litestar.handlers import post from litestar.status_codes import HTTP_200_OK, HTTP_404_NOT_FOUND, HTTP_201_CREATED +from eos.campaigns.entities.campaign import CampaignDefinition from eos.orchestration.orchestrator import Orchestrator -from eos.web_api.common.entities import 
diff --git a/eos/web_api/common/entities.py b/eos/web_api/common/entities.py
index fe3328e..8129191 100644
--- a/eos/web_api/common/entities.py
+++ b/eos/web_api/common/entities.py
@@ -1,38 +1,14 @@
-from typing import Any
-
 from pydantic import BaseModel
 
-from eos.campaigns.entities.campaign import CampaignExecutionParameters
-from eos.configuration.entities.task import TaskConfig
-from eos.experiments.entities.experiment import ExperimentExecutionParameters
-
-
-class SubmitTaskRequest(BaseModel):
-    task_config: TaskConfig
-    resource_allocation_priority: int = 1
-    resource_allocation_timeout: int = 180
-
 
 class TaskTypesResponse(BaseModel):
     task_types: list[str] | str
 
 
-class SubmitExperimentRequest(BaseModel):
-    experiment_id: str
-    experiment_type: str
-    experiment_execution_parameters: ExperimentExecutionParameters
-    dynamic_parameters: dict[str, dict[str, Any]]
-    metadata: dict[str, Any] = {}
-
-
 class ExperimentTypes(BaseModel):
     experiment_types: list[str] | str
 
 
-class ExperimentLoadedStatusesResponse(BaseModel):
-    experiment_loaded_statuses: dict[str, bool]
-
-
 class ExperimentTypesResponse(BaseModel):
     experiment_types: list[str]
 
@@ -41,15 +17,5 @@ class ExperimentDynamicParamsTemplateResponse(BaseModel):
     dynamic_params_template: str
 
 
-class SubmitCampaignRequest(BaseModel):
-    campaign_id: str
-    experiment_type: str
-    campaign_execution_parameters: CampaignExecutionParameters
-
-
 class LabTypes(BaseModel):
     lab_types: list[str] | str
-
-
-class LabLoadedStatusesResponse(BaseModel):
-    lab_loaded_statuses: dict[str, bool]
diff --git a/eos/web_api/orchestrator/controllers/campaign_controller.py b/eos/web_api/orchestrator/controllers/campaign_controller.py
index 20312d4..619bfb5 100644
--- a/eos/web_api/orchestrator/controllers/campaign_controller.py
+++ b/eos/web_api/orchestrator/controllers/campaign_controller.py
@@ -2,8 +2,8 @@
 from litestar.handlers import post
 from litestar.status_codes import HTTP_200_OK, HTTP_404_NOT_FOUND, HTTP_201_CREATED
 
+from eos.campaigns.entities.campaign import CampaignDefinition
 from eos.orchestration.orchestrator import Orchestrator
-from eos.web_api.common.entities import SubmitCampaignRequest
 from eos.web_api.public.exception_handling import handle_exceptions
 
@@ -13,7 +13,7 @@ class CampaignController(Controller):
     @get("/{campaign_id:str}")
     @handle_exceptions("Failed to get campaign")
     async def get_campaign(self, campaign_id: str, orchestrator: Orchestrator) -> Response:
-        campaign = await orchestrator.get_campaign(campaign_id)
+        campaign = await orchestrator.campaigns.get_campaign(campaign_id)
 
         if campaign is None:
             return Response(content={"error": "Campaign not found"}, status_code=HTTP_404_NOT_FOUND)
@@ -22,12 +22,12 @@ async def get_campaign(self, campaign_id: str, orchestrator: Orchestrator) -> Re
     @post("/submit")
     @handle_exceptions("Failed to submit campaign")
-    async def submit_campaign(self, data: SubmitCampaignRequest, orchestrator: Orchestrator) -> Response:
-        await orchestrator.submit_campaign(data.campaign_id, data.experiment_type, data.campaign_execution_parameters)
+    async def submit_campaign(self, data: CampaignDefinition, orchestrator: Orchestrator) -> Response:
+        await orchestrator.campaigns.submit_campaign(data)
         return Response(content=None, status_code=HTTP_201_CREATED)
 
     @post("/{campaign_id:str}/cancel")
     @handle_exceptions("Failed to cancel campaign")
     async def cancel_campaign(self, campaign_id: str, orchestrator: Orchestrator) -> Response:
-        await orchestrator.cancel_campaign(campaign_id)
+        await orchestrator.campaigns.cancel_campaign(campaign_id)
         return Response(content=None, status_code=HTTP_200_OK)
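For clients, the submit endpoint now deserializes straight into a ``CampaignDefinition``. A hedged sketch using ``requests`` (not a project dependency; any HTTP client works): the field names are taken from the ``CampaignDefinition(...)`` constructions in the tests below, and the base URL and port are assumptions:

.. code-block:: python

    import requests

    from eos.campaigns.entities.campaign import CampaignDefinition

    campaign_definition = CampaignDefinition(
        id="optimize_multiplication_campaign",
        experiment_type="optimize_multiplication",
        max_experiments=30,
        max_concurrent_experiments=1,
        optimize=True,
        optimizer_computer_ip="127.0.0.1",
    )

    # model_dump() is the same serialization the public controller forwards
    # to /api/campaigns/submit.
    response = requests.post(
        "http://localhost:8000/api/campaigns/submit",
        json=campaign_definition.model_dump(),
    )
    assert response.status_code == 201
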
diff --git a/eos/web_api/orchestrator/controllers/experiment_controller.py b/eos/web_api/orchestrator/controllers/experiment_controller.py
index b0cca83..085f87f 100644
--- a/eos/web_api/orchestrator/controllers/experiment_controller.py
+++ b/eos/web_api/orchestrator/controllers/experiment_controller.py
@@ -2,11 +2,10 @@
 from litestar.handlers import post
 from litestar.status_codes import HTTP_200_OK, HTTP_201_CREATED, HTTP_404_NOT_FOUND
 
+from eos.experiments.entities.experiment import ExperimentDefinition
 from eos.orchestration.orchestrator import Orchestrator
 from eos.web_api.common.entities import (
-    SubmitExperimentRequest,
     ExperimentTypesResponse,
-    ExperimentLoadedStatusesResponse,
     ExperimentTypes,
 )
 from eos.web_api.public.exception_handling import handle_exceptions
@@ -17,7 +16,7 @@ class ExperimentController(Controller):
     @get("/{experiment_id:str}")
     async def get_experiment(self, experiment_id: str, orchestrator: Orchestrator) -> Response:
-        experiment = await orchestrator.get_experiment(experiment_id)
+        experiment = await orchestrator.experiments.get_experiment(experiment_id)
 
         if experiment is None:
             return Response(content={"error": "Experiment not found"}, status_code=HTTP_404_NOT_FOUND)
@@ -26,50 +25,40 @@ async def get_experiment(self, experiment_id: str, orchestrator: Orchestrator) -
     @post("/submit")
     @handle_exceptions("Failed to submit experiment")
-    async def submit_experiment(self, data: SubmitExperimentRequest, orchestrator: Orchestrator) -> Response:
-        await orchestrator.submit_experiment(
-            data.experiment_id,
-            data.experiment_type,
-            data.experiment_execution_parameters,
-            data.dynamic_parameters,
-            data.metadata,
-        )
+    async def submit_experiment(self, data: ExperimentDefinition, orchestrator: Orchestrator) -> Response:
+        await orchestrator.experiments.submit_experiment(data)
         return Response(content=None, status_code=HTTP_201_CREATED)
 
     @post("/{experiment_id:str}/cancel")
     @handle_exceptions("Failed to cancel experiment")
     async def cancel_experiment(self, experiment_id: str, orchestrator: Orchestrator) -> Response:
-        await orchestrator.cancel_experiment(experiment_id)
+        await orchestrator.experiments.cancel_experiment(experiment_id)
         return Response(content=None, status_code=HTTP_200_OK)
 
     @put("/update_loaded")
     @handle_exceptions("Failed to update loaded experiments")
     async def update_loaded_experiments(self, data: ExperimentTypes, orchestrator: Orchestrator) -> Response:
-        await orchestrator.update_loaded_experiments(set(data.experiment_types))
+        await orchestrator.loading.update_loaded_experiments(set(data.experiment_types))
         return Response(content=None, status_code=HTTP_200_OK)
 
     @put("/reload")
     @handle_exceptions("Failed to reload experiments")
     async def reload_experiments(self, data: ExperimentTypes, orchestrator: Orchestrator) -> Response:
-        await orchestrator.reload_experiments(set(data.experiment_types))
+        await orchestrator.loading.reload_experiments(set(data.experiment_types))
         return Response(content=None, status_code=HTTP_200_OK)
 
     @get("/types")
     @handle_exceptions("Failed to get experiment types")
     async def get_experiment_types(self, orchestrator: Orchestrator) -> ExperimentTypesResponse:
-        experiment_types = await orchestrator.get_experiment_types()
+        experiment_types = await orchestrator.experiments.get_experiment_types()
         return ExperimentTypesResponse(experiment_types=experiment_types)
 
-    @get("/loaded_statuses")
-    @handle_exceptions("Failed to get experiment loaded statuses")
-    async def get_experiment_loaded_statuses(self, orchestrator: Orchestrator) -> ExperimentLoadedStatusesResponse:
-        experiment_loaded_statuses = await orchestrator.get_experiment_loaded_statuses()
-        return ExperimentLoadedStatusesResponse(experiment_loaded_statuses=experiment_loaded_statuses)
+    @get("/loaded")
+    @handle_exceptions("Failed to get loaded experiments")
+    async def get_loaded_experiments(self, orchestrator: Orchestrator) -> dict:
+        return await orchestrator.loading.get_loaded()
 
     @get("/{experiment_type:str}/dynamic_params_template")
     @handle_exceptions("Failed to get dynamic parameters template")
-    async def get_experiment_dynamic_params_template(
-        self, experiment_type: str, orchestrator: Orchestrator
-    ) -> Response:
-        dynamic_params_template = await orchestrator.get_experiment_dynamic_params_template(experiment_type)
-        return Response(content=dynamic_params_template, status_code=HTTP_200_OK)
+    async def get_experiment_dynamic_params_template(self, experiment_type: str, orchestrator: Orchestrator) -> dict:
+        return await orchestrator.experiments.get_experiment_dynamic_params_template(experiment_type)
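
The experiment flow mirrors the campaign one: the request body is now a flat ``ExperimentDefinition`` rather than a nested ``SubmitExperimentRequest``. A sketch under the same assumptions (``id``, ``type``, and ``dynamic_parameters`` are the fields the tests below construct; the values, base URL, and port are illustrative):

.. code-block:: python

    import requests

    from eos.experiments.entities.experiment import ExperimentDefinition

    experiment_definition = ExperimentDefinition(
        id="water_purification_1",
        type="water_purification",
        dynamic_parameters={},
    )

    response = requests.post(
        "http://localhost:8000/api/experiments/submit",
        json=experiment_definition.model_dump(),
    )
    assert response.status_code == 201
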
diff --git a/eos/web_api/orchestrator/controllers/file_controller.py b/eos/web_api/orchestrator/controllers/file_controller.py
index 48fc445..8cd118e 100644
--- a/eos/web_api/orchestrator/controllers/file_controller.py
+++ b/eos/web_api/orchestrator/controllers/file_controller.py
@@ -23,7 +23,7 @@ async def download_task_output_file(
 ) -> Stream:
     async def file_stream() -> AsyncIterable:
         try:
-            async for chunk in orchestrator.stream_task_output_file(
+            async for chunk in orchestrator.results.download_task_output_file(
                 experiment_id, task_id, file_name, chunk_size=_CHUNK_SIZE
             ):
                 yield chunk
@@ -39,7 +39,7 @@ async def download_task_output_files_zipped(
 ) -> Stream:
     async def zip_stream() -> AsyncIterable:
         try:
-            file_list = await orchestrator.list_task_output_files(experiment_id, task_id)
+            file_list = await orchestrator.results.list_task_output_files(experiment_id, task_id)
 
             buffer = io.BytesIO()
             with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
@@ -50,7 +50,9 @@ async def zip_stream() -> AsyncIterable:
                     zip_info.compress_type = zipfile.ZIP_DEFLATED
 
                     with zip_file.open(zip_info, mode="w") as file_in_zip:
-                        async for chunk in orchestrator.stream_task_output_file(experiment_id, task_id, file_name):
+                        async for chunk in orchestrator.results.download_task_output_file(
+                            experiment_id, task_id, file_name
+                        ):
                             file_in_zip.write(chunk)
 
                     if buffer.tell() > _CHUNK_SIZE:
diff --git a/eos/web_api/orchestrator/controllers/lab_controller.py b/eos/web_api/orchestrator/controllers/lab_controller.py
index a1bc80a..a25c038 100644
--- a/eos/web_api/orchestrator/controllers/lab_controller.py
+++ b/eos/web_api/orchestrator/controllers/lab_controller.py
@@ -1,9 +1,8 @@
 from litestar import Controller, put, get, Response
 from litestar.status_codes import HTTP_200_OK
-from omegaconf import OmegaConf
 
 from eos.orchestration.orchestrator import Orchestrator
-from eos.web_api.common.entities import LabLoadedStatusesResponse, LabTypes
+from eos.web_api.common.entities import LabTypes
 from eos.web_api.public.exception_handling import handle_exceptions
 
@@ -15,29 +14,29 @@ class LabController(Controller):
     async def get_lab_devices(
         self, lab_types: list[str] | None, task_type: str | None, orchestrator: Orchestrator
     ) -> Response:
-        lab_devices = await orchestrator.get_lab_devices(lab_types, task_type)
+        lab_devices = await orchestrator.labs.get_lab_devices(lab_types, task_type)
 
         # Convert LabDeviceConfig objects to plain dictionaries
         dict_lab_devices = {}
         for lab_type, devices in lab_devices.items():
-            dict_lab_devices[lab_type] = {name: OmegaConf.to_object(device) for name, device in devices.items()}
+            dict_lab_devices[lab_type] = {name: device.model_dump() for name, device in devices.items()}
 
         return Response(content=dict_lab_devices, status_code=HTTP_200_OK)
 
     @put("/update_loaded")
     @handle_exceptions("Failed to update loaded labs")
     async def update_loaded_labs(self, data: LabTypes, orchestrator: Orchestrator) -> Response:
-        await orchestrator.update_loaded_labs(set(data.lab_types))
+        await orchestrator.loading.update_loaded_labs(set(data.lab_types))
         return Response(content=None, status_code=HTTP_200_OK)
 
     @put("/reload")
     @handle_exceptions("Failed to reload labs")
     async def reload_labs(self, data: LabTypes, orchestrator: Orchestrator) -> Response:
-        await orchestrator.reload_labs(set(data.lab_types))
+        await orchestrator.loading.reload_labs(set(data.lab_types))
         return Response(content=None, status_code=HTTP_200_OK)
 
-    @get("/loaded_statuses")
-    @handle_exceptions("Failed to get lab loaded statuses")
-    async def get_lab_loaded_statuses(self, orchestrator: Orchestrator) -> LabLoadedStatusesResponse:
-        lab_loaded_statuses = await orchestrator.get_lab_loaded_statuses()
-        return LabLoadedStatusesResponse(lab_loaded_statuses=lab_loaded_statuses)
+    @get("/loaded")
+    @handle_exceptions("Failed to get loaded labs")
+    async def get_loaded_labs(self, orchestrator: Orchestrator) -> Response:
+        lab_loaded_statuses = await orchestrator.loading.list_loaded_labs()
+        return Response(content=lab_loaded_statuses, status_code=HTTP_200_OK)
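
Note the route rename that runs through these controllers: clients that polled ``/loaded_statuses`` should now hit ``/loaded``, which returns the loaded-status mapping as plain JSON instead of a wrapper model. A sketch (paths as used by the public controllers below; host and port are assumptions):

.. code-block:: python

    import requests

    # The old /loaded_statuses routes are replaced by /loaded on both resources.
    loaded_labs = requests.get("http://localhost:8000/api/labs/loaded").json()
    loaded_experiments = requests.get("http://localhost:8000/api/experiments/loaded").json()
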
diff --git a/eos/web_api/orchestrator/controllers/task_controller.py b/eos/web_api/orchestrator/controllers/task_controller.py
index 0e5f50d..45eb6cf 100644
--- a/eos/web_api/orchestrator/controllers/task_controller.py
+++ b/eos/web_api/orchestrator/controllers/task_controller.py
@@ -1,10 +1,10 @@
 from litestar import Controller, Response, get
 from litestar.handlers import post
 from litestar.status_codes import HTTP_200_OK, HTTP_201_CREATED
-from omegaconf import OmegaConf
 
 from eos.orchestration.orchestrator import Orchestrator
-from eos.web_api.common.entities import SubmitTaskRequest, TaskTypesResponse
+from eos.tasks.entities.task import TaskDefinition
+from eos.web_api.common.entities import TaskTypesResponse
 from eos.web_api.public.exception_handling import handle_exceptions
 
@@ -14,32 +14,29 @@ class TaskController(Controller):
     @get("/{experiment_id:str}/{task_id:str}")
     @handle_exceptions("Failed to get task")
     async def get_task(self, experiment_id: str, task_id: str, orchestrator: Orchestrator) -> Response:
-        task = await orchestrator.get_task(experiment_id, task_id)
+        task = await orchestrator.tasks.get_task(experiment_id, task_id)
         return Response(content=task.model_dump_json(), status_code=HTTP_200_OK)
 
     @post("/submit")
     @handle_exceptions("Failed to submit task")
-    async def submit_task(self, data: SubmitTaskRequest, orchestrator: Orchestrator) -> Response:
-        orchestrator.submit_task(
-            data.task_config, data.resource_allocation_priority, data.resource_allocation_timeout
-        )
+    async def submit_task(self, data: TaskDefinition, orchestrator: Orchestrator) -> Response:
+        orchestrator.tasks.submit_task(data)
         return Response(content=None, status_code=HTTP_201_CREATED)
 
     @post("/{task_id:str}/cancel")
     @handle_exceptions("Failed to cancel task")
     async def cancel_task(self, task_id: str, orchestrator: Orchestrator) -> Response:
-        await orchestrator.cancel_task(task_id)
+        await orchestrator.tasks.cancel_task(task_id)
         return Response(content=None, status_code=HTTP_200_OK)
 
     @get("/types")
     @handle_exceptions("Failed to get task types")
     async def get_task_types(self, orchestrator: Orchestrator) -> TaskTypesResponse:
-        task_types = await orchestrator.get_task_types()
+        task_types = await orchestrator.tasks.get_task_types()
         return TaskTypesResponse(task_types=task_types)
 
     @get("/{task_type:str}/spec")
     @handle_exceptions("Failed to get task spec")
     async def get_task_spec(self, task_type: str, orchestrator: Orchestrator) -> Response:
-        task_spec = await orchestrator.get_task_spec(task_type)
-        task_spec = OmegaConf.to_object(task_spec)
+        task_spec = await orchestrator.tasks.get_task_spec(task_type)
         return Response(content=task_spec, status_code=HTTP_200_OK)
diff --git a/eos/web_api/public/controllers/campaign_controller.py b/eos/web_api/public/controllers/campaign_controller.py
index c1bf2ab..7bf1b02 100644
--- a/eos/web_api/public/controllers/campaign_controller.py
+++ b/eos/web_api/public/controllers/campaign_controller.py
@@ -4,7 +4,7 @@
 from litestar.handlers import post
 from litestar.status_codes import HTTP_200_OK, HTTP_404_NOT_FOUND, HTTP_201_CREATED
 
-from eos.web_api.common.entities import SubmitCampaignRequest
+from eos.campaigns.entities.campaign import CampaignDefinition
 from eos.web_api.public.exception_handling import handle_exceptions
 
@@ -26,7 +26,7 @@ async def get_campaign(self, campaign_id: str, state: State) -> Response:
     @post("/submit")
     @handle_exceptions("Failed to submit campaign")
-    async def submit_campaign(self, data: SubmitCampaignRequest, state: State) -> Response:
+    async def submit_campaign(self, data: CampaignDefinition, state: State) -> Response:
         orchestrator_client = state.orchestrator_client
         async with orchestrator_client.post("/api/campaigns/submit", json=data.model_dump()) as response:
             if response.status == HTTP_201_CREATED:
diff --git a/eos/web_api/public/controllers/experiment_controller.py b/eos/web_api/public/controllers/experiment_controller.py
index 3049040..be242f1 100644
--- a/eos/web_api/public/controllers/experiment_controller.py
+++ b/eos/web_api/public/controllers/experiment_controller.py
@@ -4,10 +4,9 @@
 from litestar.handlers import post
 from litestar.status_codes import HTTP_200_OK, HTTP_201_CREATED, HTTP_404_NOT_FOUND
 
+from eos.experiments.entities.experiment import ExperimentDefinition
 from eos.web_api.common.entities import (
-    SubmitExperimentRequest,
     ExperimentTypesResponse,
-    ExperimentLoadedStatusesResponse,
     ExperimentTypes,
 )
 from eos.web_api.public.exception_handling import handle_exceptions
@@ -30,7 +29,7 @@ async def get_experiment(self, experiment_id: str, state: State) -> Response:
     @post("/submit")
     @handle_exceptions("Failed to submit experiment")
-    async def submit_experiment(self, data: SubmitExperimentRequest, state: State) -> Response:
+    async def submit_experiment(self, data: ExperimentDefinition, state: State) -> Response:
         orchestrator_client = state.orchestrator_client
         async with orchestrator_client.post("/api/experiments/submit", json=data.model_dump()) as response:
             if response.status == HTTP_201_CREATED:
@@ -94,23 +93,22 @@ async def get_experiment_types(self, state: State) -> ExperimentTypesResponse:
 
         raise HTTPException(status_code=response.status, detail="Error fetching experiment types")
 
-    @get("/loaded_statuses")
-    @handle_exceptions("Failed to get experiment loaded statuses")
-    async def get_experiment_loaded_statuses(self, state: State) -> ExperimentLoadedStatusesResponse:
+    @get("/loaded")
+    @handle_exceptions("Failed to get loaded experiments")
+    async def get_loaded_experiments(self, state: State) -> dict:
         orchestrator_client = state.orchestrator_client
-        async with orchestrator_client.get("/api/experiments/loaded_statuses") as response:
+        async with orchestrator_client.get("/api/experiments/loaded") as response:
             if response.status == HTTP_200_OK:
-                return ExperimentLoadedStatusesResponse(**await response.json())
+                return await response.json()
 
         raise HTTPException(status_code=response.status, detail="Error fetching experiment loaded statuses")
 
     @get("/{experiment_type:str}/dynamic_params_template")
     @handle_exceptions("Failed to get dynamic parameters template")
-    async def get_experiment_dynamic_params_template(self, experiment_type: str, state: State) -> Response:
+    async def get_experiment_dynamic_params_template(self, experiment_type: str, state: State) -> dict:
         orchestrator_client = state.orchestrator_client
         async with orchestrator_client.get(f"/api/experiments/{experiment_type}/dynamic_params_template") as response:
             if response.status == HTTP_200_OK:
-                dynamic_params_template = await response.json()
-                return Response(content=dynamic_params_template, status_code=HTTP_200_OK)
+                return await response.json()
 
             raise HTTPException(status_code=response.status, detail="Error fetching dynamic parameters template")
diff --git a/eos/web_api/public/controllers/lab_controller.py b/eos/web_api/public/controllers/lab_controller.py
index ce3cb22..fea8b36 100644
--- a/eos/web_api/public/controllers/lab_controller.py
+++ b/eos/web_api/public/controllers/lab_controller.py
@@ -3,7 +3,7 @@
 from litestar.exceptions import HTTPException
 from litestar.status_codes import HTTP_200_OK
 
-from eos.web_api.common.entities import LabLoadedStatusesResponse, LabTypes
+from eos.web_api.common.entities import LabTypes
 from eos.web_api.public.exception_handling import handle_exceptions
@@ -62,12 +62,13 @@ async def reload_labs(self, data: LabTypes, state: State) -> Response:
 
         raise HTTPException(status_code=response.status, detail="Error reloading labs")
 
-    @get("/loaded_statuses")
-    @handle_exceptions("Failed to get
lab loaded statuses") - async def get_lab_loaded_statuses(self, state: State) -> LabLoadedStatusesResponse: + @get("/loaded") + @handle_exceptions("Failed to check loaded labs") + async def get_loaded_labs(self, state: State) -> Response: orchestrator_client = state.orchestrator_client - async with orchestrator_client.get("/api/labs/loaded_statuses") as response: + async with orchestrator_client.get("/api/labs/loaded") as response: if response.status == HTTP_200_OK: - return LabLoadedStatusesResponse(**await response.json()) + loaded_labs = await response.json() + return Response(content=loaded_labs, status_code=HTTP_200_OK) raise HTTPException(status_code=response.status, detail="Error fetching lab loaded statuses") diff --git a/eos/web_api/public/controllers/task_controller.py b/eos/web_api/public/controllers/task_controller.py index 8fab1e3..90bf4ea 100644 --- a/eos/web_api/public/controllers/task_controller.py +++ b/eos/web_api/public/controllers/task_controller.py @@ -4,7 +4,8 @@ from litestar.handlers import post from litestar.status_codes import HTTP_200_OK, HTTP_201_CREATED, HTTP_404_NOT_FOUND -from eos.web_api.common.entities import SubmitTaskRequest, TaskTypesResponse +from eos.tasks.entities.task import TaskDefinition +from eos.web_api.common.entities import TaskTypesResponse from eos.web_api.public.exception_handling import handle_exceptions @@ -26,7 +27,7 @@ async def get_task(self, experiment_id: str, task_id: str, state: State) -> Resp @post("/submit") @handle_exceptions("Failed to submit task") - async def submit_task(self, data: SubmitTaskRequest, state: State) -> Response: + async def submit_task(self, data: TaskDefinition, state: State) -> Response: orchestrator_client = state.orchestrator_client async with orchestrator_client.post("/api/tasks/submit", json=data.model_dump()) as response: if response.status == HTTP_201_CREATED: diff --git a/pdm.lock b/pdm.lock index 7825cea..8a57d61 100644 --- a/pdm.lock +++ b/pdm.lock @@ -5,7 +5,7 @@ groups = ["default", "dev", "docs"] strategy = ["inherit_metadata"] lock_version = "4.5.0" -content_hash = "sha256:f11de7d602ce06bb8d7d0c62e185925068eacb0e790be5334c7ddfda9ceefa16" +content_hash = "sha256:fcc243850de3127d32dc8a1860ed4e0f7e6424ec19abba7d4d18bf357e09888b" [[metadata.targets]] requires_python = ">=3.10" @@ -152,18 +152,6 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[[package]] -name = "antlr4-python3-runtime" -version = "4.9.3" -summary = "ANTLR 4.9.3 runtime for Python 3.7" -groups = ["default"] -dependencies = [ - "typing; python_version < \"3.5\"", -] -files = [ - {file = "antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b"}, -] - [[package]] name = "anyio" version = "4.4.0" @@ -310,7 +298,7 @@ files = [ [[package]] name = "bofire" -version = "0.0.13" +version = "0.0.15" requires_python = ">=3.9.0" summary = "" groups = ["default"] @@ -322,31 +310,32 @@ dependencies = [ "typing-extensions", ] files = [ - {file = "bofire-0.0.13-py3-none-any.whl", hash = "sha256:a1b47732e70770a591d74bb24cdcd1ad1048b60e624df52a1529c6c54f8822c8"}, - {file = "bofire-0.0.13.tar.gz", hash = "sha256:d1d83e781c63992c1fc9157587189251b4c06f6c9196f0e000e1cd329ca0fe6a"}, + {file = "bofire-0.0.15-py3-none-any.whl", hash = "sha256:b142059531f9458bdec6a293ad8e82f29e3ae5f778e069843c27192ac2391e1d"}, + {file = "bofire-0.0.15.tar.gz", hash = 
"sha256:15bf807dd78e93d694933cbf0ed4ddfa1bffd365febd45429f63b7e2ccbd1fcb"}, ] [[package]] name = "bofire" -version = "0.0.13" +version = "0.0.15" extras = ["optimization"] requires_python = ">=3.9.0" summary = "" groups = ["default"] dependencies = [ - "bofire==0.0.13", + "bofire==0.0.15", "botorch>=0.10.0", "cloudpickle>=2.0.0", "cvxpy[clarabel]", "formulaic>=1.0.1", "multiprocess", + "numpy", "plotly", "scikit-learn>=1.0.0", "sympy>=1.12", ] files = [ - {file = "bofire-0.0.13-py3-none-any.whl", hash = "sha256:a1b47732e70770a591d74bb24cdcd1ad1048b60e624df52a1529c6c54f8822c8"}, - {file = "bofire-0.0.13.tar.gz", hash = "sha256:d1d83e781c63992c1fc9157587189251b4c06f6c9196f0e000e1cd329ca0fe6a"}, + {file = "bofire-0.0.15-py3-none-any.whl", hash = "sha256:b142059531f9458bdec6a293ad8e82f29e3ae5f778e069843c27192ac2391e1d"}, + {file = "bofire-0.0.15.tar.gz", hash = "sha256:15bf807dd78e93d694933cbf0ed4ddfa1bffd365febd45429f63b7e2ccbd1fcb"}, ] [[package]] @@ -1522,7 +1511,7 @@ files = [ [[package]] name = "minio" -version = "7.2.8" +version = "7.2.10" requires_python = ">3.8" summary = "MinIO Python SDK for Amazon S3 Compatible Cloud Storage" groups = ["default"] @@ -1534,8 +1523,8 @@ dependencies = [ "urllib3", ] files = [ - {file = "minio-7.2.8-py3-none-any.whl", hash = "sha256:aa3b485788b63b12406a5798465d12a57e4be2ac2a58a8380959b6b748e64ddd"}, - {file = "minio-7.2.8.tar.gz", hash = "sha256:f8af2dafc22ebe1aef3ac181b8e217037011c430aa6da276ed627e55aaf7c815"}, + {file = "minio-7.2.10-py3-none-any.whl", hash = "sha256:5961c58192b1d70d3a2a362064b8e027b8232688998a6d1251dadbb02ab57a7d"}, + {file = "minio-7.2.10.tar.gz", hash = "sha256:418c31ac79346a580df04a0e14db1becbc548a6e7cca61f9bc4ef3bcd336c449"}, ] [[package]] @@ -1733,13 +1722,13 @@ files = [ [[package]] name = "networkx" -version = "3.4.1" +version = "3.4.2" requires_python = ">=3.10" summary = "Python package for creating and manipulating graphs and networks" groups = ["default"] files = [ - {file = "networkx-3.4.1-py3-none-any.whl", hash = "sha256:e30a87b48c9a6a7cc220e732bffefaee585bdb166d13377734446ce1a0620eed"}, - {file = "networkx-3.4.1.tar.gz", hash = "sha256:f9df45e85b78f5bd010993e897b4f1fdb242c11e015b101bd951e5c0e29982d8"}, + {file = "networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f"}, + {file = "networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1"}, ] [[package]] @@ -1932,22 +1921,6 @@ files = [ {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, ] -[[package]] -name = "omegaconf" -version = "2.3.0" -requires_python = ">=3.6" -summary = "A flexible configuration library" -groups = ["default"] -dependencies = [ - "PyYAML>=5.1.0", - "antlr4-python3-runtime==4.9.*", - "dataclasses; python_version == \"3.6\"", -] -files = [ - {file = "omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b"}, - {file = "omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7"}, -] - [[package]] name = "opencensus" version = "0.11.4" @@ -2619,7 +2592,7 @@ files = [ [[package]] name = "ray" -version = "2.37.0" +version = "2.39.0" requires_python = ">=3.9" summary = "Ray provides a simple, universal API for building distributed applications." 
groups = ["default"] @@ -2636,26 +2609,26 @@ dependencies = [ "requests", ] files = [ - {file = "ray-2.37.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:96366285038fe0c47e975ffd64eb891f70fb863a80be91c0be64f2ab0cf16d9c"}, - {file = "ray-2.37.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:31c55de41b7e1899a62f2dd6a693ffca0a4cb52633aa66617e3816d48b70aac3"}, - {file = "ray-2.37.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:aee7ff189fd52530d020b13c5e7e6da55e65456193a349d39635a72981e521db"}, - {file = "ray-2.37.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:29932441e68ab7dad35b276c763670bf42ebf721cddc4f4de8200bd92ac05c58"}, - {file = "ray-2.37.0-cp310-cp310-win_amd64.whl", hash = "sha256:8a96139143584558507b7bca05581962d92ff86fdd0c58210ed53adc7340ec98"}, - {file = "ray-2.37.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:fa642e9b34e88c6a7edb17b291201351d44f063e04ba9f1e83e42aaf492fc14a"}, - {file = "ray-2.37.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c53ee350a009bab6b811254f8407387812de9a290269e32dbf7c3f0dce6c93c9"}, - {file = "ray-2.37.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:60298e199d9938d3be7418e0645aae312f1283e31123991053d36d0ff1e4ec43"}, - {file = "ray-2.37.0-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:b420279ca14f02cc27fc592ff1f28da9aa08b962316bf65ddf370db877082e91"}, - {file = "ray-2.37.0-cp311-cp311-win_amd64.whl", hash = "sha256:7faff20ea7a06612d3cd860a61d2736aa9f82d0d2bcef0917717ced67c8b51c5"}, - {file = "ray-2.37.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:860f3d45438c3daad30f034f107e3fed05a710c7251e10714f942be598715bd2"}, - {file = "ray-2.37.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0b8c23ced4186040dee37e982227e3b1296e2fcbd4c520e4399e5d99ed3c641d"}, - {file = "ray-2.37.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:75cd9a1f6f332ac00d77154b24bd38f4b46a4e600cd02a2440e69b918273b475"}, - {file = "ray-2.37.0-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:0268c7bc2e8bb6ef9bb8969299deb5857bf672bfcb59da95db7495a8a502f8ba"}, - {file = "ray-2.37.0-cp312-cp312-win_amd64.whl", hash = "sha256:4132f79902160c650eaffe1ed1265e5b88d461ff5f3a777a16a750beeed7de1e"}, + {file = "ray-2.39.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13d62cead910f433817ca5b41eda75d9c24e81a6b727e0d4e9c5817da86eca5b"}, + {file = "ray-2.39.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74219fade4acaf722d34a2630008220a2a5b2ba856e874cd5a8c24ab2f2b2412"}, + {file = "ray-2.39.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:54ed235b4542ad6d0e317988dc4feaf46af99902f3dfd2097600e0294751bf88"}, + {file = "ray-2.39.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:6298fb981cd0fa8607f1917deb27925ab8add48c60ba5bd0f6cf40d4cc5dace4"}, + {file = "ray-2.39.0-cp310-cp310-win_amd64.whl", hash = "sha256:c9d1a26fa3c4d32555c483fab57f54c4ba017f7552732fe9841396aaa24ee6ea"}, + {file = "ray-2.39.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5547f2e6cf3b5d5aaea8aabea2d223a65c9566db198349c0aac668f454710f1a"}, + {file = "ray-2.39.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f8a83c2b7719386b3f8d6e3120aae49d9aa4cf49050acaee059b45df92eb281"}, + {file = "ray-2.39.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:413488eb2f8bfced8ecc269b120321f33106cbe412a69c3e23ce20c6d5b6f702"}, + {file = "ray-2.39.0-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:21aee127ae1a9cf6193001ab41d2551bcc81331ba3b7196d000f16d10f15c705"}, + {file = "ray-2.39.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:fdcb7ad51883d194f7b49f23533d29b3c96d78034f829b6cde1e24b6783dff9d"}, + {file = "ray-2.39.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:77fbcf0002cfbb673b2832e273ee8a834358a2a2bff77e2ff5c97924fcd2b389"}, + {file = "ray-2.39.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a10cfca3a2f05d285ba1ab3cdd3ce43ec2934b05eb91516a9766bcfc4c070425"}, + {file = "ray-2.39.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:f8d01550f718a65e0be48da578fa2a3f2e1be85a5453d4b98c3576e1cfaab01b"}, + {file = "ray-2.39.0-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:016930e6ba74b91b40117a64b24f7bfff48a6a780f23d2b064a7a3f43bc4e1a2"}, + {file = "ray-2.39.0-cp312-cp312-win_amd64.whl", hash = "sha256:4893cc7fd8b3c48c68c3d90bc5fe2023ee2732f91e9664ee79e8272b18ddb170"}, ] [[package]] name = "ray" -version = "2.37.0" +version = "2.39.0" extras = ["default"] requires_python = ">=3.9" summary = "Ray provides a simple, universal API for building distributed applications." @@ -2671,27 +2644,27 @@ dependencies = [ "prometheus-client>=0.7.1", "py-spy>=0.2.0", "pydantic!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3", - "ray==2.37.0", + "ray==2.39.0", "requests", "smart-open", "virtualenv!=20.21.1,>=20.0.24", ] files = [ - {file = "ray-2.37.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:96366285038fe0c47e975ffd64eb891f70fb863a80be91c0be64f2ab0cf16d9c"}, - {file = "ray-2.37.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:31c55de41b7e1899a62f2dd6a693ffca0a4cb52633aa66617e3816d48b70aac3"}, - {file = "ray-2.37.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:aee7ff189fd52530d020b13c5e7e6da55e65456193a349d39635a72981e521db"}, - {file = "ray-2.37.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:29932441e68ab7dad35b276c763670bf42ebf721cddc4f4de8200bd92ac05c58"}, - {file = "ray-2.37.0-cp310-cp310-win_amd64.whl", hash = "sha256:8a96139143584558507b7bca05581962d92ff86fdd0c58210ed53adc7340ec98"}, - {file = "ray-2.37.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:fa642e9b34e88c6a7edb17b291201351d44f063e04ba9f1e83e42aaf492fc14a"}, - {file = "ray-2.37.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c53ee350a009bab6b811254f8407387812de9a290269e32dbf7c3f0dce6c93c9"}, - {file = "ray-2.37.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:60298e199d9938d3be7418e0645aae312f1283e31123991053d36d0ff1e4ec43"}, - {file = "ray-2.37.0-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:b420279ca14f02cc27fc592ff1f28da9aa08b962316bf65ddf370db877082e91"}, - {file = "ray-2.37.0-cp311-cp311-win_amd64.whl", hash = "sha256:7faff20ea7a06612d3cd860a61d2736aa9f82d0d2bcef0917717ced67c8b51c5"}, - {file = "ray-2.37.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:860f3d45438c3daad30f034f107e3fed05a710c7251e10714f942be598715bd2"}, - {file = "ray-2.37.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0b8c23ced4186040dee37e982227e3b1296e2fcbd4c520e4399e5d99ed3c641d"}, - {file = "ray-2.37.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:75cd9a1f6f332ac00d77154b24bd38f4b46a4e600cd02a2440e69b918273b475"}, - {file = "ray-2.37.0-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:0268c7bc2e8bb6ef9bb8969299deb5857bf672bfcb59da95db7495a8a502f8ba"}, - {file = "ray-2.37.0-cp312-cp312-win_amd64.whl", hash = "sha256:4132f79902160c650eaffe1ed1265e5b88d461ff5f3a777a16a750beeed7de1e"}, + {file = "ray-2.39.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13d62cead910f433817ca5b41eda75d9c24e81a6b727e0d4e9c5817da86eca5b"}, + {file = "ray-2.39.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:74219fade4acaf722d34a2630008220a2a5b2ba856e874cd5a8c24ab2f2b2412"}, + {file = "ray-2.39.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:54ed235b4542ad6d0e317988dc4feaf46af99902f3dfd2097600e0294751bf88"}, + {file = "ray-2.39.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:6298fb981cd0fa8607f1917deb27925ab8add48c60ba5bd0f6cf40d4cc5dace4"}, + {file = "ray-2.39.0-cp310-cp310-win_amd64.whl", hash = "sha256:c9d1a26fa3c4d32555c483fab57f54c4ba017f7552732fe9841396aaa24ee6ea"}, + {file = "ray-2.39.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:5547f2e6cf3b5d5aaea8aabea2d223a65c9566db198349c0aac668f454710f1a"}, + {file = "ray-2.39.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f8a83c2b7719386b3f8d6e3120aae49d9aa4cf49050acaee059b45df92eb281"}, + {file = "ray-2.39.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:413488eb2f8bfced8ecc269b120321f33106cbe412a69c3e23ce20c6d5b6f702"}, + {file = "ray-2.39.0-cp311-cp311-manylinux2014_x86_64.whl", hash = "sha256:21aee127ae1a9cf6193001ab41d2551bcc81331ba3b7196d000f16d10f15c705"}, + {file = "ray-2.39.0-cp311-cp311-win_amd64.whl", hash = "sha256:fdcb7ad51883d194f7b49f23533d29b3c96d78034f829b6cde1e24b6783dff9d"}, + {file = "ray-2.39.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:77fbcf0002cfbb673b2832e273ee8a834358a2a2bff77e2ff5c97924fcd2b389"}, + {file = "ray-2.39.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a10cfca3a2f05d285ba1ab3cdd3ce43ec2934b05eb91516a9766bcfc4c070425"}, + {file = "ray-2.39.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:f8d01550f718a65e0be48da578fa2a3f2e1be85a5453d4b98c3576e1cfaab01b"}, + {file = "ray-2.39.0-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:016930e6ba74b91b40117a64b24f7bfff48a6a780f23d2b064a7a3f43bc4e1a2"}, + {file = "ray-2.39.0-cp312-cp312-win_amd64.whl", hash = "sha256:4893cc7fd8b3c48c68c3d90bc5fe2023ee2732f91e9664ee79e8272b18ddb170"}, ] [[package]] @@ -2728,18 +2701,18 @@ files = [ [[package]] name = "rich" -version = "13.8.1" -requires_python = ">=3.7.0" +version = "13.9.4" +requires_python = ">=3.8.0" summary = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" groups = ["default"] dependencies = [ "markdown-it-py>=2.2.0", "pygments<3.0.0,>=2.13.0", - "typing-extensions<5.0,>=4.0.0; python_version < \"3.9\"", + "typing-extensions<5.0,>=4.0.0; python_version < \"3.11\"", ] files = [ - {file = "rich-13.8.1-py3-none-any.whl", hash = "sha256:1760a3c0848469b97b558fc61c85233e3dafb69c7a071b4d60c38099d3cd4c06"}, - {file = "rich-13.8.1.tar.gz", hash = "sha256:8260cda28e3db6bf04d2d1ef4dbc03ba80a824c88b0e7668a0f23126a424844a"}, + {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, + {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, ] [[package]] @@ -3374,7 +3347,7 @@ files = [ [[package]] name = "typer" -version = "0.12.5" +version = "0.13.0" requires_python = ">=3.7" summary = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
groups = ["default"] @@ -3385,8 +3358,8 @@ dependencies = [ "typing-extensions>=3.7.4.3", ] files = [ - {file = "typer-0.12.5-py3-none-any.whl", hash = "sha256:62fe4e471711b147e3365034133904df3e235698399bc4de2b36c8579298d52b"}, - {file = "typer-0.12.5.tar.gz", hash = "sha256:f592f089bedcc8ec1b974125d64851029c3b1af145f04aca64d69410f0c9b722"}, + {file = "typer-0.13.0-py3-none-any.whl", hash = "sha256:d85fe0b777b2517cc99c8055ed735452f2659cd45e451507c76f48ce5c1d00e2"}, + {file = "typer-0.13.0.tar.gz", hash = "sha256:f1c7198347939361eec90139ffa0fd8b3df3a2259d5852a0f7400e476d95985c"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index 11fedfa..463e389 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "eos" -version = "0.4.0" +version = "0.6.0" description = "The Experiment Orchestration System (EOS) is a comprehensive software framework and runtime for laboratory automation." keywords = ["automation", "science", "lab", "experiment", "orchestration", "distributed", "infrastructure"] authors = [ @@ -22,21 +22,20 @@ classifiers = [ readme = "README.md" requires-python = ">=3.10" dependencies = [ - "ray[default]~=2.37.0", - "typer~=0.12.5", - "rich~=13.8.1", - "omegaconf~=2.3.0", + "ray[default]~=2.39.0", + "typer~=0.13.0", + "rich~=13.9.4", "jinja2~=3.1.4", "PyYAML~=6.0.2", - "networkx~=3.4.1", + "networkx~=3.4.2", "pymongo~=4.9.2", "motor~=3.6.0", "pydantic~=2.9.2", - "bofire[optimization]~=0.0.13", + "bofire[optimization]~=0.0.15", "pandas~=2.2.3", "numpy~=1.26.2", "litestar[standard]~=2.12.1", - "minio~=7.2.8", + "minio~=7.2.10", ] [project.optional-dependencies] @@ -136,7 +135,7 @@ lint.select = [ "W", # pycodestyle - warning "YTT", # flake8-2020 ] -lint.ignore = ["I001", "ANN001", "ANN002", "ANN003", "ANN101", "ANN204", "ANN401"] +lint.ignore = ["I001", "ANN001", "ANN002", "ANN003", "ANN101", "ANN102", "ANN204", "ANN401", "PLR0913", "N805"] [tool.ruff.lint.per-file-ignores] "tests/**/*.*" = [ diff --git a/tests/campaigns/__init__.py b/tests/campaigns/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_campaign_executor.py b/tests/campaigns/test_campaign_executor.py similarity index 82% rename from tests/test_campaign_executor.py rename to tests/campaigns/test_campaign_executor.py index 5e2be14..ec13fb8 100644 --- a/tests/test_campaign_executor.py +++ b/tests/campaigns/test_campaign_executor.py @@ -1,6 +1,7 @@ import asyncio -from eos.campaigns.entities.campaign import CampaignStatus +from eos.campaigns.campaign_executor import CampaignExecutor +from eos.campaigns.entities.campaign import CampaignStatus, CampaignDefinition from eos.campaigns.exceptions import EosCampaignExecutionError from eos.experiments.exceptions import EosExperimentExecutionError from tests.fixtures import * @@ -9,7 +10,7 @@ CAMPAIGN_ID = "optimize_multiplication_campaign" EXPERIMENT_TYPE = "optimize_multiplication" MAX_EXPERIMENTS = 30 -DO_OPTIMIZATION = True +OPTIMIZE = True @pytest.mark.parametrize( @@ -19,10 +20,38 @@ ) @pytest.mark.parametrize( "campaign_executor", - [(CAMPAIGN_ID, EXPERIMENT_TYPE, MAX_EXPERIMENTS, DO_OPTIMIZATION)], + [(CAMPAIGN_ID, EXPERIMENT_TYPE, MAX_EXPERIMENTS, OPTIMIZE)], indirect=True, ) class TestCampaignExecutor: + @pytest.fixture + def campaign_executor( + self, + configuration_manager, + campaign_manager, + campaign_optimizer_manager, + task_manager, + experiment_executor_factory, + ): + optimizer_computer_ip = "127.0.0.1" + + campaign_definition = CampaignDefinition( + id=CAMPAIGN_ID, + 
experiment_type=EXPERIMENT_TYPE, + max_experiments=MAX_EXPERIMENTS, + max_concurrent_experiments=1, + optimize=OPTIMIZE, + optimizer_computer_ip=optimizer_computer_ip, + ) + + return CampaignExecutor( + campaign_definition=campaign_definition, + campaign_manager=campaign_manager, + campaign_optimizer_manager=campaign_optimizer_manager, + task_manager=task_manager, + experiment_executor_factory=experiment_executor_factory, + ) + @pytest.mark.slow @pytest.mark.asyncio async def test_start_campaign(self, campaign_executor, campaign_manager): @@ -54,7 +83,6 @@ async def test_progress_campaign_failure(self, campaign_executor, campaign_manag await campaign_executor.start_campaign() await campaign_executor.progress_campaign() - # Mock the progress_experiment method to raise an EosExperimentExecutionError async def mock_progress_experiment(*args, **kwargs): raise EosExperimentExecutionError("Simulated experiment execution error") @@ -125,13 +153,15 @@ async def test_campaign_resuming( campaign_executor.cleanup() # Create a new campaign executor to resume the campaign - new_execution_parameters = CampaignExecutionParameters( - max_experiments=MAX_EXPERIMENTS, do_optimization=DO_OPTIMIZATION, resume=True + campaign_definition = CampaignDefinition( + id=CAMPAIGN_ID, + experiment_type=EXPERIMENT_TYPE, + max_experiments=MAX_EXPERIMENTS, + optimize=OPTIMIZE, + resume=True, ) new_campaign_executor = CampaignExecutor( - CAMPAIGN_ID, - EXPERIMENT_TYPE, - new_execution_parameters, + campaign_definition, campaign_manager, campaign_optimizer_manager, task_manager, @@ -162,10 +192,9 @@ async def test_campaign_cancellation_timeout(self, campaign_executor, campaign_m await campaign_executor.start_campaign() # Run until one experiment is complete - while ( - (await campaign_manager.get_campaign(CAMPAIGN_ID)).experiments_completed < 1 - or len(campaign_executor._experiment_executors) < 1 - ): + while (await campaign_manager.get_campaign(CAMPAIGN_ID)).experiments_completed < 1 or len( + campaign_executor._experiment_executors + ) < 1: await campaign_executor.progress_campaign() await asyncio.sleep(0.1) diff --git a/tests/campaigns/test_campaign_manager.py b/tests/campaigns/test_campaign_manager.py new file mode 100644 index 0000000..14b1c6b --- /dev/null +++ b/tests/campaigns/test_campaign_manager.py @@ -0,0 +1,143 @@ +from eos.campaigns.entities.campaign import CampaignStatus, CampaignDefinition +from eos.campaigns.exceptions import EosCampaignStateError +from tests.fixtures import * + +EXPERIMENT_TYPE = "water_purification" + + +def create_campaign_definition(campaign_id: str, max_experiments: int = 2) -> CampaignDefinition: + """Helper function to create a non-optimized campaign definition.""" + return CampaignDefinition( + id=campaign_id, + experiment_type=EXPERIMENT_TYPE, + max_experiments=max_experiments, + max_concurrent_experiments=1, + optimize=False, + optimizer_computer_ip="127.0.0.1", + dynamic_parameters=[{"param1": {"value": 0}}] * max_experiments, # Simplified params + metadata={"test": "metadata"}, + ) + + +@pytest.mark.parametrize("setup_lab_experiment", [("small_lab", EXPERIMENT_TYPE)], indirect=True) +class TestCampaignManager: + @pytest.mark.asyncio + async def test_create_campaign(self, campaign_manager): + await campaign_manager.create_campaign(create_campaign_definition("test_campaign")) + + campaign = await campaign_manager.get_campaign("test_campaign") + assert campaign.id == "test_campaign" + assert len(campaign.dynamic_parameters) == 2 + assert campaign.metadata == {"test": "metadata"} + 
+ @pytest.mark.asyncio + async def test_create_campaign_validation_errors(self, campaign_manager): + # Test missing dynamic parameters + with pytest.raises(ValueError, match="Campaign dynamic parameters must be provided"): + invalid_definition = CampaignDefinition( + id="test_campaign", + experiment_type=EXPERIMENT_TYPE, + max_experiments=2, + max_concurrent_experiments=1, + optimize=False, + optimizer_computer_ip="127.0.0.1", + dynamic_parameters=None, + ) + await campaign_manager.create_campaign(invalid_definition) + + # Test incorrect number of dynamic parameters + with pytest.raises(ValueError, match="Dynamic parameters must be provided for all experiments"): + invalid_definition = create_campaign_definition("test_campaign", max_experiments=3) + invalid_definition.dynamic_parameters = [{"param1": {"value": 0}}] # Only one set + await campaign_manager.create_campaign(invalid_definition) + + @pytest.mark.asyncio + async def test_create_campaign_nonexistent_type(self, campaign_manager): + with pytest.raises(EosCampaignStateError): + definition = create_campaign_definition("test_campaign") + definition.experiment_type = "nonexistent" + await campaign_manager.create_campaign(definition) + + @pytest.mark.asyncio + async def test_create_existing_campaign(self, campaign_manager): + definition = create_campaign_definition("test_campaign") + await campaign_manager.create_campaign(definition) + + with pytest.raises(EosCampaignStateError): + await campaign_manager.create_campaign(definition) + + @pytest.mark.asyncio + async def test_delete_campaign(self, campaign_manager): + await campaign_manager.create_campaign(create_campaign_definition("test_campaign")) + + campaign = await campaign_manager.get_campaign("test_campaign") + assert campaign is not None + + await campaign_manager.delete_campaign("test_campaign") + + campaign = await campaign_manager.get_campaign("test_campaign") + assert campaign is None + + @pytest.mark.asyncio + async def test_get_campaigns_by_status(self, campaign_manager): + # Create and set different statuses for campaigns + for campaign_id in ["campaign1", "campaign2", "campaign3"]: + await campaign_manager.create_campaign(create_campaign_definition(campaign_id)) + + await campaign_manager.start_campaign("campaign1") + await campaign_manager.start_campaign("campaign2") + await campaign_manager.complete_campaign("campaign3") + + running_campaigns = await campaign_manager.get_campaigns(status=CampaignStatus.RUNNING.value) + completed_campaigns = await campaign_manager.get_campaigns(status=CampaignStatus.COMPLETED.value) + + assert len(running_campaigns) == 2 + assert len(completed_campaigns) == 1 + assert all(c.status == CampaignStatus.RUNNING for c in running_campaigns) + assert all(c.status == CampaignStatus.COMPLETED for c in completed_campaigns) + + @pytest.mark.asyncio + async def test_campaign_lifecycle(self, campaign_manager): + await campaign_manager.create_campaign(create_campaign_definition("test_campaign")) + + # Test status transitions + campaign = await campaign_manager.get_campaign("test_campaign") + assert campaign.status == CampaignStatus.CREATED + assert campaign.start_time is None + assert campaign.end_time is None + + await campaign_manager.start_campaign("test_campaign") + campaign = await campaign_manager.get_campaign("test_campaign") + assert campaign.status == CampaignStatus.RUNNING + assert campaign.start_time is not None + assert campaign.end_time is None + + await campaign_manager.complete_campaign("test_campaign") + campaign = await 
campaign_manager.get_campaign("test_campaign") + assert campaign.status == CampaignStatus.COMPLETED + assert campaign.start_time is not None + assert campaign.end_time is not None + + @pytest.mark.asyncio + async def test_campaign_experiments(self, campaign_manager): + await campaign_manager.create_campaign(create_campaign_definition("test_campaign")) + + # Test adding experiments + await campaign_manager.add_campaign_experiment("test_campaign", "exp1") + await campaign_manager.add_campaign_experiment("test_campaign", "exp2") + + campaign = await campaign_manager.get_campaign("test_campaign") + assert len(campaign.current_experiment_ids) == 2 + assert "exp1" in campaign.current_experiment_ids + assert "exp2" in campaign.current_experiment_ids + + # Test removing single experiment + await campaign_manager.delete_campaign_experiment("test_campaign", "exp1") + campaign = await campaign_manager.get_campaign("test_campaign") + assert len(campaign.current_experiment_ids) == 1 + assert "exp2" in campaign.current_experiment_ids + + # Test clearing all experiments + await campaign_manager.delete_current_campaign_experiments("test_campaign") + campaign = await campaign_manager.get_campaign("test_campaign") + assert len(campaign.current_experiment_ids) == 0 diff --git a/tests/configuration/__init__.py b/tests/configuration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_configuration_manager.py b/tests/configuration/test_configuration_manager.py similarity index 100% rename from tests/test_configuration_manager.py rename to tests/configuration/test_configuration_manager.py diff --git a/tests/test_device_actor_wrapper_registry.py b/tests/configuration/test_device_actor_wrapper_registry.py similarity index 100% rename from tests/test_device_actor_wrapper_registry.py rename to tests/configuration/test_device_actor_wrapper_registry.py diff --git a/tests/test_lab_validation.py b/tests/configuration/test_lab_validation.py similarity index 94% rename from tests/test_lab_validation.py rename to tests/configuration/test_lab_validation.py index 0f38a93..f7cf076 100644 --- a/tests/test_lab_validation.py +++ b/tests/configuration/test_lab_validation.py @@ -12,7 +12,7 @@ def lab(configuration_manager): class TestLabValidation: def test_device_locations(self, configuration_manager, lab): - lab.devices.magnetic_mixer.location = "invalid_location" + lab.devices["magnetic_mixer"].location = "invalid_location" with pytest.raises(EosLabConfigurationError): LabValidator(configuration_manager._user_dir, lab).validate() @@ -24,7 +24,7 @@ def test_container_locations(self, configuration_manager, lab): LabValidator(configuration_manager._user_dir, lab).validate() def test_device_computers(self, configuration_manager, lab): - lab.devices.magnetic_mixer.computer = "invalid_computer" + lab.devices["magnetic_mixer"].computer = "invalid_computer" with pytest.raises(EosLabConfigurationError): LabValidator(configuration_manager._user_dir, lab).validate() diff --git a/tests/test_multi_lab_validation.py b/tests/configuration/test_multi_lab_validation.py similarity index 100% rename from tests/test_multi_lab_validation.py rename to tests/configuration/test_multi_lab_validation.py diff --git a/tests/configuration/test_task_input_parameter_validator.py b/tests/configuration/test_task_input_parameter_validator.py new file mode 100644 index 0000000..743f892 --- /dev/null +++ b/tests/configuration/test_task_input_parameter_validator.py @@ -0,0 +1,126 @@ +import pytest +from pydantic import ValidationError + 
+from eos.configuration.entities.task_parameters import TaskParameterType +from eos.configuration.entities.task import TaskConfig +from eos.configuration.entities.task_spec import TaskSpecConfig +from eos.tasks.exceptions import EosTaskValidationError +from eos.tasks.task_input_parameter_validator import TaskInputParameterValidator + + +class TestTaskInputParameterValidator: + @pytest.fixture + def task_spec(self): + return TaskSpecConfig( + type="test_task", + desc="A test task", + input_parameters={ + "int_param": {"type": "int", "unit": "n/a", "desc": "An integer parameter", "min": 0, "max": 100}, + "float_param": {"type": "float", "unit": "n/a", "desc": "A float parameter", "min": 0.0, "max": 1.0}, + "str_param": {"type": "str", "desc": "A string parameter"}, + "bool_param": {"type": "bool", "value": False, "desc": "A boolean parameter"}, + "list_param": {"type": "list", "desc": "A list parameter", "element_type": "int", "length": 3}, + "choice_param": { + "type": "choice", + "value": "A", + "desc": "A choice parameter", + "choices": ["A", "B", "C"], + }, + "dict_param": {"type": "dict", "desc": "A dictionary parameter"}, + }, + ) + + @pytest.fixture + def task_config(self, task_spec): + return TaskConfig( + id="test_task_1", + type="test_task", + parameters={ + "int_param": 50, + "float_param": 0.5, + "str_param": "test", + "bool_param": True, + "list_param": [1, 2, 3], + "dict_param": {"key": "value"}, + "choice_param": "A", + }, + ) + + @pytest.fixture + def validator(self, task_config, task_spec): + return TaskInputParameterValidator(task_config, task_spec) + + def test_valid_input_parameters(self, validator): + validator.validate_input_parameters() # Should not raise any exceptions + + @pytest.mark.parametrize( + ("param_name", "invalid_value"), + [ + ("int_param", "not_an_int"), + ("float_param", "not_a_float"), + ("bool_param", "not_a_bool"), + ("list_param", "not_a_list"), + ("dict_param", "not_a_dict"), + ("choice_param", "D"), + ], + ) + def test_invalid_input_parameters(self, validator, task_config, param_name, invalid_value): + task_config.parameters[param_name] = invalid_value + with pytest.raises((ValidationError, EosTaskValidationError)): + validator.validate_input_parameters() + + def test_missing_required_parameter(self, validator, task_config): + del task_config.parameters["int_param"] + with pytest.raises((ValidationError, EosTaskValidationError)): + validator.validate_input_parameters() + + def test_extra_parameter(self, validator, task_config): + task_config.parameters["extra_param"] = "extra" + with pytest.raises((ValidationError, EosTaskValidationError)): + validator.validate_input_parameters() + + @pytest.mark.parametrize( + ("param_type", "valid_values", "invalid_values"), + [ + (TaskParameterType.INT, [0, 50, 100, "50"], [-1, 101, "fifty"]), + (TaskParameterType.FLOAT, [0.0, 0.5, 1.0, "0.5"], [-0.1, 1.1, "half"]), + (TaskParameterType.BOOL, [True, False, "true", "false"], ["yes", "no", 2]), + (TaskParameterType.STR, ["test", "123", ""], []), + (TaskParameterType.LIST, [[1, 2, 3], [1, 2, 62]], [[1, 2], [1, 2, 3, 4], "not_a_list"]), + (TaskParameterType.DICT, [{"key": "value"}, {}], ["not_a_dict", [1, 2, 3]]), + (TaskParameterType.CHOICE, ["A", "B", "C"], ["D", 1, True]), + ], + ) + def test_parameter_type_conversion( + self, validator, task_config, task_spec, param_type, valid_values, invalid_values + ): + param_name = f"{param_type.value}_param" + task_spec.input_parameters[param_name].type = param_type.value + if param_type == TaskParameterType.CHOICE: + 
task_spec.input_parameters[param_name].choices = ["A", "B", "C"] + elif param_type == TaskParameterType.LIST: + task_spec.input_parameters[param_name].element_type = "int" + task_spec.input_parameters[param_name].length = 3 + + for valid_value in valid_values: + task_config.parameters[param_name] = valid_value + validator.validate_input_parameters() # Should not raise any exceptions + + for invalid_value in invalid_values: + task_config.parameters[param_name] = invalid_value + with pytest.raises((ValidationError, EosTaskValidationError)): + validator.validate_input_parameters() + + @pytest.mark.parametrize( + ("param_name", "invalid_value", "expected_error"), + [ + ("int_param", "$.some_reference", (ValidationError, EosTaskValidationError)), + ("int_param", "eos_dynamic", (ValidationError, EosTaskValidationError)), + ("int_param", 150, (ValidationError, EosTaskValidationError)), + ("list_param", [1, 2, 3, 4], (ValidationError, EosTaskValidationError)), + ], + ) + def test_specific_validation_cases(self, validator, task_config, param_name, invalid_value, expected_error): + task_config.parameters[param_name] = invalid_value + with pytest.raises(expected_error): + validator.validate_input_parameters() diff --git a/tests/configuration/test_task_specification_validation.py b/tests/configuration/test_task_specification_validation.py new file mode 100644 index 0000000..bb65319 --- /dev/null +++ b/tests/configuration/test_task_specification_validation.py @@ -0,0 +1,265 @@ +from pydantic import ValidationError + +from eos.configuration.entities.task_parameters import ( + TaskParameterFactory, + TaskParameterType, +) +from eos.configuration.entities.task_spec import ( + TaskSpecOutputParameterConfig, + TaskSpecConfig, +) +from tests.fixtures import * + + +class TestTaskSpecifications: + def test_invalid_parameter_type(self): + with pytest.raises(ValueError): + TaskParameterFactory.create( + "invalid_type", + value=120, + desc="Duration of evaporation in seconds.", + ) + + def test_numeric_parameter_unit_not_specified(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.INT, + unit="", + value=120, + min=60, + desc="Duration of evaporation in seconds.", + ) + + def test_numeric_parameter_value_not_numeric(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.INT, + unit="sec", + value="not_a_number", + min=60, + desc="Duration of evaporation in seconds.", + ) + + def test_numeric_parameter_min_greater_than_max(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.INT, + unit="sec", + value=120, + min=300, + max=60, + desc="Duration of evaporation in seconds.", + ) + + def test_numeric_parameter_out_of_range_min(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.INT, + unit="sec", + value=5, + min=60, + max=300, + desc="Duration of evaporation in seconds.", + ) + + def test_numeric_parameter_out_of_range_max(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.INT, + unit="sec", + value=100, + min=0, + max=80, + desc="Duration of evaporation in seconds.", + ) + + def test_boolean_parameter_invalid_value(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.BOOL, + value="not_a_boolean", + desc="Whether to sparge the evaporation vessel with nitrogen.", + ) + + def test_choice_parameter_choices_not_specified(self): + with 
pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.CHOICE, + choices=[], + value="method1", + desc="Method to use", + ) + + def test_choice_parameter_no_value(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.CHOICE, + choices=["method1", "method2"], + value=None, + desc="Method to use", + ) + + def test_choice_parameter_invalid_value(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.CHOICE, + choices=["method1", "method2"], + value="invalid_method", + desc="Method to use", + ) + + def test_list_parameter_invalid_element_type(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="invalid_type", + value=[1, 2, 3], + desc="List of elements", + ) + + def test_list_parameter_nested_list(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="list", + value=[[1], [2], [3]], + desc="List of elements", + ) + + def test_list_parameter_invalid_value(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="int", + value=4, + desc="List of elements", + ) + + def test_list_parameter_elements_not_same_type(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="int", + value=[1, True, "3"], + desc="List of elements", + ) + + def test_list_parameter_invalid_value_element_size(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="int", + value=[1, 2], + desc="List of elements", + ) + + def test_list_parameter_invalid_value_element_min(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="int", + value=[1, 2, 3], + min=[2, 2, "INVALID"], + desc="List of elements", + ) + + def test_list_parameter_invalid_value_element_max(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="int", + value=[1, 2, 3], + max=[2, 2, "INVALID"], + desc="List of elements", + ) + + def test_list_parameter_value_less_than_min(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="int", + value=[2, 2, 2], + min=[2, 2, 3], + desc="List of elements", + ) + + def test_list_parameter_value_greater_than_max(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="int", + value=[2, 2, 2], + max=[2, 2, 1], + desc="List of elements", + ) + + def test_list_parameter_invalid_min_max_size(self): + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="int", + value=[2, 2, 2], + min=[2, 2], + desc="List of elements", + ) + + with pytest.raises(ValidationError): + TaskParameterFactory.create( + TaskParameterType.LIST, + length=3, + element_type="int", + value=[2, 2, 2], + max=[2, 2], + desc="List of elements", + ) + + def test_parameter_factory_invalid_type(self): + with pytest.raises(ValueError): + TaskParameterFactory.create( + "invalid_type", + value=120, + desc="Duration of evaporation in seconds.", + ) + + def test_parameter_invalid_name(self, configuration_manager): + task_specs = 
+
+        task_spec = task_specs.get_spec_by_type("Magnetic Mixing")
+
+        task_spec.input_parameters["invalid_name*"] = {
+            "type": "int",
+            "unit": "sec",
+            "value": 120,
+            "desc": "Duration of evaporation in seconds.",
+        }
+
+        with pytest.raises(ValidationError):
+            TaskSpecConfig(**task_spec.model_dump())
+
+        del task_spec.input_parameters["invalid_name*"]
+
+    def test_output_numeric_parameter_unit_not_specified(self, configuration_manager):
+        with pytest.raises(ValidationError):
+            TaskSpecOutputParameterConfig(
+                type=TaskParameterType.INT,
+                unit="",
+                desc="Duration of evaporation in seconds.",
+            )
+
+    def test_output_non_numeric_parameter_unit_specified(self, configuration_manager):
+        with pytest.raises(ValidationError):
+            TaskSpecOutputParameterConfig(
+                type=TaskParameterType.BOOL,
+                unit="sec",
+                desc="Whether to sparge the evaporation vessel with nitrogen.",
+            )
diff --git a/tests/containers/__init__.py b/tests/containers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_container_allocator.py b/tests/containers/test_container_allocator.py
similarity index 96%
rename from tests/test_container_allocator.py
rename to tests/containers/test_container_allocator.py
index bfc86f3..fc1615b 100644
--- a/tests/test_container_allocator.py
+++ b/tests/containers/test_container_allocator.py
@@ -5,9 +5,7 @@
 from tests.fixtures import *
 
 
-@pytest.mark.parametrize(
-    "setup_lab_experiment", [("small_lab", "water_purification")], indirect=True
-)
+@pytest.mark.parametrize("setup_lab_experiment", [("small_lab", "water_purification")], indirect=True)
 class TestContainerAllocator:
     @pytest.mark.asyncio
     async def test_allocate_container(self, container_allocator):
@@ -143,6 +141,4 @@ async def test_deallocate_all_containers_by_owner(self, container_allocator):
         owner2_allocations = await container_allocator.get_allocations(owner="owner2")
         assert owner2_allocations == []
 
-        assert await container_allocator.get_allocations() == [
-            await container_allocator.get_allocation(container_id_1)
-        ]
+        assert await container_allocator.get_allocations() == [await container_allocator.get_allocation(container_id_1)]
diff --git a/tests/test_container_manager.py b/tests/containers/test_container_manager.py
similarity index 92%
rename from tests/test_container_manager.py
rename to tests/containers/test_container_manager.py
index bdb0ebc..506ca28 100644
--- a/tests/test_container_manager.py
+++ b/tests/containers/test_container_manager.py
@@ -44,8 +44,9 @@ async def test_add_container_metadata(self, container_manager):
     @pytest.mark.asyncio
     async def test_remove_container_metadata(self, container_manager):
         container_id = "acf829f859e04fee80d54a1ee918555d"
-        await container_manager.add_metadata(container_id,
-                                             {"substance": "water", "temperature": "cold", "color": "blue"})
+        await container_manager.add_metadata(
+            container_id, {"substance": "water", "temperature": "cold", "color": "blue"}
+        )
         await container_manager.remove_metadata(container_id, ["color", "temperature"])
 
         container = await container_manager.get_container(container_id)
diff --git a/tests/devices/__init__.py b/tests/devices/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_base_device.py b/tests/devices/test_base_device.py
similarity index 95%
rename from tests/test_base_device.py
rename to tests/devices/test_base_device.py
index e1a5cc1..181c61d 100644
--- a/tests/test_base_device.py
+++ b/tests/devices/test_base_device.py
@@ -12,7 +12,7 @@ def __init__(self, device_id: str, lab_id: str, device_type: str):
         self.mock_resource = None
         super().__init__(device_id, lab_id, device_type)
 
-    async def _initialize(self, initialization_parameters: dict[str, Any]) -> None:
+    async def _initialize(self, init_parameters: dict[str, Any]) -> None:
         self.mock_resource = Mock()
 
     async def _cleanup(self) -> None:
@@ -70,7 +70,7 @@ def test_exception_handling(self, mock_device):
     @pytest.mark.asyncio
     async def test_initialization_error(self):
         class FailingDevice(MockDevice):
-            async def _initialize(self, initialization_parameters: dict[str, Any]) -> None:
+            async def _initialize(self, init_parameters: dict[str, Any]) -> None:
                 raise ValueError("Initialization failed")
 
         device = FailingDevice("fail_device", "test_lab", "failing")
diff --git a/tests/test_device_allocator.py b/tests/devices/test_device_allocator.py
similarity index 97%
rename from tests/test_device_allocator.py
rename to tests/devices/test_device_allocator.py
index 097b291..6325605 100644
--- a/tests/test_device_allocator.py
+++ b/tests/devices/test_device_allocator.py
@@ -141,6 +141,4 @@ async def test_deallocate_all_by_owner(self, device_allocator):
         owner2_allocations = await device_allocator.get_allocations(owner="owner2")
         assert owner2_allocations == []
 
-        assert await device_allocator.get_allocations() == [
-            await device_allocator.get_allocation(LAB_ID, device_id_1)
-        ]
+        assert await device_allocator.get_allocations() == [await device_allocator.get_allocation(LAB_ID, device_id_1)]
diff --git a/tests/test_device_manager.py b/tests/devices/test_device_manager.py
similarity index 100%
rename from tests/test_device_manager.py
rename to tests/devices/test_device_manager.py
diff --git a/tests/experiments/__init__.py b/tests/experiments/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_experiment_executor.py b/tests/experiments/test_experiment_executor.py
similarity index 82%
rename from tests/test_experiment_executor.py
rename to tests/experiments/test_experiment_executor.py
index e8a2de3..4229db1 100644
--- a/tests/test_experiment_executor.py
+++ b/tests/experiments/test_experiment_executor.py
@@ -1,8 +1,9 @@
 import asyncio
 from unittest.mock import patch
 
-from eos.experiments.entities.experiment import ExperimentStatus
+from eos.experiments.entities.experiment import ExperimentStatus, ExperimentDefinition
 from eos.experiments.exceptions import EosExperimentExecutionError
+from eos.experiments.experiment_executor import ExperimentExecutor
 from eos.tasks.entities.task import TaskStatus
 from tests.fixtures import *
 
@@ -27,15 +28,32 @@
     [(LAB_ID, EXPERIMENT_TYPE)],
     indirect=True,
 )
-@pytest.mark.parametrize(
-    "experiment_executor",
-    [(EXPERIMENT_ID, EXPERIMENT_TYPE)],
-    indirect=True,
-)
 class TestExperimentExecutor:
+    @pytest.fixture
+    def experiment_executor(
+        self,
+        experiment_manager,
+        task_manager,
+        container_manager,
+        task_executor,
+        greedy_scheduler,
+        experiment_graph,
+    ):
+        return ExperimentExecutor(
+            experiment_definition=ExperimentDefinition(
+                id=EXPERIMENT_ID, type=EXPERIMENT_TYPE, dynamic_parameters=DYNAMIC_PARAMETERS
+            ),
+            experiment_graph=experiment_graph,
+            experiment_manager=experiment_manager,
+            task_manager=task_manager,
+            container_manager=container_manager,
+            task_executor=task_executor,
+            scheduler=greedy_scheduler,
+        )
+
     @pytest.mark.asyncio
     async def test_start_experiment(self, experiment_executor, experiment_manager):
-        await experiment_executor.start_experiment(DYNAMIC_PARAMETERS)
+        await experiment_executor.start_experiment()
 
         experiment = await experiment_manager.get_experiment(EXPERIMENT_ID)
         assert experiment is not None
@@ -45,7 +63,7 @@ async def test_start_experiment(self, experiment_executor, experiment_manager):
     @pytest.mark.slow
     @pytest.mark.asyncio
     async def test_progress_experiment(self, experiment_executor, experiment_manager, task_manager):
-        await experiment_executor.start_experiment(DYNAMIC_PARAMETERS)
+        await experiment_executor.start_experiment()
 
         experiment_completed = await experiment_executor.progress_experiment()
         assert not experiment_completed
@@ -71,7 +89,7 @@ async def test_progress_experiment(self, experiment_executor, experiment_manager
     @pytest.mark.asyncio
     async def test_task_output_registration(self, experiment_executor, task_manager):
-        await experiment_executor.start_experiment(DYNAMIC_PARAMETERS)
+        await experiment_executor.start_experiment()
 
         experiment_completed = False
         while not experiment_completed:
@@ -84,7 +102,7 @@ async def test_task_output_registration(self, experiment_executor, task_manager)
     @pytest.mark.asyncio
     async def test_resolve_input_parameter_references_and_dynamic_parameters(self, experiment_executor, task_manager):
-        await experiment_executor.start_experiment(DYNAMIC_PARAMETERS)
+        await experiment_executor.start_experiment()
 
         experiment_completed = False
         while not experiment_completed:
@@ -113,12 +131,10 @@ async def test_resolve_input_parameter_references_and_dynamic_parameters(self, e
     )
     @pytest.mark.asyncio
     async def test_handle_existing_experiment(self, experiment_executor, experiment_manager, experiment_status):
-        await experiment_manager.create_experiment(
-            EXPERIMENT_ID, EXPERIMENT_TYPE, experiment_executor._execution_parameters, {}, {}
-        )
+        await experiment_manager.create_experiment(experiment_executor._experiment_definition)
         await experiment_manager._set_experiment_status(EXPERIMENT_ID, experiment_status)
 
-        experiment_executor._execution_parameters.resume = False
+        experiment_executor._experiment_definition.resume = False
         with patch.object(experiment_executor, "_resume_experiment") as mock_resume:
             if experiment_status in [
                 ExperimentStatus.COMPLETED,
@@ -136,7 +152,7 @@ async def test_handle_existing_experiment(self, experiment_executor, experiment_
                 await experiment_executor._handle_existing_experiment(experiment)
                 mock_resume.assert_not_called()
 
-        experiment_executor._execution_parameters.resume = True
+        experiment_executor._experiment_definition.resume = True
         with patch.object(experiment_executor, "_resume_experiment") as mock_resume:
             experiment = await experiment_manager.get_experiment(EXPERIMENT_ID)
             await experiment_executor._handle_existing_experiment(experiment)
diff --git a/tests/test_experiment_graph.py b/tests/experiments/test_experiment_graph.py
similarity index 100%
rename from tests/test_experiment_graph.py
rename to tests/experiments/test_experiment_graph.py
diff --git a/tests/test_experiment_manager.py b/tests/experiments/test_experiment_manager.py
similarity index 66%
rename from tests/test_experiment_manager.py
rename to tests/experiments/test_experiment_manager.py
index 7301d94..b532d65 100644
--- a/tests/test_experiment_manager.py
+++ b/tests/experiments/test_experiment_manager.py
@@ -1,16 +1,16 @@
-from eos.experiments.entities.experiment import ExperimentStatus
+from eos.experiments.entities.experiment import ExperimentStatus, ExperimentDefinition
 from eos.experiments.exceptions import EosExperimentStateError
 from tests.fixtures import *
 
-EXPERIMENT_ID = "water_purification"
+EXPERIMENT_TYPE = "water_purification"
 
 
-@pytest.mark.parametrize("setup_lab_experiment", [("small_lab", EXPERIMENT_ID)], indirect=True)
+@pytest.mark.parametrize("setup_lab_experiment", [("small_lab", EXPERIMENT_TYPE)], indirect=True)
 class TestExperimentManager:
     @pytest.mark.asyncio
     async def test_create_experiment(self, experiment_manager):
-        await experiment_manager.create_experiment("test_experiment", EXPERIMENT_ID)
-        await experiment_manager.create_experiment("test_experiment_2", EXPERIMENT_ID)
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment"))
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment_2"))
 
         experiment1 = await experiment_manager.get_experiment("test_experiment")
         assert experiment1.id == "test_experiment"
@@ -20,18 +20,18 @@ async def test_create_experiment(self, experiment_manager):
     @pytest.mark.asyncio
     async def test_create_experiment_nonexistent_type(self, experiment_manager):
         with pytest.raises(EosExperimentStateError):
-            await experiment_manager.create_experiment("test_experiment", "nonexistent_type")
+            await experiment_manager.create_experiment(ExperimentDefinition(type="nonexistent", id="test_experiment"))
 
     @pytest.mark.asyncio
     async def test_create_existing_experiment(self, experiment_manager):
-        await experiment_manager.create_experiment("test_experiment", EXPERIMENT_ID)
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment"))
 
         with pytest.raises(EosExperimentStateError):
-            await experiment_manager.create_experiment("test_experiment", EXPERIMENT_ID)
+            await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment"))
 
     @pytest.mark.asyncio
     async def test_delete_experiment(self, experiment_manager):
-        await experiment_manager.create_experiment("test_experiment", EXPERIMENT_ID)
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment"))
 
         experiment = await experiment_manager.get_experiment("test_experiment")
         assert experiment.id == "test_experiment"
@@ -42,26 +42,22 @@ async def test_delete_experiment(self, experiment_manager):
         assert experiment is None
 
     @pytest.mark.asyncio
-    async def test_delete_nonexisting_experiment(self, experiment_manager):
+    async def test_delete_nonexistent_experiment(self, experiment_manager):
         with pytest.raises(EosExperimentStateError):
             await experiment_manager.delete_experiment("non_existing_experiment")
 
     @pytest.mark.asyncio
     async def test_get_experiments_by_status(self, experiment_manager):
-        await experiment_manager.create_experiment("test_experiment", EXPERIMENT_ID)
-        await experiment_manager.create_experiment("test_experiment_2", EXPERIMENT_ID)
-        await experiment_manager.create_experiment("test_experiment_3", EXPERIMENT_ID)
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment"))
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment_2"))
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment_3"))
 
         await experiment_manager.start_experiment("test_experiment")
         await experiment_manager.start_experiment("test_experiment_2")
         await experiment_manager.complete_experiment("test_experiment_3")
 
-        running_experiments = await experiment_manager.get_experiments(
-            status=ExperimentStatus.RUNNING.value
-        )
-        completed_experiments = await experiment_manager.get_experiments(
-            status=ExperimentStatus.COMPLETED.value
-        )
+        running_experiments = await experiment_manager.get_experiments(status=ExperimentStatus.RUNNING.value)
+        completed_experiments = await experiment_manager.get_experiments(status=ExperimentStatus.COMPLETED.value)
 
         assert running_experiments == [
             await experiment_manager.get_experiment("test_experiment"),
@@ -72,7 +68,7 @@ async def test_get_experiments_by_status(self, experiment_manager):
     @pytest.mark.asyncio
     async def test_set_experiment_status(self, experiment_manager):
-        await experiment_manager.create_experiment("test_experiment", EXPERIMENT_ID)
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment"))
 
         experiment = await experiment_manager.get_experiment("test_experiment")
         assert experiment.status == ExperimentStatus.CREATED
@@ -91,9 +87,9 @@ async def test_set_experiment_status_nonexistent_experiment(self, experiment_man
     @pytest.mark.asyncio
     async def test_get_all_experiments(self, experiment_manager):
-        await experiment_manager.create_experiment("test_experiment", EXPERIMENT_ID)
-        await experiment_manager.create_experiment("test_experiment_2", EXPERIMENT_ID)
-        await experiment_manager.create_experiment("test_experiment_3", EXPERIMENT_ID)
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment"))
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment_2"))
+        await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id="test_experiment_3"))
 
         experiments = await experiment_manager.get_experiments()
         assert experiments == [
diff --git a/tests/fixtures.py b/tests/fixtures.py
index 9d60593..c4080a3 100644
--- a/tests/fixtures.py
+++ b/tests/fixtures.py
@@ -5,22 +5,18 @@
 import ray
 import yaml
 
-from eos.campaigns.campaign_executor import CampaignExecutor
 from eos.campaigns.campaign_manager import CampaignManager
 from eos.campaigns.campaign_optimizer_manager import CampaignOptimizerManager
-from eos.campaigns.entities.campaign import CampaignExecutionParameters
 from eos.configuration.configuration_manager import ConfigurationManager
+from eos.configuration.entities.eos_config import DbConfig
 from eos.configuration.experiment_graph.experiment_graph import ExperimentGraph
 from eos.containers.container_manager import ContainerManager
 from eos.devices.device_manager import DeviceManager
-from eos.experiments.entities.experiment import ExperimentExecutionParameters
-from eos.experiments.experiment_executor import ExperimentExecutor
 from eos.experiments.experiment_executor_factory import ExperimentExecutorFactory
 from eos.experiments.experiment_manager import ExperimentManager
 from eos.logging.logger import log
 from eos.persistence.async_mongodb_interface import AsyncMongoDbInterface
 from eos.persistence.file_db_interface import FileDbInterface
-from eos.persistence.service_credentials import ServiceCredentials
 from eos.resource_allocation.container_allocator import ContainerAllocator
 from eos.resource_allocation.device_allocator import DeviceAllocator
 from eos.resource_allocation.resource_allocation_manager import (
@@ -68,18 +64,14 @@ def user_dir():
 @pytest.fixture(scope="session")
 def db_interface():
     config = load_test_config()
-
-    db_credentials = ServiceCredentials(**config["db"])
-
+    db_credentials = DbConfig(**config["db"])
     return AsyncMongoDbInterface(db_credentials, "test-eos")
 
 
 @pytest.fixture(scope="session")
 def file_db_interface(db_interface):
     config = load_test_config()
-
-    file_db_credentials = ServiceCredentials(**config["file_db"])
-
+    file_db_credentials = DbConfig(**config["file_db"])
     return FileDbInterface(file_db_credentials, bucket_name="test-eos")
@@ -166,7 +158,8 @@ async def task_manager(setup_lab_experiment, configuration_manager, db_interface
 
 @pytest.fixture(scope="session", autouse=True)
 def ray_cluster():
-    ray.init(namespace="test-eos", ignore_reinit_error=True, resources={"eos-core": 1000})
+    if not ray.is_initialized():
+        ray.init(namespace="test-eos", resources={"eos-core": 1000})
     yield
     ray.shutdown()
 
@@ -192,9 +185,7 @@ def on_demand_task_executor(
     task_manager,
     container_manager,
 ):
-    return OnDemandTaskExecutor(
-        task_executor, task_manager, container_manager
-    )
+    return OnDemandTaskExecutor(task_executor, task_manager, container_manager)
 
 
 @pytest.fixture
@@ -211,31 +202,6 @@ def greedy_scheduler(
     )
 
 
-@pytest.fixture
-def experiment_executor(
-    request,
-    experiment_manager,
-    task_manager,
-    container_manager,
-    task_executor,
-    greedy_scheduler,
-    experiment_graph,
-):
-    experiment_id, experiment_type = request.param
-
-    return ExperimentExecutor(
-        experiment_id=experiment_id,
-        experiment_type=experiment_type,
-        execution_parameters=ExperimentExecutionParameters(),
-        experiment_graph=experiment_graph,
-        experiment_manager=experiment_manager,
-        task_manager=task_manager,
-        container_manager=container_manager,
-        task_executor=task_executor,
-        scheduler=greedy_scheduler,
-    )
-
-
 @pytest.fixture
 def experiment_executor_factory(
     configuration_manager,
@@ -256,10 +222,7 @@ def experiment_executor_factory(
 
 
 @pytest.fixture
-async def campaign_manager(
-    configuration_manager,
-    db_interface,
-):
+async def campaign_manager(setup_lab_experiment, configuration_manager, db_interface, clean_db):
     campaign_manager = CampaignManager(configuration_manager, db_interface)
     await campaign_manager.initialize(db_interface)
     return campaign_manager
@@ -273,34 +236,3 @@ async def campaign_optimizer_manager(
     campaign_optimizer_manager = CampaignOptimizerManager(configuration_manager, db_interface)
     await campaign_optimizer_manager.initialize(db_interface)
     return campaign_optimizer_manager
-
-
-@pytest.fixture
-def campaign_executor(
-    request,
-    configuration_manager,
-    campaign_manager,
-    campaign_optimizer_manager,
-    task_manager,
-    experiment_executor_factory,
-):
-    campaign_id, experiment_type, max_experiments, do_optimization = request.param
-
-    optimizer_computer_ip = "127.0.0.1"
-
-    execution_parameters = CampaignExecutionParameters(
-        max_experiments=max_experiments,
-        max_concurrent_experiments=1,
-        do_optimization=do_optimization,
-        optimizer_computer_ip=optimizer_computer_ip,
-    )
-
-    return CampaignExecutor(
-        campaign_id=campaign_id,
-        experiment_type=experiment_type,
-        campaign_manager=campaign_manager,
-        campaign_optimizer_manager=campaign_optimizer_manager,
-        task_manager=task_manager,
-        experiment_executor_factory=experiment_executor_factory,
-        execution_parameters=execution_parameters,
-    )
diff --git a/tests/optimization/__init__.py b/tests/optimization/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_bayesian_sequential_optimizer.py b/tests/optimization/test_bayesian_sequential_optimizer.py
similarity index 97%
rename from tests/test_bayesian_sequential_optimizer.py
rename to tests/optimization/test_bayesian_sequential_optimizer.py
index 3e93354..9d452ec 100644
--- a/tests/test_bayesian_sequential_optimizer.py
+++ b/tests/optimization/test_bayesian_sequential_optimizer.py
@@ -56,7 +56,6 @@ def test_competing_multi_objective_optimization(self):
             optimizer.report(parameters, results)
 
         optimal_solutions = optimizer.get_optimal_solutions()
-        print()
         pd.set_option("display.max_rows", None, "display.max_columns", None)
         print(optimal_solutions)
 
@@ -74,8 +73,8 @@ def test_competing_multi_objective_optimization(self):
 
         for true_solution in true_pareto_front:
             assert any(
-                abs(solution["x"] - true_solution["x"]) < 0.8
-                and abs(solution["y1"] - true_solution["y1"]) < 0.8
-                and abs(solution["y2"] - true_solution["y2"]) < 0.8
+                abs(solution["x"] - true_solution["x"]) < 0.9
+                and abs(solution["y1"] - true_solution["y1"]) < 0.9
+                and abs(solution["y2"] - true_solution["y2"]) < 0.9
                 for _, solution in optimal_solutions.iterrows()
             )
diff --git a/tests/persistence/__init__.py b/tests/persistence/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_mongodb_async_repository.py b/tests/persistence/test_mongodb_async_repository.py
similarity index 100%
rename from tests/test_mongodb_async_repository.py
rename to tests/persistence/test_mongodb_async_repository.py
diff --git a/tests/resource_allocation/__init__.py b/tests/resource_allocation/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_resource_allocation_manager.py b/tests/resource_allocation/test_resource_allocation_manager.py
similarity index 100%
rename from tests/test_resource_allocation_manager.py
rename to tests/resource_allocation/test_resource_allocation_manager.py
diff --git a/tests/scheduling/__init__.py b/tests/scheduling/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_greedy_scheduler.py b/tests/scheduling/test_greedy_scheduler.py
similarity index 90%
rename from tests/test_greedy_scheduler.py
rename to tests/scheduling/test_greedy_scheduler.py
index 15ff627..ec2417d 100644
--- a/tests/test_greedy_scheduler.py
+++ b/tests/scheduling/test_greedy_scheduler.py
@@ -1,3 +1,5 @@
+from eos.experiments.entities.experiment import ExperimentDefinition
+from eos.tasks.entities.task import TaskDefinition
 from tests.fixtures import *
 
 
@@ -26,7 +28,7 @@ async def test_unregister_experiment(self, greedy_scheduler, experiment_graph):
     @pytest.mark.asyncio
     async def test_correct_schedule(self, greedy_scheduler, experiment_graph, experiment_manager, task_manager):
         async def complete_task(task_id, task_type):
-            await task_manager.create_task("experiment_1", task_id, task_type, [])
+            await task_manager.create_task(TaskDefinition(id=task_id, type=task_type, experiment_id="experiment_1"))
             await task_manager.start_task("experiment_1", task_id)
             await task_manager.complete_task("experiment_1", task_id)
 
@@ -45,7 +47,7 @@ async def process_and_assert(tasks, expected_tasks):
                 assert_task(task, task_id, device_lab_id, device_id)
                 await complete_task(task_id, "Noop")
 
-        await experiment_manager.create_experiment("experiment_1", "abstract_experiment")
+        await experiment_manager.create_experiment(ExperimentDefinition(type="abstract_experiment", id="experiment_1"))
         await experiment_manager.start_experiment("experiment_1")
 
         greedy_scheduler.register_experiment("experiment_1", "abstract_experiment", experiment_graph)
diff --git a/tests/tasks/__init__.py b/tests/tasks/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_base_task.py b/tests/tasks/test_base_task.py
similarity index 100%
rename from tests/test_base_task.py
rename to tests/tasks/test_base_task.py
diff --git a/tests/test_on_demand_task_executor.py b/tests/tasks/test_on_demand_task_executor.py
similarity index 79%
rename from tests/test_on_demand_task_executor.py
rename to tests/tasks/test_on_demand_task_executor.py
index d91f014..2aefe08 100644
--- a/tests/test_on_demand_task_executor.py
+++ b/tests/tasks/test_on_demand_task_executor.py
@@ -1,7 +1,7 @@
 import asyncio
 
 from eos.configuration.entities.task import TaskConfig
-from eos.tasks.entities.task import TaskStatus
+from eos.tasks.entities.task import TaskStatus, TaskDefinition
 from tests.fixtures import *
 
 
@@ -16,11 +16,12 @@ async def test_execute_on_demand_task(self, on_demand_task_executor, task_manage
         task_config = TaskConfig(
             id="mixing",
             type="Magnetic Mixing",
-            description="Mixing task",
+            desc="Mixing task",
             parameters={"time": 5},
         )
+        task_definition = TaskDefinition.from_config(task_config, "on_demand")
 
-        on_demand_task_executor.submit_task(task_config)
+        on_demand_task_executor.submit_task(task_definition)
         await on_demand_task_executor.process_tasks()
 
         while True:
@@ -36,13 +37,14 @@ async def test_execute_on_demand_task(self, on_demand_task_executor, task_manage
     @pytest.mark.asyncio
     async def test_on_demand_task_output(self, on_demand_task_executor, task_manager):
         task_config = TaskConfig(
-            "file_gen",
+            id="file_gen",
             type="File Generation",
-            description="File generation task",
+            desc="File generation task",
             parameters={"content_length": 32},
         )
+        task_definition = TaskDefinition.from_config(task_config, "on_demand")
 
-        on_demand_task_executor.submit_task(task_config)
+        on_demand_task_executor.submit_task(task_definition)
         await on_demand_task_executor.process_tasks()
 
         while True:
@@ -62,11 +64,12 @@ async def test_request_task_cancellation(self, on_demand_task_executor, task_man
         task_config = TaskConfig(
             id="sleep",
             type="Sleep",
-            description="Sleeping task",
+            desc="Sleeping task",
             parameters={"time": 20},
         )
+        task_definition = TaskDefinition.from_config(task_config, "on_demand")
 
-        on_demand_task_executor.submit_task(task_config)
+        on_demand_task_executor.submit_task(task_definition)
         await on_demand_task_executor.process_tasks()
 
         iterations = 0
@@ -81,7 +84,7 @@ async def test_request_task_cancellation(self, on_demand_task_executor, task_man
             if iterations > 5:
                 await on_demand_task_executor.request_task_cancellation("sleep")
 
-            if iterations > 20:
+            if iterations > 40:
                 raise Exception("Task did not cancel in time")
 
         task = await task_manager.get_task("on_demand", "sleep")
diff --git a/tests/test_task_executor.py b/tests/tasks/test_task_executor.py
similarity index 65%
rename from tests/test_task_executor.py
rename to tests/tasks/test_task_executor.py
index c7ef03e..55bb9e8 100644
--- a/tests/test_task_executor.py
+++ b/tests/tasks/test_task_executor.py
@@ -1,11 +1,12 @@
 import asyncio
 
 from eos.configuration.entities.task import TaskConfig, TaskDeviceConfig
+from eos.experiments.entities.experiment import ExperimentDefinition
 from eos.resource_allocation.entities.resource_request import (
     ResourceAllocationRequest,
     ResourceType,
 )
-from eos.tasks.entities.task_execution_parameters import TaskExecutionParameters
+from eos.tasks.entities.task import TaskDefinition
 from eos.tasks.exceptions import EosTaskResourceAllocationError
 from tests.fixtures import *
 
@@ -23,25 +24,24 @@ async def test_request_task_execution(
         experiment_manager,
         experiment_graph,
     ):
-        await experiment_manager.create_experiment("water_purification", "water_purification")
+        await experiment_manager.create_experiment(
+            ExperimentDefinition(type="water_purification", id="water_purification")
+        )
 
         task_config = experiment_graph.get_task_config("mixing")
         task_config.parameters["time"] = 5
+        task_config.devices = [TaskDeviceConfig(lab_id="small_lab", id="magnetic_mixer")]
+        task_definition = TaskDefinition.from_config(task_config, "water_purification")
 
-        task_parameters = TaskExecutionParameters(
-            experiment_id="water_purification",
-            devices=[TaskDeviceConfig(lab_id="small_lab", id="magnetic_mixer")],
-            task_config=task_config,
-        )
-        task_output_parameters, _, _ = await task_executor.request_task_execution(task_parameters)
+        task_output_parameters, _, _ = await task_executor.request_task_execution(task_definition)
         assert task_output_parameters["mixing_time"] == 5
 
-        task_parameters.task_config.id = "mixing2"
-        task_output_parameters, _, _ = await task_executor.request_task_execution(task_parameters)
+        task_definition.id = "mixing2"
+        task_output_parameters, _, _ = await task_executor.request_task_execution(task_definition)
         assert task_output_parameters["mixing_time"] == 5
 
-        task_parameters.task_config.id = "mixing3"
-        task_output_parameters, _, _ = await task_executor.request_task_execution(task_parameters)
+        task_definition.id = "mixing3"
+        task_output_parameters, _, _ = await task_executor.request_task_execution(task_definition)
         assert task_output_parameters["mixing_time"] == 5
 
     @pytest.mark.asyncio
@@ -59,24 +59,26 @@ async def test_request_task_execution_resource_request_timeout(
         active_request = await resource_allocation_manager.request_resources(request, lambda requests: None)
         await resource_allocation_manager.process_active_requests()
 
-        await experiment_manager.create_experiment("water_purification", "water_purification")
+        await experiment_manager.create_experiment(
+            ExperimentDefinition(type="water_purification", id="water_purification")
+        )
 
         task_config = experiment_graph.get_task_config("mixing")
         task_config.parameters["time"] = 5
-        task_parameters = TaskExecutionParameters(
-            experiment_id="water_purification",
-            devices=[TaskDeviceConfig(lab_id="small_lab", id="magnetic_mixer")],
-            task_config=task_config,
-            resource_allocation_timeout=1,
-        )
+        task_config.devices = [TaskDeviceConfig(lab_id="small_lab", id="magnetic_mixer")]
+        task_definition = TaskDefinition.from_config(task_config, "water_purification")
+        task_definition.resource_allocation_timeout = 1
+
         with pytest.raises(EosTaskResourceAllocationError):
-            await task_executor.request_task_execution(task_parameters)
+            await task_executor.request_task_execution(task_definition)
 
         await resource_allocation_manager.release_resources(active_request)
 
     @pytest.mark.asyncio
     async def test_request_task_cancellation(self, task_executor, experiment_manager):
-        await experiment_manager.create_experiment("water_purification", "water_purification")
+        await experiment_manager.create_experiment(
+            ExperimentDefinition(type="water_purification", id="water_purification")
+        )
 
         sleep_config = TaskConfig(
             id="sleep_task",
@@ -84,11 +86,7 @@ async def test_request_task_cancellation(self, task_executor, experiment_manager
             devices=[TaskDeviceConfig(lab_id="small_lab", id="general_computer")],
             parameters={"sleep_time": 5},  # 5 seconds to ensure it's still running when we cancel
         )
-        task_parameters = TaskExecutionParameters(
-            experiment_id="water_purification",
-            task_config=sleep_config,
-        )
-
+        task_parameters = TaskDefinition.from_config(sleep_config, "water_purification")
 
         async def run_task():
             return await task_executor.request_task_execution(task_parameters)
 
@@ -98,12 +96,7 @@ async def cancel_task():
{"water_purification": {"sleep_task": task_parameters}} await task_executor.request_task_cancellation(task_parameters.experiment_id, task_parameters.task_config.id) - # Use asyncio.gather to run both coroutines concurrently - task_result, _ = await asyncio.gather( - run_task(), - cancel_task(), - return_exceptions=True # This allows us to catch any exceptions - ) + task_result, _ = await asyncio.gather(run_task(), cancel_task(), return_exceptions=True) # Check if the task was cancelled assert task_executor._active_tasks == {} diff --git a/tests/tasks/test_task_manager.py b/tests/tasks/test_task_manager.py new file mode 100644 index 0000000..17e5e86 --- /dev/null +++ b/tests/tasks/test_task_manager.py @@ -0,0 +1,145 @@ +from eos.experiments.entities.experiment import ExperimentDefinition +from eos.tasks.entities.task import TaskStatus, TaskOutput, TaskDefinition +from eos.tasks.exceptions import EosTaskStateError, EosTaskExistsError +from tests.fixtures import * + +EXPERIMENT_TYPE = "water_purification" + + +@pytest.fixture +async def experiment_manager(configuration_manager, db_interface): + experiment_manager = ExperimentManager(configuration_manager, db_interface) + await experiment_manager.initialize(db_interface) + await experiment_manager.create_experiment(ExperimentDefinition(type=EXPERIMENT_TYPE, id=EXPERIMENT_TYPE)) + return experiment_manager + + +@pytest.mark.parametrize("setup_lab_experiment", [("small_lab", "water_purification")], indirect=True) +class TestTaskManager: + @pytest.mark.asyncio + async def test_create_task(self, task_manager, experiment_manager): + await task_manager.create_task( + TaskDefinition(id="mixing", type="Magnetic Mixing", experiment_id=EXPERIMENT_TYPE) + ) + + task = await task_manager.get_task(EXPERIMENT_TYPE, "mixing") + assert task.id == "mixing" + assert task.type == "Magnetic Mixing" + + @pytest.mark.asyncio + async def test_create_task_nonexistent_type(self, task_manager, experiment_manager): + with pytest.raises(EosTaskStateError): + await task_manager.create_task( + TaskDefinition(id="nonexistent_task", type="Nonexistent", experiment_id=EXPERIMENT_TYPE) + ) + + @pytest.mark.asyncio + async def test_create_existing_task(self, task_manager, experiment_manager): + task_def = TaskDefinition(id="mixing", type="Magnetic Mixing", experiment_id=EXPERIMENT_TYPE) + await task_manager.create_task(task_def) + + with pytest.raises(EosTaskExistsError): + await task_manager.create_task(task_def) + + @pytest.mark.asyncio + async def test_delete_task(self, task_manager): + await task_manager.create_task( + TaskDefinition(id="mixing", type="Magnetic Mixing", experiment_id=EXPERIMENT_TYPE) + ) + await task_manager.delete_task(EXPERIMENT_TYPE, "mixing") + assert await task_manager.get_task(EXPERIMENT_TYPE, "mixing") is None + + @pytest.mark.asyncio + async def test_delete_nonexistent_task(self, task_manager, experiment_manager): + with pytest.raises(EosTaskStateError): + await task_manager.create_task( + TaskDefinition(id="nonexistent_task", type="Nonexistent", experiment_id=EXPERIMENT_TYPE) + ) + await task_manager.delete_task(EXPERIMENT_TYPE, "nonexistent_task") + + @pytest.mark.asyncio + async def test_get_all_tasks_by_status(self, task_manager, experiment_manager): + await task_manager.create_task( + TaskDefinition(id="mixing", type="Magnetic Mixing", experiment_id=EXPERIMENT_TYPE) + ) + await task_manager.create_task( + TaskDefinition(id="purification", type="Purification", experiment_id=EXPERIMENT_TYPE) + ) + + await task_manager.start_task(EXPERIMENT_TYPE, 
"mixing") + await task_manager.complete_task(EXPERIMENT_TYPE, "purification") + + assert len(await task_manager.get_tasks(experiment_id=EXPERIMENT_TYPE, status=TaskStatus.RUNNING.value)) == 1 + assert len(await task_manager.get_tasks(experiment_id=EXPERIMENT_TYPE, status=TaskStatus.COMPLETED.value)) == 1 + + @pytest.mark.asyncio + async def test_set_task_status(self, task_manager, experiment_manager): + await task_manager.create_task( + TaskDefinition(id="mixing", type="Magnetic Mixing", experiment_id=EXPERIMENT_TYPE) + ) + task = await task_manager.get_task(EXPERIMENT_TYPE, "mixing") + assert task.status == TaskStatus.CREATED + + await task_manager.start_task(EXPERIMENT_TYPE, "mixing") + task = await task_manager.get_task(EXPERIMENT_TYPE, "mixing") + assert task.status == TaskStatus.RUNNING + + await task_manager.complete_task(EXPERIMENT_TYPE, "mixing") + task = await task_manager.get_task(EXPERIMENT_TYPE, "mixing") + assert task.status == TaskStatus.COMPLETED + + @pytest.mark.asyncio + async def test_set_task_status_nonexistent_task(self, task_manager, experiment_manager): + with pytest.raises(EosTaskStateError): + await task_manager.start_task(EXPERIMENT_TYPE, "nonexistent_task") + + @pytest.mark.asyncio + async def test_start_task(self, task_manager, experiment_manager): + await task_manager.create_task( + TaskDefinition(id="mixing", type="Magnetic Mixing", experiment_id=EXPERIMENT_TYPE) + ) + + await task_manager.start_task(EXPERIMENT_TYPE, "mixing") + assert "mixing" in await experiment_manager.get_running_tasks(EXPERIMENT_TYPE) + + @pytest.mark.asyncio + async def test_start_task_nonexistent_experiment(self, task_manager, experiment_manager): + with pytest.raises(EosTaskStateError): + await task_manager.start_task(EXPERIMENT_TYPE, "nonexistent_task") + + @pytest.mark.asyncio + async def test_complete_task(self, task_manager, experiment_manager): + await task_manager.create_task( + TaskDefinition(id="mixing", type="Magnetic Mixing", experiment_id=EXPERIMENT_TYPE) + ) + await task_manager.start_task(EXPERIMENT_TYPE, "mixing") + await task_manager.complete_task(EXPERIMENT_TYPE, "mixing") + assert "mixing" not in await experiment_manager.get_running_tasks(EXPERIMENT_TYPE) + assert "mixing" in await experiment_manager.get_completed_tasks(EXPERIMENT_TYPE) + + @pytest.mark.asyncio + async def test_complete_task_nonexistent_experiment(self, task_manager, experiment_manager): + with pytest.raises(EosTaskStateError): + await task_manager.complete_task(EXPERIMENT_TYPE, "nonexistent_task") + + @pytest.mark.asyncio + async def test_add_task_output(self, task_manager): + await task_manager.create_task( + TaskDefinition(id="mixing", type="Magnetic Mixing", experiment_id=EXPERIMENT_TYPE) + ) + + task_output = TaskOutput( + parameters={"x": 5}, + file_names=["file"], + ) + await task_manager.add_task_output(EXPERIMENT_TYPE, "mixing", task_output) + task_manager.add_task_output_file(EXPERIMENT_TYPE, "mixing", "file", b"file_data") + + output = await task_manager.get_task_output(experiment_id=EXPERIMENT_TYPE, task_id="mixing") + assert output.parameters == {"x": 5} + assert output.file_names == ["file"] + + output_file = task_manager.get_task_output_file( + experiment_id=EXPERIMENT_TYPE, task_id="mixing", file_name="file" + ) + assert output_file == b"file_data" diff --git a/tests/test_task_input_parameter_validator.py b/tests/test_task_input_parameter_validator.py deleted file mode 100644 index 75b1d29..0000000 --- a/tests/test_task_input_parameter_validator.py +++ /dev/null @@ -1,129 +0,0 @@ -import 
-from omegaconf import DictConfig
-
-from eos.configuration.entities.parameters import ParameterType
-from eos.configuration.entities.task import TaskConfig
-from eos.configuration.entities.task_specification import TaskSpecification
-from eos.tasks.exceptions import EosTaskValidationError
-from eos.tasks.task_input_parameter_validator import TaskInputParameterValidator
-
-
-class TestTaskInputParameterValidator:
-    @pytest.fixture
-    def task_spec(self):
-        return TaskSpecification(
-            type="test_task",
-            description="A test task",
-            input_parameters={
-                "integer_param": DictConfig(
-                    {"type": "integer", "unit": "n/a", "description": "An integer parameter", "min": 0, "max": 100}
-                ),
-                "decimal_param": DictConfig(
-                    {"type": "decimal", "unit": "n/a", "description": "A float parameter", "min": 0.0, "max": 1.0}
-                ),
-                "string_param": DictConfig({"type": "string", "description": "A string parameter"}),
-                "boolean_param": DictConfig({"type": "boolean", "value": False, "description": "A boolean parameter"}),
-                "list_param": DictConfig(
-                    {"type": "list", "description": "A list parameter", "element_type": "integer", "length": 3}
-                ),
-                "dictionary_param": DictConfig({"type": "dictionary", "description": "A dictionary parameter"}),
-                "choice_param": DictConfig(
-                    {"type": "choice", "value": "A", "description": "A choice parameter", "choices": ["A", "B", "C"]}
-                ),
-            },
-        )
-
-    @pytest.fixture
-    def task_config(self, task_spec):
-        return TaskConfig(
-            id="test_task_1",
-            type="test_task",
-            parameters={
-                "integer_param": 50,
-                "decimal_param": 0.5,
-                "string_param": "test",
-                "boolean_param": True,
-                "list_param": [1, 2, 3],
-                "dictionary_param": {"key": "value"},
-                "choice_param": "A",
-            },
-        )
-
-    @pytest.fixture
-    def validator(self, task_config, task_spec):
-        return TaskInputParameterValidator(task_config, task_spec)
-
-    def test_valid_input_parameters(self, validator):
-        validator.validate_input_parameters()  # Should not raise any exceptions
-
-    @pytest.mark.parametrize(
-        ("param_name", "invalid_value"),
-        [
-            ("integer_param", "not_an_int"),
-            ("decimal_param", "not_a_float"),
-            ("boolean_param", "not_a_bool"),
-            ("list_param", "not_a_list"),
-            ("dictionary_param", "not_a_dict"),
-            ("choice_param", "D"),
-        ],
-    )
-    def test_invalid_input_parameters(self, validator, task_config, param_name, invalid_value):
-        task_config.parameters[param_name] = invalid_value
-        with pytest.raises(EosTaskValidationError):
-            validator.validate_input_parameters()
-
-    def test_missing_required_parameter(self, validator, task_config):
-        del task_config.parameters["integer_param"]
-        with pytest.raises(EosTaskValidationError):
-            validator.validate_input_parameters()
-
-    def test_extra_parameter(self, validator, task_config):
-        task_config.parameters["extra_param"] = "extra"
-        with pytest.raises(EosTaskValidationError):
-            validator.validate_input_parameters()
-
-    @pytest.mark.parametrize(
-        ("param_type", "valid_values", "invalid_values"),
-        [
-            (ParameterType.integer, [0, 50, 100, "50"], [-1, 101, "fifty"]),
-            (ParameterType.decimal, [0.0, 0.5, 1.0, "0.5"], [-0.1, 1.1, "half"]),
-            (ParameterType.boolean, [True, False, "true", "false"], ["yes", "no", 2]),
-            (ParameterType.string, ["test", "123", ""], []),
-            (ParameterType.list, [[1, 2, 3], [1, 2, 62]], [[1, 2], [1, 2, 3, 4], "not_a_list"]),
-            (ParameterType.dictionary, [{"key": "value"}, {}], ["not_a_dict", [1, 2, 3]]),
-            (ParameterType.choice, ["A", "B", "C"], ["D", 1, True]),
-        ],
-    )
-    def test_parameter_type_conversion(
-        self, validator, task_config, task_spec, param_type, valid_values, invalid_values
-    ):
-        param_name = f"{param_type.value}_param"
-        task_spec.input_parameters[param_name]["type"] = param_type.value
-        if param_type == ParameterType.choice:
-            task_spec.input_parameters[param_name]["choices"] = ["A", "B", "C"]
-        elif param_type == ParameterType.list:
-            task_spec.input_parameters[param_name]["element_type"] = "integer"
-            task_spec.input_parameters[param_name]["length"] = 3
-
-        for valid_value in valid_values:
-            task_config.parameters[param_name] = valid_value
-            validator.validate_input_parameters()  # Should not raise any exceptions
-
-        for invalid_value in invalid_values:
-            task_config.parameters[param_name] = invalid_value
-            with pytest.raises(EosTaskValidationError):
-                validator.validate_input_parameters()
-
-    @pytest.mark.parametrize(
-        ("param_name", "invalid_value", "expected_error"),
-        [
-            ("integer_param", "$.some_reference", EosTaskValidationError),
-            ("integer_param", "eos_dynamic", EosTaskValidationError),
-            ("integer_param", 150, EosTaskValidationError),
-            ("list_param", [1, 2, 3, 4], EosTaskValidationError),
-        ],
-    )
-    def test_specific_validation_cases(self, validator, task_config, param_name, invalid_value, expected_error):
-        task_config.parameters[param_name] = invalid_value
-        with pytest.raises(expected_error):
-            validator.validate_input_parameters()
diff --git a/tests/test_task_manager.py b/tests/test_task_manager.py
deleted file mode 100644
index 6eb24c0..0000000
--- a/tests/test_task_manager.py
+++ /dev/null
@@ -1,129 +0,0 @@
-from eos.tasks.entities.task import TaskStatus, TaskOutput
-from eos.tasks.exceptions import EosTaskStateError, EosTaskExistsError
-from tests.fixtures import *
-
-EXPERIMENT_ID = "water_purification"
-
-
-@pytest.fixture
-async def experiment_manager(configuration_manager, db_interface):
-    experiment_manager = ExperimentManager(configuration_manager, db_interface)
-    await experiment_manager.initialize(db_interface)
-    await experiment_manager.create_experiment(EXPERIMENT_ID, "water_purification")
-    return experiment_manager
-
-
-@pytest.mark.parametrize("setup_lab_experiment", [("small_lab", "water_purification")], indirect=True)
-class TestTaskManager:
-    @pytest.mark.asyncio
-    async def test_create_task(self, task_manager, experiment_manager):
-        await task_manager.create_task(EXPERIMENT_ID, "mixing", "Magnetic Mixing", [])
-
-        task = await task_manager.get_task(EXPERIMENT_ID, "mixing")
-        assert task.id == "mixing"
-        assert task.type == "Magnetic Mixing"
-
-    @pytest.mark.asyncio
-    async def test_create_task_nonexistent(self, task_manager, experiment_manager):
-        with pytest.raises(EosTaskStateError):
-            await task_manager.create_task(EXPERIMENT_ID, "nonexistent", "nonexistent", [])
-
-    @pytest.mark.asyncio
-    async def test_create_task_nonexistent_task_type(self, task_manager, experiment_manager):
-        with pytest.raises(EosTaskStateError):
-            await task_manager.create_task(EXPERIMENT_ID, "nonexistent_task", "Nonexistent", [])
-
-    @pytest.mark.asyncio
-    async def test_create_existing_task(self, task_manager, experiment_manager):
-        await task_manager.create_task(EXPERIMENT_ID, "mixing", "Magnetic Mixing", [])
-
-        with pytest.raises(EosTaskExistsError):
-            await task_manager.create_task(EXPERIMENT_ID, "mixing", "Magnetic Mixing", [])
-
-    @pytest.mark.asyncio
-    async def test_delete_task(self, task_manager):
-        await task_manager.create_task(EXPERIMENT_ID, "mixing", "Magnetic Mixing", [])
-
-        await task_manager.delete_task(EXPERIMENT_ID, "mixing")
-
-        assert await task_manager.get_task(EXPERIMENT_ID, "mixing") is None
-
-    @pytest.mark.asyncio
-    async def test_delete_nonexistent_task(self, task_manager, experiment_manager):
-        with pytest.raises(EosTaskStateError):
-            await task_manager.delete_task(EXPERIMENT_ID, "nonexistent_task")
-
-    @pytest.mark.asyncio
-    async def test_get_all_tasks_by_status(self, task_manager, experiment_manager):
-        await task_manager.create_task(EXPERIMENT_ID, "mixing", "Magnetic Mixing", [])
-        await task_manager.create_task(EXPERIMENT_ID, "purification", "Purification", [])
-
-        await task_manager.start_task(EXPERIMENT_ID, "mixing")
-        await task_manager.complete_task(EXPERIMENT_ID, "purification")
-
-        assert len(await task_manager.get_tasks(experiment_id=EXPERIMENT_ID, status=TaskStatus.RUNNING.value)) == 1
-        assert len(await task_manager.get_tasks(experiment_id=EXPERIMENT_ID, status=TaskStatus.COMPLETED.value)) == 1
-
-    @pytest.mark.asyncio
-    async def test_set_task_status(self, task_manager, experiment_manager):
-        await task_manager.create_task(EXPERIMENT_ID, "mixing", "Magnetic Mixing", [])
-        task = await task_manager.get_task(EXPERIMENT_ID, "mixing")
-        assert task.status == TaskStatus.CREATED
-
-        await task_manager.start_task(EXPERIMENT_ID, "mixing")
-        task = await task_manager.get_task(EXPERIMENT_ID, "mixing")
-        assert task.status == TaskStatus.RUNNING
-
-        await task_manager.complete_task(EXPERIMENT_ID, "mixing")
-        task = await task_manager.get_task(EXPERIMENT_ID, "mixing")
-        assert task.status == TaskStatus.COMPLETED
-
-    @pytest.mark.asyncio
-    async def test_set_task_status_nonexistent_task(self, task_manager, experiment_manager):
-        with pytest.raises(EosTaskStateError):
-            await task_manager.start_task(EXPERIMENT_ID, "nonexistent_task")
-
-    @pytest.mark.asyncio
-    async def test_start_task(self, task_manager, experiment_manager):
-        await task_manager.create_task(EXPERIMENT_ID, "mixing", "Magnetic Mixing", [])
-
-        await task_manager.start_task(EXPERIMENT_ID, "mixing")
-        assert "mixing" in await experiment_manager.get_running_tasks(EXPERIMENT_ID)
-
-    @pytest.mark.asyncio
-    async def test_start_task_nonexistent_experiment(self, task_manager, experiment_manager):
-        with pytest.raises(EosTaskStateError):
-            await task_manager.start_task(EXPERIMENT_ID, "nonexistent_task")
-
-    @pytest.mark.asyncio
-    async def test_complete_task(self, task_manager, experiment_manager):
-        await task_manager.create_task(EXPERIMENT_ID, "mixing", "Magnetic Mixing", [])
-        await task_manager.start_task(EXPERIMENT_ID, "mixing")
-        await task_manager.complete_task(EXPERIMENT_ID, "mixing")
-        assert "mixing" not in await experiment_manager.get_running_tasks(EXPERIMENT_ID)
-        assert "mixing" in await experiment_manager.get_completed_tasks(EXPERIMENT_ID)
-
-    @pytest.mark.asyncio
-    async def test_complete_task_nonexistent_experiment(self, task_manager, experiment_manager):
-        with pytest.raises(EosTaskStateError):
-            await task_manager.complete_task(EXPERIMENT_ID, "nonexistent_task")
-
-    @pytest.mark.asyncio
-    async def test_add_task_output(self, task_manager):
-        await task_manager.create_task(EXPERIMENT_ID, "mixing", "Magnetic Mixing", [])
-
-        task_output = TaskOutput(
-            experiment_id=EXPERIMENT_ID,
-            task_id="mixing",
-            parameters={"x": 5},
-            file_names=["file"],
-        )
-        await task_manager.add_task_output(EXPERIMENT_ID, "mixing", task_output)
-        task_manager.add_task_output_file(EXPERIMENT_ID, "mixing", "file", b"file_data")
-
-        output = await task_manager.get_task_output(experiment_id=EXPERIMENT_ID, task_id="mixing")
-        assert output.parameters == {"x": 5}
output.file_names == ["file"] - - output_file = task_manager.get_task_output_file(experiment_id=EXPERIMENT_ID, task_id="mixing", file_name="file") - assert output_file == b"file_data" diff --git a/tests/test_task_specification_validation.py b/tests/test_task_specification_validation.py deleted file mode 100644 index 8561c3a..0000000 --- a/tests/test_task_specification_validation.py +++ /dev/null @@ -1,262 +0,0 @@ -from eos.configuration.entities.parameters import ( - ParameterFactory, - ParameterType, -) -from eos.configuration.entities.task_specification import ( - TaskSpecificationOutputParameter, - TaskSpecification, -) -from eos.configuration.exceptions import EosConfigurationError -from tests.fixtures import * - - -class TestTaskSpecifications: - def test_invalid_parameter_type(self): - with pytest.raises(ValueError): - ParameterFactory.create_parameter( - "invalid_type", - value=120, - description="Duration of evaporation in seconds.", - ) - - def test_numeric_parameter_unit_not_specified(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.integer, - unit="", - value=120, - min=60, - description="Duration of evaporation in seconds.", - ) - - def test_numeric_parameter_value_not_numeric(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.integer, - unit="sec", - value="not_a_number", - min=60, - description="Duration of evaporation in seconds.", - ) - - def test_numeric_parameter_min_greater_than_max(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.integer, - unit="sec", - value=120, - min=300, - max=60, - description="Duration of evaporation in seconds.", - ) - - def test_numeric_parameter_out_of_range_min(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.integer, - unit="sec", - value=5, - min=60, - max=300, - description="Duration of evaporation in seconds.", - ) - - def test_numeric_parameter_out_of_range_max(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.integer, - unit="sec", - value=100, - min=0, - max=80, - description="Duration of evaporation in seconds.", - ) - - def test_boolean_parameter_invalid_value(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.boolean, - value="not_a_boolean", - description="Whether to sparge the evaporation vessel with nitrogen.", - ) - - def test_choice_parameter_choices_not_specified(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.choice, - choices=[], - value="method1", - description="Method to use", - ) - - def test_choice_parameter_no_value(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.choice, - choices=["method1", "method2"], - value=None, - description="Method to use", - ) - - def test_choice_parameter_invalid_value(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.choice, - choices=["method1", "method2"], - value="invalid_method", - description="Method to use", - ) - - def test_list_parameter_invalid_element_type(self): - with pytest.raises(EosConfigurationError): - ParameterFactory.create_parameter( - ParameterType.list, - length=3, - element_type="invalid_type", - value=[1, 2, 3], - description="List of elements", - ) - - def 
-    def test_list_parameter_nested_list(self):
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="list",
-                value=[[1], [2], [3]],
-                description="List of elements",
-            )
-
-    def test_list_parameter_invalid_value(self):
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="integer",
-                value=4,
-                description="List of elements",
-            )
-
-    def test_list_parameter_elements_not_same_type(self):
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="integer",
-                value=[1, True, "3"],
-                description="List of elements",
-            )
-
-    def test_list_parameter_invalid_value_element_size(self):
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="integer",
-                value=[1, 2],
-                description="List of elements",
-            )
-
-    def test_list_parameter_invalid_value_element_min(self):
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="integer",
-                value=[1, 2, 3],
-                min=[2, 2, "INVALID"],
-                description="List of elements",
-            )
-
-    def test_list_parameter_invalid_value_element_max(self):
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="integer",
-                value=[1, 2, 3],
-                max=[2, 2, "INVALID"],
-                description="List of elements",
-            )
-
-    def test_list_parameter_value_less_than_min(self):
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="integer",
-                value=[2, 2, 2],
-                min=[2, 2, 3],
-                description="List of elements",
-            )
-
-    def test_list_parameter_value_greater_than_max(self):
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="integer",
-                value=[2, 2, 2],
-                max=[2, 2, 1],
-                description="List of elements",
-            )
-
-    def test_list_parameter_invalid_min_max_size(self):
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="integer",
-                value=[2, 2, 2],
-                min=[2, 2],
-                description="List of elements",
-            )
-
-        with pytest.raises(EosConfigurationError):
-            ParameterFactory.create_parameter(
-                ParameterType.list,
-                length=3,
-                element_type="integer",
-                value=[2, 2, 2],
-                max=[2, 2],
-                description="List of elements",
-            )
-
-    def test_parameter_factory_invalid_type(self):
-        with pytest.raises(ValueError):
-            ParameterFactory.create_parameter(
-                "invalid_type",
-                value=120,
-                description="Duration of evaporation in seconds.",
-            )
-
-    def test_parameter_invalid_name(self, configuration_manager):
-        task_specs = configuration_manager.task_specs
-
-        task_spec = task_specs.get_spec_by_type("Magnetic Mixing")
-
-        task_spec.input_parameters["invalid_name*"] = {
-            "type": "integer",
-            "unit": "sec",
-            "value": 120,
-            "description": "Duration of evaporation in seconds.",
-        }
-
-        with pytest.raises(EosConfigurationError):
-            TaskSpecification(**task_spec)
-
-    def test_output_numeric_parameter_unit_not_specified(self, configuration_manager):
-        with pytest.raises(EosConfigurationError):
-            TaskSpecificationOutputParameter(
-                type=ParameterType.integer,
-                unit="",
-                description="Duration of evaporation in seconds.",
-            )
-
-    def test_output_non_numeric_parameter_unit_specified(self, configuration_manager):
-        with pytest.raises(EosConfigurationError):
-            TaskSpecificationOutputParameter(
-                type=ParameterType.boolean,
-                unit="sec",
-                description="Whether to sparge the evaporation vessel with nitrogen.",
-            )
diff --git a/tests/user/testing/devices/abstract_lab/DT1/device.py b/tests/user/testing/devices/abstract_lab/DT1/device.py
index baaec86..7995302 100644
--- a/tests/user/testing/devices/abstract_lab/DT1/device.py
+++ b/tests/user/testing/devices/abstract_lab/DT1/device.py
@@ -3,8 +3,8 @@
 from eos.devices.base_device import BaseDevice
 
 
-class DT1Device(BaseDevice):
-    async def _initialize(self, initialization_parameters: dict[str, Any]) -> None:
+class DT1(BaseDevice):
+    async def _initialize(self, init_parameters: dict[str, Any]) -> None:
         pass
 
     async def _cleanup(self) -> None:
diff --git a/tests/user/testing/devices/abstract_lab/DT1/device.yml b/tests/user/testing/devices/abstract_lab/DT1/device.yml
index 2fd54c5..9f8aee3 100644
--- a/tests/user/testing/devices/abstract_lab/DT1/device.yml
+++ b/tests/user/testing/devices/abstract_lab/DT1/device.yml
@@ -1,2 +1,2 @@
 type: DT1
-description: An abstract device for testing
+desc: An abstract device for testing
diff --git a/tests/user/testing/devices/abstract_lab/DT2/device.py b/tests/user/testing/devices/abstract_lab/DT2/device.py
index ca9c76b..91c8974 100644
--- a/tests/user/testing/devices/abstract_lab/DT2/device.py
+++ b/tests/user/testing/devices/abstract_lab/DT2/device.py
@@ -3,8 +3,8 @@
 from eos.devices.base_device import BaseDevice
 
 
-class DT2Device(BaseDevice):
-    async def _initialize(self, initialization_parameters: dict[str, Any]) -> None:
+class DT2(BaseDevice):
+    async def _initialize(self, init_parameters: dict[str, Any]) -> None:
         pass
 
     async def _cleanup(self) -> None:
diff --git a/tests/user/testing/devices/abstract_lab/DT2/device.yml b/tests/user/testing/devices/abstract_lab/DT2/device.yml
index 758cf63..ea6a084 100644
--- a/tests/user/testing/devices/abstract_lab/DT2/device.yml
+++ b/tests/user/testing/devices/abstract_lab/DT2/device.yml
@@ -1,2 +1,2 @@
 type: DT2
-description: An abstract device for testing
+desc: An abstract device for testing
diff --git a/tests/user/testing/devices/abstract_lab/DT3/device.py b/tests/user/testing/devices/abstract_lab/DT3/device.py
index ee3f5d0..866b378 100644
--- a/tests/user/testing/devices/abstract_lab/DT3/device.py
+++ b/tests/user/testing/devices/abstract_lab/DT3/device.py
@@ -3,8 +3,8 @@
 from eos.devices.base_device import BaseDevice
 
 
-class DT3Device(BaseDevice):
-    async def _initialize(self, initialization_parameters: dict[str, Any]) -> None:
+class DT3(BaseDevice):
+    async def _initialize(self, init_parameters: dict[str, Any]) -> None:
         pass
 
     async def _cleanup(self) -> None:
diff --git a/tests/user/testing/devices/abstract_lab/DT3/device.yml b/tests/user/testing/devices/abstract_lab/DT3/device.yml
index 52e952d..703bd56 100644
--- a/tests/user/testing/devices/abstract_lab/DT3/device.yml
+++ b/tests/user/testing/devices/abstract_lab/DT3/device.yml
@@ -1,2 +1,2 @@
 type: DT3
-description: An abstract device for testing
+desc: An abstract device for testing
diff --git a/tests/user/testing/devices/abstract_lab/DT4/device.py b/tests/user/testing/devices/abstract_lab/DT4/device.py
index 35e2d98..4bd705f 100644
--- a/tests/user/testing/devices/abstract_lab/DT4/device.py
+++ b/tests/user/testing/devices/abstract_lab/DT4/device.py
@@ -3,8 +3,8 @@
 from eos.devices.base_device import BaseDevice
 
 
-class DT4Device(BaseDevice):
- async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: +class DT4(BaseDevice): + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/tests/user/testing/devices/abstract_lab/DT4/device.yml b/tests/user/testing/devices/abstract_lab/DT4/device.yml index 2257574..5857457 100644 --- a/tests/user/testing/devices/abstract_lab/DT4/device.yml +++ b/tests/user/testing/devices/abstract_lab/DT4/device.yml @@ -1,2 +1,2 @@ type: DT4 -description: An abstract device for testing +desc: An abstract device for testing diff --git a/tests/user/testing/devices/abstract_lab/DT5/device.py b/tests/user/testing/devices/abstract_lab/DT5/device.py index 1e335b6..dc318dc 100644 --- a/tests/user/testing/devices/abstract_lab/DT5/device.py +++ b/tests/user/testing/devices/abstract_lab/DT5/device.py @@ -3,8 +3,8 @@ from eos.devices.base_device import BaseDevice -class DT5Device(BaseDevice): - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: +class DT5(BaseDevice): + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/tests/user/testing/devices/abstract_lab/DT5/device.yml b/tests/user/testing/devices/abstract_lab/DT5/device.yml index 47bc540..044572c 100644 --- a/tests/user/testing/devices/abstract_lab/DT5/device.yml +++ b/tests/user/testing/devices/abstract_lab/DT5/device.yml @@ -1,2 +1,2 @@ type: DT5 -description: An abstract device for testing +desc: An abstract device for testing diff --git a/tests/user/testing/devices/abstract_lab/DT6/device.py b/tests/user/testing/devices/abstract_lab/DT6/device.py index f929fff..8c67807 100644 --- a/tests/user/testing/devices/abstract_lab/DT6/device.py +++ b/tests/user/testing/devices/abstract_lab/DT6/device.py @@ -3,8 +3,8 @@ from eos.devices.base_device import BaseDevice -class DT6Device(BaseDevice): - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: +class DT6(BaseDevice): + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/tests/user/testing/devices/abstract_lab/DT6/device.yml b/tests/user/testing/devices/abstract_lab/DT6/device.yml index 32f8009..c07b115 100644 --- a/tests/user/testing/devices/abstract_lab/DT6/device.yml +++ b/tests/user/testing/devices/abstract_lab/DT6/device.yml @@ -1,2 +1,2 @@ type: DT6 -description: An abstract device for testing +desc: An abstract device for testing diff --git a/tests/user/testing/devices/multiplication_lab/analyzer/device.py b/tests/user/testing/devices/multiplication_lab/analyzer/device.py index 1542391..798d2b4 100644 --- a/tests/user/testing/devices/multiplication_lab/analyzer/device.py +++ b/tests/user/testing/devices/multiplication_lab/analyzer/device.py @@ -3,8 +3,8 @@ from eos.devices.base_device import BaseDevice -class AnalyzerDevice(BaseDevice): - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: +class Analyzer(BaseDevice): + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/tests/user/testing/devices/multiplication_lab/analyzer/device.yml b/tests/user/testing/devices/multiplication_lab/analyzer/device.yml index 42333c7..15244ea 100644 --- a/tests/user/testing/devices/multiplication_lab/analyzer/device.yml +++ b/tests/user/testing/devices/multiplication_lab/analyzer/device.yml @@ -1,2 +1,2 @@ type: analyzer -description: A device 
for analyzing the result of the multiplication of two numbers +desc: A device for analyzing the result of the multiplication of two numbers diff --git a/tests/user/testing/devices/multiplication_lab/multiplier/device.py b/tests/user/testing/devices/multiplication_lab/multiplier/device.py index 8ad0af9..ddce344 100644 --- a/tests/user/testing/devices/multiplication_lab/multiplier/device.py +++ b/tests/user/testing/devices/multiplication_lab/multiplier/device.py @@ -3,8 +3,8 @@ from eos.devices.base_device import BaseDevice -class MultiplierDevice(BaseDevice): - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: +class Multiplier(BaseDevice): + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/tests/user/testing/devices/multiplication_lab/multiplier/device.yml b/tests/user/testing/devices/multiplication_lab/multiplier/device.yml index efe0ee4..c8ff252 100644 --- a/tests/user/testing/devices/multiplication_lab/multiplier/device.yml +++ b/tests/user/testing/devices/multiplication_lab/multiplier/device.yml @@ -1,2 +1,2 @@ type: multiplier -description: A device for multiplying two numbers +desc: A device for multiplying two numbers diff --git a/tests/user/testing/devices/small_lab/computer/device.py b/tests/user/testing/devices/small_lab/computer/device.py index b9b145d..f9db8b7 100644 --- a/tests/user/testing/devices/small_lab/computer/device.py +++ b/tests/user/testing/devices/small_lab/computer/device.py @@ -3,8 +3,8 @@ from eos.devices.base_device import BaseDevice -class ComputerDevice(BaseDevice): - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: +class Computer(BaseDevice): + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/tests/user/testing/devices/small_lab/computer/device.yml b/tests/user/testing/devices/small_lab/computer/device.yml index 40c6f12..a749c42 100644 --- a/tests/user/testing/devices/small_lab/computer/device.yml +++ b/tests/user/testing/devices/small_lab/computer/device.yml @@ -1,2 +1,2 @@ type: computer -description: General-purpose computer +desc: General-purpose computer diff --git a/tests/user/testing/devices/small_lab/evaporator/device.py b/tests/user/testing/devices/small_lab/evaporator/device.py index e66527e..cad1bba 100644 --- a/tests/user/testing/devices/small_lab/evaporator/device.py +++ b/tests/user/testing/devices/small_lab/evaporator/device.py @@ -3,8 +3,8 @@ from eos.devices.base_device import BaseDevice -class EvaporatorDevice(BaseDevice): - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: +class Evaporator(BaseDevice): + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/tests/user/testing/devices/small_lab/evaporator/device.yml b/tests/user/testing/devices/small_lab/evaporator/device.yml index e2dd67b..7d10393 100644 --- a/tests/user/testing/devices/small_lab/evaporator/device.yml +++ b/tests/user/testing/devices/small_lab/evaporator/device.yml @@ -1,2 +1,2 @@ type: evaporator -description: Evaporator for substance purification +desc: Evaporator for substance purification diff --git a/tests/user/testing/devices/small_lab/fridge/device.py b/tests/user/testing/devices/small_lab/fridge/device.py index b436246..84fb481 100644 --- a/tests/user/testing/devices/small_lab/fridge/device.py +++ b/tests/user/testing/devices/small_lab/fridge/device.py @@ -3,8 
+3,8 @@ from eos.devices.base_device import BaseDevice -class FridgeDevice(BaseDevice): - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: +class Fridge(BaseDevice): + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/tests/user/testing/devices/small_lab/fridge/device.yml b/tests/user/testing/devices/small_lab/fridge/device.yml index 8acfc81..a0ebc3c 100644 --- a/tests/user/testing/devices/small_lab/fridge/device.yml +++ b/tests/user/testing/devices/small_lab/fridge/device.yml @@ -1,2 +1,2 @@ type: fridge -description: Fridge for storing temperature-sensitive substances +desc: Fridge for storing temperature-sensitive substances diff --git a/tests/user/testing/devices/small_lab/magnetic_mixer/device.py b/tests/user/testing/devices/small_lab/magnetic_mixer/device.py index 44c1693..bfb6e01 100644 --- a/tests/user/testing/devices/small_lab/magnetic_mixer/device.py +++ b/tests/user/testing/devices/small_lab/magnetic_mixer/device.py @@ -3,8 +3,8 @@ from eos.devices.base_device import BaseDevice -class MagneticMixerDevice(BaseDevice): - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: +class MagneticMixer(BaseDevice): + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/tests/user/testing/devices/small_lab/magnetic_mixer/device.yml b/tests/user/testing/devices/small_lab/magnetic_mixer/device.yml index f3f3128..6ee115e 100644 --- a/tests/user/testing/devices/small_lab/magnetic_mixer/device.yml +++ b/tests/user/testing/devices/small_lab/magnetic_mixer/device.yml @@ -1,2 +1,2 @@ type: magnetic_mixer -description: Magnetic mixer for mixing substances +desc: Magnetic mixer for mixing substances diff --git a/tests/user/testing/experiments/abstract_experiment/experiment.yml b/tests/user/testing/experiments/abstract_experiment/experiment.yml index cac30cc..dd7b874 100644 --- a/tests/user/testing/experiments/abstract_experiment/experiment.yml +++ b/tests/user/testing/experiments/abstract_experiment/experiment.yml @@ -1,5 +1,5 @@ type: abstract_experiment -description: An abstract experiment for testing +desc: An abstract experiment for testing labs: - abstract_lab diff --git a/tests/user/testing/experiments/optimize_multiplication/experiment.yml b/tests/user/testing/experiments/optimize_multiplication/experiment.yml index 8322acb..4fe7624 100644 --- a/tests/user/testing/experiments/optimize_multiplication/experiment.yml +++ b/tests/user/testing/experiments/optimize_multiplication/experiment.yml @@ -1,5 +1,5 @@ type: optimize_multiplication -description: An experiment for finding the smallest number that when multiplied by two factors yields 1024 +desc: An experiment for finding the smallest number that when multiplied by two factors yields 1024 labs: - multiplication_lab diff --git a/tests/user/testing/experiments/water_purification/experiment.yml b/tests/user/testing/experiments/water_purification/experiment.yml index 6f791a1..daeb32c 100644 --- a/tests/user/testing/experiments/water_purification/experiment.yml +++ b/tests/user/testing/experiments/water_purification/experiment.yml @@ -1,5 +1,5 @@ type: water_purification -description: Experiment to find best parameters for purifying water using evaporation +desc: Experiment to find best parameters for purifying water using evaporation labs: - small_lab @@ -15,7 +15,7 @@ tasks: devices: - lab_id: small_lab id: magnetic_mixer - description: Magnetically 
mix water and salt + desc: Magnetically mix water and salt containers: beaker: 026749f8f40342b38157f9824ae2f512 @@ -28,7 +28,7 @@ tasks: devices: - lab_id: small_lab id: evaporator - description: Purification of water using evaporation + desc: Purification of water using evaporation dependencies: [ "mixing" ] containers: diff --git a/tests/user/testing/labs/abstract_lab/lab.yml b/tests/user/testing/labs/abstract_lab/lab.yml index 8032105..dfddfe2 100644 --- a/tests/user/testing/labs/abstract_lab/lab.yml +++ b/tests/user/testing/labs/abstract_lab/lab.yml @@ -1,5 +1,5 @@ type: abstract_lab -description: An abstract laboratory with abstract devices for testing +desc: An abstract laboratory with abstract devices for testing devices: D1: diff --git a/tests/user/testing/labs/multiplication_lab/lab.yml b/tests/user/testing/labs/multiplication_lab/lab.yml index c07c0d6..0380538 100644 --- a/tests/user/testing/labs/multiplication_lab/lab.yml +++ b/tests/user/testing/labs/multiplication_lab/lab.yml @@ -1,5 +1,5 @@ type: multiplication_lab -description: An abstract laboratory for testing multiplication +desc: An abstract laboratory for testing multiplication devices: multiplier: diff --git a/tests/user/testing/labs/small_lab/lab.yml b/tests/user/testing/labs/small_lab/lab.yml index 1b6bbc0..f9513b0 100644 --- a/tests/user/testing/labs/small_lab/lab.yml +++ b/tests/user/testing/labs/small_lab/lab.yml @@ -1,80 +1,80 @@ type: small_lab -description: A small laboratory for testing +desc: A small laboratory for testing locations: gc_1: - description: Gas Chromatography station 1 + desc: Gas Chromatography station 1 metadata: map_coordinates: { x: 100, y: 32, rotation: 0 } areas: injection_port: - description: Injection port for the gas chromatograph + desc: Injection port for the gas chromatograph gc_2: - description: Gas Chromatography station 2 + desc: Gas Chromatography station 2 metadata: map_coordinates: { x: 110, y: 32, rotation: 0 } areas: injection_port: - description: Injection port for the gas chromatograph + desc: Injection port for the gas chromatograph wafer_station: - description: Wafer processing station + desc: Wafer processing station metadata: map_coordinates: { x: 120, y: 32, rotation: 0 } areas: wafer_stack: - description: Wafer storage + desc: Wafer storage cartesian_robot_head: - description: Head of the cartesian robot that holds the wafer + desc: Head of the cartesian robot that holds the wafer mixing_station: - description: Station equipped with magnetic mixers for substance blending + desc: Station equipped with magnetic mixers for substance blending metadata: map_coordinates: { x: 140, y: 32, rotation: 0 } substance_shelf: - description: Storage shelf for chemical substances + desc: Storage shelf for chemical substances metadata: map_coordinates: { x: 50, y: 10, rotation: 0 } substance_fridge: - description: Refrigerated storage for temperature-sensitive substances + desc: Refrigerated storage for temperature-sensitive substances metadata: map_coordinates: { x: 60, y: 10, rotation: 0 } fetch_charging_station: - description: Charging station for the Fetch mobile manipulation robot + desc: Charging station for the Fetch mobile manipulation robot metadata: map_coordinates: { x: 10, y: 10, rotation: 0 } devices: general_computer: - description: General-purpose computer + desc: General-purpose computer type: computer location: gc_1 computer: eos_computer magnetic_mixer: - description: Mixer for substance blending + desc: Mixer for substance blending type: magnetic_mixer location: 
mixing_station computer: eos_computer magnetic_mixer_2: - description: Mixer for substance blending + desc: Mixer for substance blending type: magnetic_mixer location: mixing_station computer: eos_computer evaporator: - description: Evaporator for substance purification + desc: Evaporator for substance purification type: evaporator location: mixing_station computer: eos_computer substance_fridge: - description: Fridge for storing temperature-sensitive substances + desc: Fridge for storing temperature-sensitive substances type: fridge location: substance_fridge computer: eos_computer diff --git a/tests/user/testing/tasks/file_generation_task/task.py b/tests/user/testing/tasks/file_generation_task/task.py index 7052bd2..ece2831 100644 --- a/tests/user/testing/tasks/file_generation_task/task.py +++ b/tests/user/testing/tasks/file_generation_task/task.py @@ -3,7 +3,7 @@ from eos.tasks.base_task import BaseTask -class FileGenerationTask(BaseTask): +class FileGeneration(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/file_generation_task/task.yml b/tests/user/testing/tasks/file_generation_task/task.yml index 6731387..d925337 100644 --- a/tests/user/testing/tasks/file_generation_task/task.yml +++ b/tests/user/testing/tasks/file_generation_task/task.yml @@ -1,13 +1,13 @@ type: File Generation -description: Generates a file with random data. +desc: Generates a file with random data. device_types: - general_computer input_parameters: content_length: - type: integer + type: int unit: n/a value: 10 min: 0 - description: How many characters to generate in the file. + desc: How many characters to generate in the file. diff --git a/tests/user/testing/tasks/fridge_temperature_control/task.py b/tests/user/testing/tasks/fridge_temperature_control/task.py index 5c6cf26..993d7de 100644 --- a/tests/user/testing/tasks/fridge_temperature_control/task.py +++ b/tests/user/testing/tasks/fridge_temperature_control/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class FridgeTemperatureControlTask(BaseTask): +class FridgeTemperatureControl(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/fridge_temperature_control/task.yml b/tests/user/testing/tasks/fridge_temperature_control/task.yml index c74d53e..7741db0 100644 --- a/tests/user/testing/tasks/fridge_temperature_control/task.yml +++ b/tests/user/testing/tasks/fridge_temperature_control/task.yml @@ -1,13 +1,13 @@ type: Fridge Temperature Control -description: This task adjusts the temperature of a laboratory refrigerator to a specified target to ensure optimal storage conditions for substances that require precise temperature control. +desc: This task adjusts the temperature of a laboratory refrigerator to a specified target to ensure optimal storage conditions for substances that require precise temperature control. device_types: - fridge input_parameters: target_temperature: - type: integer + type: int unit: celsius min: -20 max: 10 - description: The new temperature for the fridge. + desc: The new temperature for the fridge. 
diff --git a/tests/user/testing/tasks/gc_analysis/task.py b/tests/user/testing/tasks/gc_analysis/task.py index eb8fa0f..fc0e4c4 100644 --- a/tests/user/testing/tasks/gc_analysis/task.py +++ b/tests/user/testing/tasks/gc_analysis/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class GcAnalysisTask(BaseTask): +class GcAnalysis(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/gc_analysis/task.yml b/tests/user/testing/tasks/gc_analysis/task.yml index 601ceac..f502cc8 100644 --- a/tests/user/testing/tasks/gc_analysis/task.yml +++ b/tests/user/testing/tasks/gc_analysis/task.yml @@ -1,50 +1,50 @@ type: GC Analysis -description: Perform gas chromatography (GC) analysis on a sample. +desc: Perform gas chromatography (GC) analysis on a sample. device_types: - gas_chromatograph input_parameters: injection_volume: - type: integer + type: int unit: ul min: 1 max: 10 - description: The volume of the sample to be injected into the GC system. + desc: The volume of the sample to be injected into the GC system. oven_temperature_initial: - type: integer + type: int unit: C min: 40 max: 100 - description: The initial temperature of the GC oven. + desc: The initial temperature of the GC oven. oven_temperature_final: - type: integer + type: int unit: C min: 150 max: 300 - description: The final temperature of the GC oven, should be higher than the initial temperature. + desc: The final temperature of the GC oven; it should be higher than the initial temperature. temperature_ramp_rate: - type: integer + type: int unit: C/min min: 1 max: 20 - description: The rate at which the oven temperature increases. + desc: The rate at which the oven temperature increases. carrier_gas: - type: string - description: The type of carrier gas used in the GC analysis, e.g., Helium. + type: str + desc: The type of carrier gas used in the GC analysis, e.g., Helium. flow_rate: - type: integer + type: int unit: ml/min min: 1 max: 5 - description: The flow rate of the carrier gas. + desc: The flow rate of the carrier gas. output_parameters: result_folder_path: - type: string - description: The file path to the folder containing the results of the GC analysis. + type: str + desc: The file path to the folder containing the results of the GC analysis. diff --git a/tests/user/testing/tasks/gc_injection/task.py b/tests/user/testing/tasks/gc_injection/task.py index 98c5b7e..dceef94 100644 --- a/tests/user/testing/tasks/gc_injection/task.py +++ b/tests/user/testing/tasks/gc_injection/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class GcInjectionTask(BaseTask): +class GcInjection(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/gc_injection/task.yml b/tests/user/testing/tasks/gc_injection/task.yml index 6953701..f8df651 100644 --- a/tests/user/testing/tasks/gc_injection/task.yml +++ b/tests/user/testing/tasks/gc_injection/task.yml @@ -1,10 +1,10 @@ type: GC Injection -description: This task involves the use of a mobile robot to perform sample injection in a GC. +desc: This task involves the use of a mobile robot to perform sample injection in a GC. device_types: - mobile_manipulation_robot input_parameters: gc_target_name: - type: string - description: The name of the GC target as defined in the GC injection task configuration YAML file. + type: str + desc: The name of the GC target as defined in the GC injection task configuration YAML file.
diff --git a/tests/user/testing/tasks/hplc_analysis/task.py b/tests/user/testing/tasks/hplc_analysis/task.py index a898eae..e28f2dc 100644 --- a/tests/user/testing/tasks/hplc_analysis/task.py +++ b/tests/user/testing/tasks/hplc_analysis/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class HplcAnalysisTask(BaseTask): +class HplcAnalysis(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/hplc_analysis/task.yml b/tests/user/testing/tasks/hplc_analysis/task.yml index 7ba9c26..162f34b 100644 --- a/tests/user/testing/tasks/hplc_analysis/task.yml +++ b/tests/user/testing/tasks/hplc_analysis/task.yml @@ -1,6 +1,6 @@ type: HPLC Analysis -description: This task performs High-Performance Liquid Chromatography (HPLC) analysis on a sample to separate, identify, and quantify its chemical components. +desc: This task performs High-Performance Liquid Chromatography (HPLC) analysis on a sample to separate, identify, and quantify its chemical components. device_types: - high_performance_liquid_chromatograph @@ -17,52 +17,52 @@ input_parameters: - C18 - C8 - HILIC - description: The type of HPLC column to be used for separation. + desc: The type of HPLC column to be used for separation. mobile_phase_a: - type: string + type: str value: water - description: The first mobile phase component (usually an aqueous solvent). + desc: The first mobile phase component (usually an aqueous solvent). mobile_phase_b: - type: string + type: str value: acetonitrile - description: The second mobile phase component (usually an organic solvent). + desc: The second mobile phase component (usually an organic solvent). gradient: - type: string + type: str value: "0 min: 5%B, 10 min: 95%B, 12 min: 95%B, 13 min: 5%B, 15 min: 5%B" - description: The gradient elution profile, specifying the change in mobile phase composition over time. + desc: The gradient elution profile, specifying the change in mobile phase composition over time. flow_rate: - type: decimal + type: float unit: ml/min value: 1.0 min: 0.1 max: 2.0 - description: The flow rate of the mobile phase through the HPLC column. + desc: The flow rate of the mobile phase through the HPLC column. injection_volume: - type: integer + type: int unit: uL value: 10 min: 1 max: 100 - description: The volume of sample injected into the HPLC system. + desc: The volume of sample injected into the HPLC system. detection_wavelength: - type: integer + type: int unit: nm value: 254 min: 190 max: 800 - description: The wavelength at which the detector is set to monitor the eluting compounds. + desc: The wavelength at which the detector is set to monitor the eluting compounds. output_parameters: peak_table_file_path: - type: string - description: Path to output file summarizing the detected peaks, their retention times, and areas. + type: str + desc: Path to output file summarizing the detected peaks, their retention times, and areas. chromatogram_file_path: - type: string - description: Path to output file of chromatogram data representing the detector response over time. \ No newline at end of file + type: str + desc: Path to output file of chromatogram data representing the detector response over time. 
\ No newline at end of file diff --git a/tests/user/testing/tasks/magnetic_mixing/task.py b/tests/user/testing/tasks/magnetic_mixing/task.py index ac8fdee..3d3766f 100644 --- a/tests/user/testing/tasks/magnetic_mixing/task.py +++ b/tests/user/testing/tasks/magnetic_mixing/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class MagneticMixingTask(BaseTask): +class MagneticMixing(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/magnetic_mixing/task.yml b/tests/user/testing/tasks/magnetic_mixing/task.yml index 1afae50..2a58508 100644 --- a/tests/user/testing/tasks/magnetic_mixing/task.yml +++ b/tests/user/testing/tasks/magnetic_mixing/task.yml @@ -1,5 +1,5 @@ type: Magnetic Mixing -description: This task involves the use of a magnetic stirrer to blend multiple substances into a homogeneous mixture. Both solid and liquid forms can be mixed to produce a liquid output. +desc: This task involves the use of a magnetic stirrer to blend multiple substances into a homogeneous mixture. Both solid and liquid forms can be mixed to produce a liquid output. device_types: - magnetic_mixer @@ -10,22 +10,22 @@ input_containers: input_parameters: speed: - type: integer + type: int unit: rpm value: 10 min: 1 max: 100 - description: The speed at which the magnetic stirrer operates, measured in revolutions per minute (rpm). + desc: The speed at which the magnetic stirrer operates, measured in revolutions per minute (rpm). time: - type: integer + type: int unit: sec value: 360 min: 3 max: 720 - description: The total time duration for which the substances will be mixed, measured in seconds. + desc: The total time duration for which the substances will be mixed, measured in seconds. output_parameters: mixing_time: - type: integer + type: int unit: sec - description: The total time duration for which the substances were mixed, measured in seconds. \ No newline at end of file + desc: The total time duration for which the substances were mixed, measured in seconds. \ No newline at end of file diff --git a/tests/user/testing/tasks/multiplication_lab/compute_multiplication_objective/task.py b/tests/user/testing/tasks/multiplication_lab/compute_multiplication_objective/task.py index 0ea7e3a..9cfcc36 100644 --- a/tests/user/testing/tasks/multiplication_lab/compute_multiplication_objective/task.py +++ b/tests/user/testing/tasks/multiplication_lab/compute_multiplication_objective/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class ComputeMultiplicationObjectiveTask(BaseTask): +class ComputeMultiplicationObjective(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/multiplication_lab/compute_multiplication_objective/task.yml b/tests/user/testing/tasks/multiplication_lab/compute_multiplication_objective/task.yml index 032e186..3402310 100644 --- a/tests/user/testing/tasks/multiplication_lab/compute_multiplication_objective/task.yml +++ b/tests/user/testing/tasks/multiplication_lab/compute_multiplication_objective/task.yml @@ -1,21 +1,21 @@ type: Compute Multiplication Objective -description: This task computes the objective for the optimize_multiplication experiment. +desc: This task computes the objective for the optimize_multiplication experiment. device_types: - analyzer input_parameters: number: - type: integer + type: int unit: none - description: The number to multiply. + desc: The number to multiply. 
product: - type: integer + type: int unit: none - description: The final product. + desc: The final product. output_parameters: objective: - type: integer + type: int unit: none - description: The objective for the find_smallest_number experiment. + desc: The objective for the optimize_multiplication experiment. diff --git a/tests/user/testing/tasks/multiplication_lab/multiplication/task.py b/tests/user/testing/tasks/multiplication_lab/multiplication/task.py index cdfa745..d98307b 100644 --- a/tests/user/testing/tasks/multiplication_lab/multiplication/task.py +++ b/tests/user/testing/tasks/multiplication_lab/multiplication/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class MultiplicationTask(BaseTask): +class Multiplication(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/multiplication_lab/multiplication/task.yml b/tests/user/testing/tasks/multiplication_lab/multiplication/task.yml index 163edd1..ab9e968 100644 --- a/tests/user/testing/tasks/multiplication_lab/multiplication/task.yml +++ b/tests/user/testing/tasks/multiplication_lab/multiplication/task.yml @@ -1,21 +1,21 @@ type: Multiplication -description: This task takes a number and a factor and multiplies them together. +desc: This task takes a number and a factor and multiplies them together. device_types: - multiplier input_parameters: number: - type: integer + type: int unit: none - description: The number to multiply. + desc: The number to multiply. factor: - type: integer + type: int unit: none - description: The factor to multiply the number by. + desc: The factor to multiply the number by. output_parameters: product: - type: integer + type: int unit: none - description: The product of the number and the factor. + desc: The product of the number and the factor. diff --git a/tests/user/testing/tasks/noop/task.py b/tests/user/testing/tasks/noop/task.py index 013ea1c..f52ec57 100644 --- a/tests/user/testing/tasks/noop/task.py +++ b/tests/user/testing/tasks/noop/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class NoopTask(BaseTask): +class Noop(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/noop/task.yml b/tests/user/testing/tasks/noop/task.yml index 5c5bcc7..fc51325 100644 --- a/tests/user/testing/tasks/noop/task.yml +++ b/tests/user/testing/tasks/noop/task.yml @@ -1,2 +1,2 @@ type: Noop -description: This task does nothing. +desc: This task does nothing. diff --git a/tests/user/testing/tasks/purification/task.py b/tests/user/testing/tasks/purification/task.py index 5d6fe80..5c30296 100644 --- a/tests/user/testing/tasks/purification/task.py +++ b/tests/user/testing/tasks/purification/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class PurificationTask(BaseTask): +class Purification(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/purification/task.yml b/tests/user/testing/tasks/purification/task.yml index 516fe29..c6455f7 100644 --- a/tests/user/testing/tasks/purification/task.yml +++ b/tests/user/testing/tasks/purification/task.yml @@ -1,5 +1,5 @@ type: Purification -description: "This task aims to purify a single substance by separating it from its impurities. The device supports two methods: evaporation and simple mixing." +desc: This task aims to purify a single substance by separating it from its impurities. The device supports evaporation and simple mixing.
device_types: - evaporator @@ -15,58 +15,58 @@ input_parameters: choices: - evaporation - simple_mixing - description: The purification method to be used. Choose between evaporation and simple mixing. + desc: The purification method to be used. Choose between evaporation and simple mixing. # Evaporation parameters evaporation_time: - type: integer + type: int unit: sec value: 120 min: 60 - description: Duration of evaporation in seconds. + desc: Duration of evaporation in seconds. evaporation_temperature: - type: integer + type: int unit: celsius value: 90 min: 30 max: 150 - description: Evaporation temperature in degrees Celsius. + desc: Evaporation temperature in degrees Celsius. evaporation_rotation_speed: - type: integer + type: int unit: rpm value: 120 min: 10 max: 300 - description: Speed of rotation in rpm. + desc: Speed of rotation in rpm. evaporation_sparging: - type: boolean + type: bool value: true - description: Whether to use sparging gas during evaporation. + desc: Whether to use sparging gas during evaporation. evaporation_sparging_flow: - type: integer + type: int unit: ml/min value: 5 min: 1 max: 10 - description: Flow rate of sparging gas in ml/min. + desc: Flow rate of sparging gas in ml/min. # Simple mixing parameters simple_mixing_time: - type: integer + type: int unit: sec value: 120 min: 60 - description: Duration of simple mixing in seconds. + desc: Duration of simple mixing in seconds. simple_mixing_rotation_speed: - type: integer + type: int unit: rpm value: 120 min: 10 max: 300 - description: Speed of rotation in rpm. + desc: Speed of rotation in rpm. output_parameters: water_salinity: - type: integer + type: int unit: ppm - description: The salinity of the purified water in parts per million. + desc: The salinity of the purified water in parts per million. diff --git a/tests/user/testing/tasks/robot_arm_container_transfer/task.py b/tests/user/testing/tasks/robot_arm_container_transfer/task.py index 7ca83ff..ee821b9 100644 --- a/tests/user/testing/tasks/robot_arm_container_transfer/task.py +++ b/tests/user/testing/tasks/robot_arm_container_transfer/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class RobotArmContainerTransferTask(BaseTask): +class RobotArmContainerTransfer(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/robot_arm_container_transfer/task.yml b/tests/user/testing/tasks/robot_arm_container_transfer/task.yml index b28a49b..2760fc0 100644 --- a/tests/user/testing/tasks/robot_arm_container_transfer/task.yml +++ b/tests/user/testing/tasks/robot_arm_container_transfer/task.yml @@ -1,22 +1,22 @@ type: Container Transfer -description: Transfer a container from one location area to another using a robot arm. +desc: Transfer a container from one location area to another using a robot arm. device_types: - fixed_arm_robot input_parameters: source_location: - type: string - description: The name of the source location area. + type: str + desc: The name of the source location. source_location_area: - type: string - description: The name of the source location area. + type: str + desc: The name of the source location area. target_location: - type: string - description: The name of the target location area. + type: str + desc: The name of the target location. target_location_area: - type: string - description: The name of the target location area. + type: str + desc: The name of the target location area.
diff --git a/tests/user/testing/tasks/sleep/task.py b/tests/user/testing/tasks/sleep/task.py index a5256d4..ebc4cce 100644 --- a/tests/user/testing/tasks/sleep/task.py +++ b/tests/user/testing/tasks/sleep/task.py @@ -4,7 +4,7 @@ from eos.tasks.base_task import BaseTask -class SleepTask(BaseTask): +class Sleep(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/sleep/task.yml b/tests/user/testing/tasks/sleep/task.yml index 3a1d992..afdd81c 100644 --- a/tests/user/testing/tasks/sleep/task.yml +++ b/tests/user/testing/tasks/sleep/task.yml @@ -1,10 +1,10 @@ type: Sleep -description: This task sleeps for the specified amount of time. +desc: This task sleeps for the specified amount of time. input_parameters: time: - type: integer + type: int unit: sec value: 0 min: 0 - description: The total time duration for which to sleep for. + desc: The total time duration for which to sleep. diff --git a/tests/user/testing/tasks/wafer_sampling/task.py b/tests/user/testing/tasks/wafer_sampling/task.py index 3fc993d..08bbdb4 100644 --- a/tests/user/testing/tasks/wafer_sampling/task.py +++ b/tests/user/testing/tasks/wafer_sampling/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class WaferSamplingTask(BaseTask): +class WaferSampling(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/wafer_sampling/task.yml b/tests/user/testing/tasks/wafer_sampling/task.yml index 119f365..ad608b4 100644 --- a/tests/user/testing/tasks/wafer_sampling/task.yml +++ b/tests/user/testing/tasks/wafer_sampling/task.yml @@ -1,5 +1,5 @@ type: Wafer Sampling -description: Perform wafer sampling with a cartesian robot and pump/valve system. +desc: Perform wafer sampling with a cartesian robot and pump/valve system. device_types: - cartesian_robot @@ -7,9 +7,9 @@ input_parameters: wafer_spot: type: list - element_type: integer + element_type: int length: 2 min: [ -10, -10 ] max: [ 10, 10 ] value: [ 0, 0 ] - description: The coordinates of the wafer spot in the wafer station. + desc: The coordinates of the wafer spot in the wafer station. diff --git a/tests/user/testing/tasks/weigh_container/task.py b/tests/user/testing/tasks/weigh_container/task.py index a8d1870..656a66a 100644 --- a/tests/user/testing/tasks/weigh_container/task.py +++ b/tests/user/testing/tasks/weigh_container/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class WeighContainerTask(BaseTask): +class WeighContainer(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/tests/user/testing/tasks/weigh_container/task.yml b/tests/user/testing/tasks/weigh_container/task.yml index a65ffec..ef4164e 100644 --- a/tests/user/testing/tasks/weigh_container/task.yml +++ b/tests/user/testing/tasks/weigh_container/task.yml @@ -1,19 +1,19 @@ type: Weigh Container -description: This task involves using an analytical balance to accurately measure the mass of a container. +desc: This task involves using an analytical balance to accurately measure the mass of a container. device_types: - balance input_parameters: minimum_weight: - type: decimal + type: float unit: g value: 0.1 min: 0.0001 - description: The minimum weight required for the measurement to be considered valid. + desc: The minimum weight required for the measurement to be considered valid. output_parameters: weight: - type: decimal + type: float unit: g - description: The measured weight of the container.
\ No newline at end of file + desc: The measured weight of the container. \ No newline at end of file diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_ray_actor_wrapper.py b/tests/utils/test_ray_actor_wrapper.py similarity index 100% rename from tests/test_ray_actor_wrapper.py rename to tests/utils/test_ray_actor_wrapper.py diff --git a/user/example/devices/analyzer/device.py b/user/example/devices/analyzer/device.py index 9d43be7..d19cfd0 100644 --- a/user/example/devices/analyzer/device.py +++ b/user/example/devices/analyzer/device.py @@ -3,10 +3,10 @@ from eos.devices.base_device import BaseDevice -class AnalyzerDevice(BaseDevice): +class Analyzer(BaseDevice): """Analyzes the multiplication result to produce a loss.""" - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/user/example/devices/analyzer/device.yml b/user/example/devices/analyzer/device.yml index a214e23..d10b80e 100644 --- a/user/example/devices/analyzer/device.yml +++ b/user/example/devices/analyzer/device.yml @@ -1,2 +1,2 @@ type: analyzer -description: A device for analyzing the result of the multiplication of some numbers and computing a loss. +desc: A device for analyzing the result of the multiplication of some numbers and computing a loss. diff --git a/user/example/devices/multiplier/device.py b/user/example/devices/multiplier/device.py index eed5905..b5d4508 100644 --- a/user/example/devices/multiplier/device.py +++ b/user/example/devices/multiplier/device.py @@ -3,10 +3,10 @@ from eos.devices.base_device import BaseDevice -class MultiplierDevice(BaseDevice): +class Multiplier(BaseDevice): """Multiplies two numbers.""" - async def _initialize(self, initialization_parameters: dict[str, Any]) -> None: + async def _initialize(self, init_parameters: dict[str, Any]) -> None: pass async def _cleanup(self) -> None: diff --git a/user/example/devices/multiplier/device.yml b/user/example/devices/multiplier/device.yml index efe0ee4..c8ff252 100644 --- a/user/example/devices/multiplier/device.yml +++ b/user/example/devices/multiplier/device.yml @@ -1,2 +1,2 @@ type: multiplier -description: A device for multiplying two numbers +desc: A device for multiplying two numbers diff --git a/user/example/experiments/optimize_multiplication/experiment.yml b/user/example/experiments/optimize_multiplication/experiment.yml index 9f91aa4..3f3c1a6 100644 --- a/user/example/experiments/optimize_multiplication/experiment.yml +++ b/user/example/experiments/optimize_multiplication/experiment.yml @@ -1,5 +1,5 @@ type: optimize_multiplication -description: An experiment for finding the smallest number that when multiplied by two factors yields 1024 +desc: An experiment for finding the smallest number that when multiplied by two factors yields 1024 labs: - multiplication_lab diff --git a/user/example/labs/multiplication_lab/lab.yml b/user/example/labs/multiplication_lab/lab.yml index 4c4ccf0..a651859 100644 --- a/user/example/labs/multiplication_lab/lab.yml +++ b/user/example/labs/multiplication_lab/lab.yml @@ -1,5 +1,5 @@ type: multiplication_lab -description: An example laboratory for testing multiplication +desc: An example laboratory for testing multiplication devices: multiplier: diff --git a/user/example/tasks/multiplication/task.py b/user/example/tasks/multiplication/task.py index 9e18aaf..d33d7dc 100644 --- 
a/user/example/tasks/multiplication/task.py +++ b/user/example/tasks/multiplication/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class MultiplicationTask(BaseTask): +class Multiplication(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/user/example/tasks/multiplication/task.yml b/user/example/tasks/multiplication/task.yml index 7f78d7f..81249e1 100644 --- a/user/example/tasks/multiplication/task.yml +++ b/user/example/tasks/multiplication/task.yml @@ -1,21 +1,21 @@ type: Multiplication -description: This task takes a number and a factor and multiplies them together using a "multiplier" device. +desc: This task takes a number and a factor and multiplies them together using a "multiplier" device. device_types: - multiplier input_parameters: number: - type: integer + type: int unit: none - description: The number to multiply. + desc: The number to multiply. factor: - type: integer + type: int unit: none - description: The factor to multiply the number by. + desc: The factor to multiply the number by. output_parameters: product: - type: integer + type: int unit: none - description: The product of the number and the factor. + desc: The product of the number and the factor. diff --git a/user/example/tasks/score_multiplication/task.py b/user/example/tasks/score_multiplication/task.py index a6f4046..514b7bd 100644 --- a/user/example/tasks/score_multiplication/task.py +++ b/user/example/tasks/score_multiplication/task.py @@ -1,7 +1,7 @@ from eos.tasks.base_task import BaseTask -class ScoreMultiplicationTask(BaseTask): +class ScoreMultiplication(BaseTask): async def _execute( self, devices: BaseTask.DevicesType, diff --git a/user/example/tasks/score_multiplication/task.yml b/user/example/tasks/score_multiplication/task.yml index f0080e6..19809ab 100644 --- a/user/example/tasks/score_multiplication/task.yml +++ b/user/example/tasks/score_multiplication/task.yml @@ -1,21 +1,21 @@ type: Score Multiplication -description: Scores multiplication based on how close the product is to 1024 and how small the initial number is using an "analyzer" device. +desc: Scores multiplication based on how close the product is to 1024 and how small the initial number is using an "analyzer" device. device_types: - analyzer input_parameters: number: - type: integer + type: int unit: none - description: The number that was multiplied with some factors. + desc: The number that was multiplied with some factors. product: - type: integer + type: int unit: none - description: The final product after multiplying with some factors. + desc: The final product after multiplying with some factors. output_parameters: loss: - type: integer + type: int unit: none - description: The multiplication loss. Captures how far the product is from 1024 and how large the initial number is. + desc: The multiplication loss. Captures how far the product is from 1024 and how large the initial number is.