diff --git a/.docker/tests/test_aiida.py b/.docker/tests/test_aiida.py
index 05fe007db2..7f952bd855 100644
--- a/.docker/tests/test_aiida.py
+++ b/.docker/tests/test_aiida.py
@@ -1,4 +1,5 @@
import json
+import re
import pytest
from packaging.version import parse
@@ -32,8 +33,10 @@ def test_verdi_status(aiida_exec, container_user):
assert '✔ broker:' in output
assert 'Daemon is running' in output
- # check that we have suppressed the warnings
- assert 'Warning' not in output
+ # Check that we have suppressed the warnings caused by running an installation from the repository with a newer
+ # RabbitMQ version. Only match lines that start with ``Warning:``, because otherwise deprecation warnings from
+ # other packages that we cannot control may fail the test.
+ assert not re.search(r'^Warning:', output, flags=re.MULTILINE)
def test_computer_setup_success(aiida_exec, container_user):
diff --git a/docs/source/installation/guide_complete.rst b/docs/source/installation/guide_complete.rst
index 49136bbba7..f576a17465 100644
--- a/docs/source/installation/guide_complete.rst
+++ b/docs/source/installation/guide_complete.rst
@@ -130,7 +130,7 @@ Although it is possible to run AiiDA without a daemon it does provide significan
.. important::
The ``aiida-core.services`` package ensures that RabbitMQ is installed in the conda environment.
- However, it is not a _service_, in the sense that it is not automatically started, but has to be started manually.
+ However, it is not a *service*, in the sense that it is not automatically started, but has to be started manually.
.. code-block:: console
@@ -254,7 +254,7 @@ Common options
The exact options available for the ``verdi profile setup`` command depend on the selected storage plugin, but there are a number of common options and functionality:
-* ``--profile``: The name of the profile.
+* ``--profile-name``: The name of the profile.
* ``--set-as-default``: Whether the new profile should be defined as the new default.
* ``--email``: Email for the default user that is created.
* ``--first-name``: First name for the default user that is created.
@@ -276,6 +276,10 @@ The exact options available for the ``verdi profile setup`` command depend on th
``core.sqlite_dos``
-------------------
+.. tip::
+
+ The ``verdi presto`` command provides a fully automated way to set up a profile with the ``core.sqlite_dos`` storage plugin if no configuration is required.
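+
+   For example, creating and setting such a profile then requires just a single command:
+
+   .. code-block:: console
+
+      $ verdi presto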
+
This storage plugin uses `SQLite <https://www.sqlite.org/>`_ and the `disk-objectstore <https://disk-objectstore.readthedocs.io/>`_ to store data.
The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `.
The installation instructions for SQLite depend on your system; please visit the `SQLite website <https://www.sqlite.org/>`_ for details.
@@ -296,20 +300,23 @@ The options specific to the ``core.sqlite_dos`` storage plugin are:
``core.psql_dos``
-----------------
-This storage plugin uses `PostgreSQL `_ and the `disk-objectstore `_ to store data.
-The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `.
-The storage plugin can connect to a PostgreSQL instance running on the localhost or on a server that can be reached over the internet.
-Instructions for installing PostgreSQL is beyond the scope of this guide.
-
.. tip::
The creation of the PostgreSQL user and database as explained below is implemented in an automated way in the ``verdi presto`` command.
- Instead of performing the steps below manually and running ``verdi profile setup core.psql_dos`` manually, it is possible to run:
+ Instead of performing the steps below manually and running ``verdi profile setup core.psql_dos``, it is possible to run:
.. code-block::
verdi presto --use-postgres
+ The ``verdi presto`` command also automatically tries to configure RabbitMQ as the broker if it is running locally.
+ Therefore, if the command succeeds in connecting to both PostgreSQL and RabbitMQ, ``verdi presto --use-postgres`` provides a fully automated way to create a profile suitable for production workloads.
+
+This storage plugin uses `PostgreSQL <https://www.postgresql.org/>`_ and the `disk-objectstore <https://disk-objectstore.readthedocs.io/>`_ to store data.
+The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `.
+The storage plugin can connect to a PostgreSQL instance running on the localhost or on a server that can be reached over the internet.
+Instructions for installing PostgreSQL are beyond the scope of this guide.
+
Before creating a profile, a database (and optionally a custom database user) has to be created.
First, connect to PostgreSQL using ``psql``, the `native command line client for PostgreSQL <https://www.postgresql.org/docs/current/app-psql.html>`_:
diff --git a/docs/source/installation/guide_quick.rst b/docs/source/installation/guide_quick.rst
index af9aaa8dc0..1805bde908 100644
--- a/docs/source/installation/guide_quick.rst
+++ b/docs/source/installation/guide_quick.rst
@@ -4,17 +4,18 @@
Quick installation guide
========================
+.. warning::
+
+ Not all AiiDA functionality is supported by the quick installation.
+ Please refer to the :ref:`section below <installation:guide-quick:limitations>` for more information and see the :ref:`complete installation guide ` for instructions to set up a feature-complete and performant installation.
+
+
First, install the ``aiida-core`` Python package:
.. code-block:: console
pip install aiida-core
-.. attention::
-
- AiiDA requires a recent version of Python.
- Please refer to the `Python Package Index `_ for the minimum required version.
-
Next, set up a profile where all data is stored:
.. code-block:: console
@@ -40,46 +41,67 @@ If none of the lines show a red cross, indicating a problem, the installation wa
If you encountered any issues, please refer to the :ref:`troubleshooting section `.
-.. warning::
-
- Not all AiiDA functionality is supported by the quick installation.
- Please refer to the :ref:`section below ` for more information.
-
.. _installation:guide-quick:limitations:
Quick install limitations
=========================
-Functionality
--------------
+A setup that is ideal for production work requires the PostgreSQL and RabbitMQ services.
+By default, ``verdi presto`` creates a profile that allows running AiiDA without these:
+
+* **Database**: The PostgreSQL database, which is used to store queryable data, is replaced by SQLite.
+* **Broker**: The RabbitMQ message broker, which allows communication with and between processes, is disabled.
+
+The following matrix shows the possible combinations of the database and broker options and their use cases:
+
++----------------------+----------------------------------------------------+-------------------------------------------------------------+
+|                      | **SQLite database**                                | **PostgreSQL database**                                     |
++======================+====================================================+=============================================================+
+| **No broker**        | Quick start with AiiDA                             | [*not really relevant for any use case*]                    |
++----------------------+----------------------------------------------------+-------------------------------------------------------------+
+| **RabbitMQ**         | Production (low-throughput; beta, has limitations) | Production (maximum performance, ideal for high-throughput) |
++----------------------+----------------------------------------------------+-------------------------------------------------------------+
+
+The sections below provide details on the use of the PostgreSQL and RabbitMQ services and the limitations when running AiiDA without them.
+
+.. _installation:guide-quick:limitations:rabbitmq:
+
+RabbitMQ
+--------
Part of AiiDA's functionality requires a `message broker <https://en.wikipedia.org/wiki/Message_broker>`_, with the default implementation using `RabbitMQ <https://www.rabbitmq.com/>`_.
-The message broker is used to allow communication with the :ref:`daemon `.
-Since RabbitMQ is a separate service and is not always trivial to install, the quick installation guide sets up a profile that does not require it.
-As a result, the daemon cannot be started and processes cannot be submitted to it but can only be run locally.
+The message broker is used to allow communication with processes and the :ref:`daemon `, as well as between processes themselves.
+Since RabbitMQ is a separate service and is not always trivial to install, the quick installation guide allows setting up a profile that does not require it.
+However, as a result the profile:
+
+* is not suitable for high-throughput workloads (a polling-based mechanism is used rather than an event-based one)
+* cannot run the daemon (no ``verdi daemon start/stop``) and therefore processes cannot be submitted to the daemon, i.e., one can only use ``run()`` instead of ``submit()`` to launch calculations and workflows (see the sketch below)
+* cannot play, pause, or kill processes
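+
+As a sketch of the last two points, a broker-less profile can still execute processes in the local interpreter; the code label ``add@localhost`` used here is hypothetical:
+
+.. code-block:: python
+
+    from aiida import engine, load_profile, orm
+
+    load_profile()
+
+    # Build the inputs for a code configured for the ``core.arithmetic.add`` plugin.
+    builder = orm.load_code('add@localhost').get_builder()
+    builder.x = orm.Int(1)
+    builder.y = orm.Int(2)
+
+    results = engine.run(builder)  # blocking execution in the current interpreter
+    # engine.submit(builder) would raise, since submission requires a broker and the daemon.
+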
.. note::
The ``verdi presto`` command automatically checks if RabbitMQ is running on the localhost.
- If it can successfully connect, it configures the profile with the message broker and therefore the daemon functionality will be available.
+ If it can successfully connect, it configures the profile with the message broker and therefore the limitations listed above do not apply.
.. tip::
- The connection parameters of RabbitMQ can be (re)configured after the profile is set up with ``verdi profile configure-rabbitmq``.
- This can be useful when the RabbitMQ setup is different from the default that AiiDA checks for and the automatic configuration of ``verdi presto`` failed.
+ A profile created by ``verdi presto`` can easily start using RabbitMQ as the broker at a later stage.
+ Once a RabbitMQ service is available (see :ref:`install RabbitMQ ` for instructions on installing it), run ``verdi profile configure-rabbitmq`` to configure the profile to use it.
+.. _installation:guide-quick:limitations:postgresql:
-Performance
------------
+PostgreSQL
+----------
-The quick installation guide by default creates a profile that uses `SQLite `_ for the database.
-Since SQLite does not require running a service, it is easy to install and use on essentially any system.
-However, for certain use cases it is not going to be the most performant solution.
-AiiDA also supports `PostgreSQL `_ which is often going to be more performant compared to SQLite.
+AiiDA stores (part of) the data of the provenance graph in a database, and the `PostgreSQL <https://www.postgresql.org/>`_ service provides great performance for use cases that require high throughput.
+Since PostgreSQL is a separate service and is not always trivial to install, the quick installation guide allows setting up a profile that uses the serverless `SQLite <https://www.sqlite.org/>`_ instead.
+However, as a result the profile:
+
+* is not suitable for high-throughput workloads (concurrent writes from multiple processes to the database are serialized)
+* does not support the ``has_key`` and ``contains`` operators in the ``QueryBuilder`` (see the example below)
+* does not support the ``get_creation_statistics`` method of the ``QueryBuilder``
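+
+For example, a query along the following lines (the extra key ``tag`` is hypothetical) works on a PostgreSQL-backed profile but raises an error on an SQLite-backed one:
+
+.. code-block:: python
+
+    from aiida.orm import Dict, QueryBuilder
+
+    # ``has_key`` filters on JSON fields are not implemented by the SQLite-based storage plugins.
+    QueryBuilder().append(Dict, filters={'extras': {'has_key': 'tag'}}).all()
+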
.. tip::
If a PostgreSQL service is available, run ``verdi presto --use-postgres`` to set up a profile that uses PostgreSQL instead of SQLite.
The command tries to connect to the service and automatically create a user account and database to use for the new profile.
AiiDA provides defaults that work for most setups where PostgreSQL is installed on the localhost.
Should this fail, the connection parameters can be customized using the ``--postgres-hostname``, ``--postgres-port``, ``--postgres-username``, ``--postgres-password`` options.
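+
+   A hedged example with hypothetical connection values:
+
+   .. code-block:: console
+
+      $ verdi presto --use-postgres --postgres-hostname 192.0.2.10 --postgres-port 5432 --postgres-username aiida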
-
-Please refer to the :ref:`complete installation guide ` for instructions to set up a feature-complete and performant installation.
diff --git a/pyproject.toml b/pyproject.toml
index 90e70ffb23..cb7df62131 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -174,6 +174,7 @@ requires-python = '>=3.9'
[project.entry-points.'aiida.transports']
'core.local' = 'aiida.transports.plugins.local:LocalTransport'
'core.ssh' = 'aiida.transports.plugins.ssh:SshTransport'
+'core.ssh_auto' = 'aiida.transports.plugins.ssh_auto:SshAutoTransport'
[project.entry-points.'aiida.workflows']
'core.arithmetic.add_multiply' = 'aiida.workflows.arithmetic.add_multiply:add_multiply'
diff --git a/src/aiida/__init__.py b/src/aiida/__init__.py
index 5067f789e2..8e86350e40 100644
--- a/src/aiida/__init__.py
+++ b/src/aiida/__init__.py
@@ -27,7 +27,7 @@
'For further information please visit http://www.aiida.net/. All rights reserved.'
)
__license__ = 'MIT license, see LICENSE.txt file.'
-__version__ = '2.6.1'
+__version__ = '2.6.1.post0'
__authors__ = 'The AiiDA team.'
__paper__ = (
'S. P. Huber et al., "AiiDA 1.0, a scalable computational infrastructure for automated reproducible workflows and '
diff --git a/src/aiida/cmdline/commands/cmd_computer.py b/src/aiida/cmdline/commands/cmd_computer.py
index acb9c2da81..4ac8480258 100644
--- a/src/aiida/cmdline/commands/cmd_computer.py
+++ b/src/aiida/cmdline/commands/cmd_computer.py
@@ -605,7 +605,7 @@ def computer_test(user, print_traceback, computer):
message += '\n Use the `--print-traceback` option to see the full traceback.'
echo.echo(message)
- echo.echo_warning(f'{1} out of {num_tests} tests failed')
+ echo.echo_warning('1 out of 1 tests failed')
@verdi_computer.command('delete')
diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py
index 6fa9518443..09d7070e7c 100644
--- a/src/aiida/cmdline/commands/cmd_presto.py
+++ b/src/aiida/cmdline/commands/cmd_presto.py
@@ -25,9 +25,17 @@
def get_default_presto_profile_name():
+ from aiida.common.exceptions import ConfigurationError
from aiida.manage import get_config
- profile_names = get_config().profile_names
+ try:
+ profile_names = get_config().profile_names
+ except ConfigurationError:
+ # This can happen when tab-completing in an environment that did not create the configuration folder yet.
+ # It would have been possible to just call ``get_config(create=True)`` to create the config directory, but this
+ # should not be done during tab-completion just to generate a default value.
+ return DEFAULT_PROFILE_NAME_PREFIX
+
indices = []
for profile_name in profile_names:
@@ -170,7 +178,7 @@ def verdi_presto(
created profile uses the new PostgreSQL database instead of SQLite.
"""
from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config
- from aiida.common import exceptions
+ from aiida.common import docs, exceptions
from aiida.manage.configuration import create_profile, load_profile
from aiida.orm import Computer
@@ -209,6 +217,7 @@ def verdi_presto(
broker_config = detect_rabbitmq_config()
except ConnectionError as exception:
echo.echo_report(f'RabbitMQ server not found ({exception}): configuring the profile without a broker.')
+ echo.echo_report(f'See {docs.URL_NO_BROKER} for details on the limitations of running without a broker.')
else:
echo.echo_report('RabbitMQ server detected: configuring the profile with a broker.')
broker_backend = 'core.rabbitmq'
diff --git a/src/aiida/cmdline/commands/cmd_process.py b/src/aiida/cmdline/commands/cmd_process.py
index 77e14a3300..c9c492ae14 100644
--- a/src/aiida/cmdline/commands/cmd_process.py
+++ b/src/aiida/cmdline/commands/cmd_process.py
@@ -101,6 +101,7 @@ def process_list(
from aiida.cmdline.commands.cmd_daemon import execute_client_command
from aiida.cmdline.utils.common import print_last_process_state_change
+ from aiida.common.docs import URL_NO_BROKER
from aiida.common.exceptions import ConfigurationError
from aiida.engine.daemon.client import get_daemon_client
from aiida.orm import ProcessNode, QueryBuilder
@@ -137,7 +138,7 @@ def process_list(
try:
client = get_daemon_client()
except ConfigurationError:
- echo.echo_warning('This profile does not have a broker and so it has no daemon.')
+ echo.echo_warning(f'This profile does not have a broker and so it has no daemon. See {URL_NO_BROKER}')
return
if not client.is_daemon_running:
diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py
index 057f2de5a9..3dd21b56bf 100644
--- a/src/aiida/cmdline/commands/cmd_profile.py
+++ b/src/aiida/cmdline/commands/cmd_profile.py
@@ -55,6 +55,7 @@ def command_create_profile(
:param kwargs: Arguments to initialise instance of the selected storage implementation.
"""
from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config
+ from aiida.common import docs
from aiida.plugins.entry_point import get_entry_point_from_class
if not storage_cls.read_only and email is None:
@@ -79,6 +80,7 @@ def command_create_profile(
else:
echo.echo_report('Creating profile without RabbitMQ.')
echo.echo_report('It can be configured at a later point in time with `verdi profile configure-rabbitmq`.')
+ echo.echo_report(f'See {docs.URL_NO_BROKER} for details on the limitations of running without a broker.')
try:
profile = create_profile(
diff --git a/src/aiida/cmdline/commands/cmd_status.py b/src/aiida/cmdline/commands/cmd_status.py
index dc4521af02..f3c32327dc 100644
--- a/src/aiida/cmdline/commands/cmd_status.py
+++ b/src/aiida/cmdline/commands/cmd_status.py
@@ -58,6 +58,7 @@ class ServiceStatus(enum.IntEnum):
def verdi_status(print_traceback, no_rmq):
"""Print status of AiiDA services."""
from aiida import __version__
+ from aiida.common.docs import URL_NO_BROKER
from aiida.common.exceptions import ConfigurationError
from aiida.engine.daemon.client import DaemonException, DaemonNotRunningException
from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER
@@ -141,7 +142,7 @@ def verdi_status(print_traceback, no_rmq):
print_status(
ServiceStatus.WARNING,
'broker',
- 'No broker defined for this profile: certain functionality not available.',
+ f'No broker defined for this profile: certain functionality not available. See {URL_NO_BROKER}',
)
# Getting the daemon status
@@ -151,7 +152,7 @@ def verdi_status(print_traceback, no_rmq):
print_status(
ServiceStatus.WARNING,
'daemon',
- 'No broker defined for this profile: daemon is not available.',
f'No broker defined for this profile: daemon is not available. See {URL_NO_BROKER}',
)
except DaemonNotRunningException as exception:
print_status(ServiceStatus.WARNING, 'daemon', str(exception))
diff --git a/src/aiida/cmdline/commands/cmd_storage.py b/src/aiida/cmdline/commands/cmd_storage.py
index 5382ff455f..f6f64b755e 100644
--- a/src/aiida/cmdline/commands/cmd_storage.py
+++ b/src/aiida/cmdline/commands/cmd_storage.py
@@ -8,6 +8,8 @@
###########################################################################
"""`verdi storage` commands."""
+import sys
+
import click
from click_spinner import spinner
@@ -24,14 +26,37 @@ def verdi_storage():
@verdi_storage.command('version')
def storage_version():
- """Print the current version of the storage schema."""
+ """Print the current version of the storage schema.
+
+ The command returns the following exit codes:
+
+ * 0: If the storage schema is equal to and compatible with the schema version of the code
+ * 3: If the storage cannot be reached or is corrupt
+ * 4: If the storage schema is incompatible with the code schema version and likely needs to be migrated.
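+
+ For example, a shell script could act on the exit code as follows (an illustrative sketch, not part of AiiDA):
+
+ verdi storage version
+ test $? -eq 4 && verdi storage migrate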
+ """
from aiida import get_profile
+ from aiida.common.exceptions import CorruptStorage, IncompatibleStorageSchema, UnreachableStorage
- profile = get_profile()
- head_version = profile.storage_cls.version_head()
- profile_version = profile.storage_cls.version_profile(profile)
- echo.echo(f'Latest storage schema version: {head_version!r}')
- echo.echo(f'Storage schema version of {profile.name!r}: {profile_version!r}')
+ try:
+ profile = get_profile()
+ head_version = profile.storage_cls.version_head()
+ profile_version = profile.storage_cls.version_profile(profile)
+ echo.echo(f'Latest storage schema version: {head_version!r}')
+ echo.echo(f'Storage schema version of {profile.name!r}: {profile_version!r}')
+ except Exception as exception:
+ echo.echo_critical(f'Failed to determine the storage version: {exception}')
+
+ try:
+ profile.storage_cls(profile)
+ except (CorruptStorage, UnreachableStorage) as exception:
+ echo.echo_error(f'The storage cannot be reached or is corrupt: {exception}')
+ sys.exit(3)
+ except IncompatibleStorageSchema:
+ echo.echo_error(
+ f'The storage schema version {profile_version} is incompatible with the code version {head_version}. '
+ 'Run `verdi storage migrate` to migrate the storage.'
+ )
+ sys.exit(4)
@verdi_storage.command('migrate')
@@ -41,12 +66,14 @@ def storage_migrate(force):
from aiida.engine.daemon.client import get_daemon_client
from aiida.manage import get_manager
- client = get_daemon_client()
- if client.is_daemon_running:
- echo.echo_critical('Migration aborted, the daemon for the profile is still running.')
-
manager = get_manager()
profile = manager.get_profile()
+
+ if profile.process_control_backend:
+ client = get_daemon_client()
+ if client.is_daemon_running:
+ echo.echo_critical('Migration aborted, the daemon for the profile is still running.')
+
storage_cls = profile.storage_cls
if not force:
diff --git a/src/aiida/cmdline/utils/decorators.py b/src/aiida/cmdline/utils/decorators.py
index 84386710f9..5363926978 100644
--- a/src/aiida/cmdline/utils/decorators.py
+++ b/src/aiida/cmdline/utils/decorators.py
@@ -45,6 +45,7 @@ def with_broker(wrapped, _, args, kwargs):
If the currently loaded profile does not define a broker, the command is aborted.
"""
+ from aiida.common.docs import URL_NO_BROKER
from aiida.manage import get_manager
broker = get_manager().get_broker()
@@ -54,6 +55,7 @@ def with_broker(wrapped, _, args, kwargs):
if broker is None:
echo.echo_critical(
f'Profile `{profile.name}` does not support this functionality as it does not provide a broker.'
+ f' See {URL_NO_BROKER} for more details.'
)
kwargs['broker'] = broker
@@ -313,6 +315,7 @@ def start_daemon():
If the loaded profile does not define a broker, the command will exit with a critical error.
"""
+ from aiida.common.docs import URL_NO_BROKER
from aiida.manage import get_manager
manager = get_manager()
@@ -323,6 +326,9 @@ def start_daemon():
assert profile is not None
if manager.get_broker() is None:
- echo.echo_critical(f'profile `{profile.name}` does not define a broker and so cannot use this functionality.')
+ echo.echo_critical(
+ f'profile `{profile.name}` does not define a broker and so cannot use this functionality.'
+ f' See {URL_NO_BROKER} for more details.'
+ )
return wrapped(*args, **kwargs)
diff --git a/src/aiida/common/docs.py b/src/aiida/common/docs.py
new file mode 100644
index 0000000000..797c944387
--- /dev/null
+++ b/src/aiida/common/docs.py
@@ -0,0 +1,4 @@
+"""Collection of links to the documentation that can be used in log messages for reference."""
+
+URL_BASE = 'https://aiida-core.readthedocs.io/en/stable'
+URL_NO_BROKER = f'{URL_BASE}/installation/guide_quick.html#quick-install-limitations'
diff --git a/src/aiida/engine/daemon/client.py b/src/aiida/engine/daemon/client.py
index 67ca7b99b5..ef250802f7 100644
--- a/src/aiida/engine/daemon/client.py
+++ b/src/aiida/engine/daemon/client.py
@@ -91,6 +91,8 @@ def __init__(self, profile: Profile):
:param profile: The profile instance.
"""
+ from aiida.common.docs import URL_NO_BROKER
+
type_check(profile, Profile)
config = get_config()
self._profile = profile
@@ -99,7 +101,8 @@ def __init__(self, profile: Profile):
if self._profile.process_control_backend is None:
raise ConfigurationError(
- f'profile `{self._profile.name}` does not define a broker so the daemon cannot be used.'
+ f'profile `{self._profile.name}` does not define a broker so the daemon cannot be used. '
+ f'See {URL_NO_BROKER} for more details.'
)
@property
diff --git a/src/aiida/engine/launch.py b/src/aiida/engine/launch.py
index d37cf46905..34fd1d7c0d 100644
--- a/src/aiida/engine/launch.py
+++ b/src/aiida/engine/launch.py
@@ -103,6 +103,8 @@ def submit(
:param kwargs: inputs to be passed to the process. This is an alternative to the positional ``inputs`` argument.
:return: the calculation node of the process
"""
+ from aiida.common.docs import URL_NO_BROKER
+
inputs = prepare_inputs(inputs, **kwargs)
# Submitting from within another process requires ``self.submit``` unless it is a work function, in which case the
@@ -117,7 +119,8 @@ def submit(
'Cannot submit because the runner does not have a process controller, probably because the profile does '
'not define a broker like RabbitMQ. If a RabbitMQ server is available, the profile can be configured to '
'use it with `verdi profile configure-rabbitmq`. Otherwise, use :meth:`aiida.engine.launch.run` instead to '
- 'run the process in the local Python interpreter instead of submitting it to the daemon.'
+ 'run the process in the local Python interpreter rather than submitting it to the daemon. '
+ f'See {URL_NO_BROKER} for more details.'
)
assert runner.persister is not None, 'runner does not have a persister'
diff --git a/src/aiida/engine/processes/process.py b/src/aiida/engine/processes/process.py
index 5eabfd56f7..c5fc50bcc3 100644
--- a/src/aiida/engine/processes/process.py
+++ b/src/aiida/engine/processes/process.py
@@ -415,8 +415,19 @@ def on_create(self) -> None:
@override
def on_entered(self, from_state: Optional[plumpy.process_states.State]) -> None:
"""After entering a new state, save a checkpoint and update the latest process state change timestamp."""
+ from plumpy import ProcessState
+
from aiida.engine.utils import set_process_state_change_timestamp
+ super().on_entered(from_state)
+
+ if self._state.LABEL is ProcessState.EXCEPTED:
+ # The process is already excepted so simply update the process state on the node and let the process
+ # complete the state transition to the terminal state. If another exception is raised during this exception
+ # handling, the process transitioning is cut short and never makes it to the terminal state.
+ self.node.set_process_state(self._state.LABEL)
+ return
+
# For reasons unknown, it is important to update the outputs first, before doing anything else, otherwise there
# is the risk that certain outputs do not get attached before the process reaches a terminal state. Nevertheless
# we need to guarantee that the process state gets updated even if the ``update_outputs`` call excepts, for
@@ -431,8 +442,7 @@ def on_entered(self, from_state: Optional[plumpy.process_states.State]) -> None:
self.node.set_process_state(self._state.LABEL) # type: ignore[arg-type]
self._save_checkpoint()
- set_process_state_change_timestamp(self)
- super().on_entered(from_state)
+ set_process_state_change_timestamp(self.node)
@override
def on_terminated(self) -> None:
diff --git a/src/aiida/engine/utils.py b/src/aiida/engine/utils.py
index 888089dc64..4053156a97 100644
--- a/src/aiida/engine/utils.py
+++ b/src/aiida/engine/utils.py
@@ -18,6 +18,8 @@
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterator, List, Optional, Tuple, Type, Union
if TYPE_CHECKING:
+ from aiida.orm import ProcessNode
+
from .processes import Process, ProcessBuilder
from .runners import Runner
@@ -259,33 +261,45 @@ def loop_scope(loop) -> Iterator[None]:
asyncio.set_event_loop(current)
-def set_process_state_change_timestamp(process: 'Process') -> None:
+def set_process_state_change_timestamp(node: 'ProcessNode') -> None:
"""Set the global setting that reflects the last time a process changed state, for the process type
of the given process, to the current timestamp. The process type will be determined based on
the class of the calculation node it has as its database container.
:param node: the node of the process that changed its state
"""
+ from sqlalchemy.exc import OperationalError
+
from aiida.common import timezone
from aiida.manage import get_manager
from aiida.orm import CalculationNode, ProcessNode, WorkflowNode
- if isinstance(process.node, CalculationNode):
+ if isinstance(node, CalculationNode):
process_type = 'calculation'
- elif isinstance(process.node, WorkflowNode):
+ elif isinstance(node, WorkflowNode):
process_type = 'work'
- elif isinstance(process.node, ProcessNode):
+ elif isinstance(node, ProcessNode):
# This will only occur for testing, as in general users cannot launch plain Process classes
return
else:
- raise ValueError(f'unsupported calculation node type {type(process.node)}')
+ raise ValueError(f'unsupported calculation node type {type(node)}')
key = PROCESS_STATE_CHANGE_KEY.format(process_type)
description = PROCESS_STATE_CHANGE_DESCRIPTION.format(process_type)
value = timezone.now().isoformat()
backend = get_manager().get_profile_storage()
- backend.set_global_variable(key, value, description)
+
+ try:
+ backend.set_global_variable(key, value, description)
+ except OperationalError:
+ # This typically happens for SQLite-based storage plugins like ``core.sqlite_dos``. Since this is merely an
+ # update of a timestamp that is likely to be updated soon again, just ignoring the failed update is not a
+ # problem.
+ LOGGER.warning(
+ f'Failed to write global variable `{key}` to `{value}` because the database was locked. If the storage '
+ 'plugin being used is `core.sqlite_dos` this is to be expected and can be safely ignored.'
+ )
def get_process_state_change_timestamp(process_type: Optional[str] = None) -> Optional[datetime]:
diff --git a/src/aiida/transports/plugins/local.py b/src/aiida/transports/plugins/local.py
index 0740837fc4..b8263620d3 100644
--- a/src/aiida/transports/plugins/local.py
+++ b/src/aiida/transports/plugins/local.py
@@ -228,9 +228,18 @@ def put(self, localpath, remotepath, *args, **kwargs):
:raise OSError: if remotepath is not valid
:raise ValueError: if localpath is not valid
"""
+ from aiida.common.warnings import warn_deprecation
+
+ if 'ignore_noexisting' in kwargs:
+ # Backwards compatibility check for old keyword that was misspelled
+ warn_deprecation(
+ 'Detected `ignore_noexisting` which is now deprecated. Use `ignore_nonexisting` instead.', version=3
+ )
+ kwargs.setdefault('ignore_nonexisting', kwargs.pop('ignore_noexisting'))  # forward the value so the lookup below picks it up
+
dereference = kwargs.get('dereference', args[0] if args else True)
overwrite = kwargs.get('overwrite', args[1] if len(args) > 1 else True)
- ignore_nonexisting = kwargs.get('ignore_noexisting', args[2] if len(args) > 2 else False)
+ ignore_nonexisting = kwargs.get('ignore_nonexisting', args[2] if len(args) > 2 else False)
if not remotepath:
raise OSError('Input remotepath to put function must be a non empty string')
if not localpath:
diff --git a/src/aiida/transports/plugins/ssh_auto.py b/src/aiida/transports/plugins/ssh_auto.py
new file mode 100644
index 0000000000..5d193cac87
--- /dev/null
+++ b/src/aiida/transports/plugins/ssh_auto.py
@@ -0,0 +1,61 @@
+###########################################################################
+# Copyright (c), The AiiDA team. All rights reserved. #
+# This file is part of the AiiDA code. #
+# #
+# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
+# For further information on the license, see the LICENSE.txt file #
+# For further information please visit http://www.aiida.net #
+###########################################################################
+"""Plugin for transport over SSH (and SFTP for file transfer)."""
+
+import pathlib
+
+import paramiko
+
+from .ssh import SshTransport
+
+__all__ = ('SshAutoTransport',)
+
+
+class SshAutoTransport(SshTransport):
+ """Support connection, command execution and data transfer to remote computers via SSH+SFTP."""
+
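+ # All connection details are resolved from the user's SSH config file rather than from AiiDA's own
+ # configuration options. A minimal config entry this transport can consume (hypothetical host):
+ #
+ # Host fictional-cluster
+ # HostName hpc.example.org
+ # User alice
+ # IdentityFile ~/.ssh/id_ed25519
+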
+ _valid_connect_params = []
+ _valid_auth_options = []
+
+ FILEPATH_CONFIG: pathlib.Path = pathlib.Path('~').expanduser() / '.ssh' / 'config'
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs, key_policy='AutoAddPolicy')
+
+ config_ssh = paramiko.SSHConfig()
+
+ try:
+ with self.FILEPATH_CONFIG.open() as handle:
+ config_ssh.parse(handle)
+ except FileNotFoundError as exception:
+ raise RuntimeError(
f'Could not determine connection configuration as the `{self.FILEPATH_CONFIG}` file does not exist.'
+ ) from exception
+ except PermissionError as exception:
+ raise RuntimeError(
f'Could not determine connection configuration as the `{self.FILEPATH_CONFIG}` file is not readable.'
+ ) from exception
+
+ if self._machine not in config_ssh.get_hostnames():
+ self.logger.warning(
+ f'The host `{self._machine}` is not defined in SSH config, connection will most likely fail to open.'
+ )
+
+ config_host = config_ssh.lookup(self._machine)
+
+ self._connect_args = {
+ 'port': config_host.get('port', 22),
+ 'username': config_host.get('user'),
+ 'key_filename': config_host.get('identityfile', []),
+ 'timeout': config_host.get('connecttimeout', 60),
+ 'proxy_command': config_host.get('proxycommand', None),
+ 'proxy_jump': config_host.get('proxyjump', None),
+ }
+
+ self._machine = config_host['hostname']
diff --git a/tests/cmdline/commands/test_computer.py b/tests/cmdline/commands/test_computer.py
index dac1170770..6ae94c1634 100644
--- a/tests/cmdline/commands/test_computer.py
+++ b/tests/cmdline/commands/test_computer.py
@@ -971,3 +971,17 @@ def time_use_login_shell(authinfo, auth_params, use_login_shell, iterations) ->
result = run_cli_command(computer_test, [aiida_localhost.label], use_subprocess=False)
assert 'Success: all 6 tests succeeded' in result.output
assert 'computer is configured to use a login shell, which is slower compared to a normal shell' in result.output
+
+
+def test_computer_ssh_auto(run_cli_command, aiida_computer):
+ """Test setup of computer with ``core.ssh_auto`` entry point.
+
+ The configure step should only require the common shared options ``safe_interval`` and ``use_login_shell``.
+ """
+ computer = aiida_computer(transport_type='core.ssh_auto').store()
+ assert not computer.is_configured
+
+ # It is important that no other options (except for `--safe-interval`) have to be specified for this transport type.
+ options = ['core.ssh_auto', computer.uuid, '--non-interactive', '--safe-interval', '0']
+ run_cli_command(computer_configure, options, use_subprocess=False)
+ assert computer.is_configured
diff --git a/tests/cmdline/commands/test_storage.py b/tests/cmdline/commands/test_storage.py
index 8c374885a2..59698343e6 100644
--- a/tests/cmdline/commands/test_storage.py
+++ b/tests/cmdline/commands/test_storage.py
@@ -23,6 +23,26 @@ def tests_storage_version(run_cli_command):
assert version in result.output
+@pytest.mark.parametrize(
+ 'exception_cls, exit_code',
+ (
+ (exceptions.CorruptStorage, 3),
+ (exceptions.UnreachableStorage, 3),
+ (exceptions.IncompatibleStorageSchema, 4),
+ ),
+)
+def tests_storage_version_non_zero_exit_code(aiida_profile, run_cli_command, monkeypatch, exception_cls, exit_code):
+ """Test the ``verdi storage version`` command when it returns a non-zero exit code."""
+
+ def validate_storage(self):
+ raise exception_cls()
+
+ with monkeypatch.context() as context:
+ context.setattr(aiida_profile.storage_cls.migrator, 'validate_storage', validate_storage)
+ result = run_cli_command(cmd_storage.storage_version, raises=True)
+ assert result.exit_code == exit_code
+
+
def tests_storage_info(aiida_localhost, run_cli_command):
"""Test the ``verdi storage info`` command with the ``--detailed`` option."""
from aiida import orm
@@ -35,6 +55,15 @@ def tests_storage_info(aiida_localhost, run_cli_command):
assert node.node_type in result.output
+@pytest.mark.usefixtures('stopped_daemon_client')
+def tests_storage_migrate_no_broker(aiida_config_tmp, aiida_profile_factory, run_cli_command):
+ """Test the ``verdi storage migrate`` command for a profile without a broker."""
+ with aiida_profile_factory(aiida_config_tmp) as profile:
+ assert profile.process_control_backend is None
+ result = run_cli_command(cmd_storage.storage_migrate, parameters=['--force'], use_subprocess=False)
+ assert 'Migrating to the head of the main branch' in result.output
+
+
@pytest.mark.usefixtures('stopped_daemon_client')
def tests_storage_migrate_force(run_cli_command):
"""Test the ``verdi storage migrate`` command (with force option)."""
diff --git a/tests/engine/daemon/test_execmanager.py b/tests/engine/daemon/test_execmanager.py
index bb4209659d..5b34f099a7 100644
--- a/tests/engine/daemon/test_execmanager.py
+++ b/tests/engine/daemon/test_execmanager.py
@@ -16,7 +16,6 @@
from aiida.common.folders import SandboxFolder
from aiida.engine.daemon import execmanager
from aiida.orm import CalcJobNode, FolderData, PortableCode, RemoteData, SinglefileData
-from aiida.plugins import entry_point
from aiida.transports.plugins.local import LocalTransport
@@ -40,7 +39,7 @@ def file_hierarchy_simple():
}
-@pytest.fixture(params=entry_point.get_entry_point_names('aiida.transports'))
+@pytest.fixture(params=('core.local', 'core.ssh'))
def node_and_calc_info(aiida_localhost, aiida_computer_ssh, aiida_code_installed, request):
"""Return a ``CalcJobNode`` and associated ``CalcInfo`` instance."""
diff --git a/tests/engine/test_utils.py b/tests/engine/test_utils.py
index 00bf79b7bc..cca5866343 100644
--- a/tests/engine/test_utils.py
+++ b/tests/engine/test_utils.py
@@ -9,6 +9,7 @@
"""Test engine utilities such as the exponential backoff mechanism."""
import asyncio
+import contextlib
import pytest
from aiida import orm
@@ -16,9 +17,11 @@
from aiida.engine.utils import (
InterruptableFuture,
exponential_backoff_retry,
+ get_process_state_change_timestamp,
instantiate_process,
interruptable_task,
is_process_function,
+ set_process_state_change_timestamp,
)
ITERATION = 0
@@ -225,3 +228,52 @@ async def coro():
result = await task_fut
assert result == 'NOT ME!!!'
+
+
+@pytest.mark.parametrize('with_transaction', (True, False))
+@pytest.mark.parametrize('monkeypatch_process_state_change', (True, False))
+def test_set_process_state_change_timestamp(manager, with_transaction, monkeypatch_process_state_change, monkeypatch):
+ """Test :func:`aiida.engine.utils.set_process_state_change_timestamp`.
+
+ This function is known to except when the ``core.sqlite_dos`` storage plugin is used and multiple processes are run.
+ The function is called each time a process changes state and since it is updating the same row in the settings table
+ the limitation of SQLite to not allow concurrent writes to the same page causes an exception to be thrown because
+ the database is locked. This exception is caught in ``set_process_state_change_timestamp`` and is simply ignored.
+ This test makes sure that if this happens, any other state changes, e.g. an extra being set on a node, are not
+ accidentally reverted, whether or not the changes are performed in an explicit transaction.
+ """
+ storage = manager.get_profile_storage()
+
+ node = orm.CalculationNode().store()
+ extra_key = 'some_key'
+ extra_value = 'some value'
+
+ # Initialize the process state change timestamp so it is possible to check whether it was changed or not at the
+ # end of the test.
+ set_process_state_change_timestamp(node)
+ current_timestamp = get_process_state_change_timestamp()
+ assert current_timestamp is not None
+
+ if monkeypatch_process_state_change:
+
+ def set_global_variable(*_, **__):
+ from sqlalchemy.exc import OperationalError
+
+ raise OperationalError('monkey failure', None, '', '')
+
+ monkeypatch.setattr(storage, 'set_global_variable', set_global_variable)
+
+ transaction_context = storage.transaction if with_transaction else contextlib.nullcontext
+
+ with transaction_context():
+ node.base.extras.set(extra_key, extra_value)
+ set_process_state_change_timestamp(node)
+
+ # The node extra should always have been set, regardless if the process state change excepted
+ assert node.base.extras.get(extra_key) == extra_value
+
+ # The process state change should have changed if the storage plugin was not monkeypatched to fail
+ if monkeypatch_process_state_change:
+ assert get_process_state_change_timestamp() == current_timestamp
+ else:
+ assert get_process_state_change_timestamp() != current_timestamp
diff --git a/tests/engine/test_work_chain.py b/tests/engine/test_work_chain.py
index e7288a3806..1fbb578b2a 100644
--- a/tests/engine/test_work_chain.py
+++ b/tests/engine/test_work_chain.py
@@ -422,6 +422,7 @@ def illegal(self):
orm.QueryBuilder().append(orm.ProcessNode, tag='node').order_by({'node': {'id': 'desc'}}).first(flat=True)
)
assert node.is_excepted
+ assert node.is_sealed
assert 'ValueError: Workflow tried returning an unstored `Data` node.' in node.exception
def test_same_input_node(self):
diff --git a/tests/orm/nodes/data/test_jsonable.py b/tests/orm/nodes/data/test_jsonable.py
index 59fbe9b6a8..bacedac73a 100644
--- a/tests/orm/nodes/data/test_jsonable.py
+++ b/tests/orm/nodes/data/test_jsonable.py
@@ -6,7 +6,7 @@
import pytest
from aiida.orm import load_node
from aiida.orm.nodes.data.jsonable import JsonableData
-from pymatgen.core import Molecule
+from pymatgen.core.structure import Molecule
class JsonableClass:
diff --git a/tests/transports/test_all_plugins.py b/tests/transports/test_all_plugins.py
index 986dd465a9..c536b196a2 100644
--- a/tests/transports/test_all_plugins.py
+++ b/tests/transports/test_all_plugins.py
@@ -34,14 +34,22 @@
@pytest.fixture(scope='function', params=entry_point.get_entry_point_names('aiida.transports'))
-def custom_transport(request) -> Transport:
+def custom_transport(request, tmp_path, monkeypatch) -> Transport:
"""Fixture that parametrizes over all the registered implementations of the ``CommonRelaxWorkChain``."""
+ plugin = TransportFactory(request.param)
+
if request.param == 'core.ssh':
kwargs = {'machine': 'localhost', 'timeout': 30, 'load_system_host_keys': True, 'key_policy': 'AutoAddPolicy'}
+ elif request.param == 'core.ssh_auto':
+ kwargs = {'machine': 'localhost'}
+ filepath_config = tmp_path / 'config'
+ monkeypatch.setattr(plugin, 'FILEPATH_CONFIG', filepath_config)
+ if not filepath_config.exists():
+ filepath_config.write_text('Host localhost')
else:
kwargs = {}
- return TransportFactory(request.param)(**kwargs)
+ return plugin(**kwargs)
def test_is_open(custom_transport):