From ab48a4f627b4c9eec9133b5efa9fb888ce2c4914 Mon Sep 17 00:00:00 2001 From: Alexander Goscinski Date: Thu, 6 Jun 2024 17:36:58 +0200 Subject: [PATCH 01/82] CLI: Rename `verdi profile setdefault` to `verdi profile set-default` (#6426) This is for consistency with `verdi user set-default`. --- docs/source/reference/command_line.rst | 12 +++++++----- src/aiida/cmdline/commands/cmd_profile.py | 17 ++++++++++++++--- src/aiida/cmdline/commands/cmd_user.py | 2 +- tests/cmdline/commands/test_profile.py | 18 ++++++++++++++++-- 4 files changed, 38 insertions(+), 11 deletions(-) diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index 1d7287e40c..2cb3f4495b 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -414,11 +414,13 @@ Below is a list with all available subcommands. --help Show this message and exit. Commands: - delete Delete one or more profiles. - list Display a list of all available profiles. - setdefault Set a profile as the default one. - setup Set up a new profile. - show Show details for a profile. + delete Delete one or more profiles. + list Display a list of all available profiles. + set-default Set a profile as the default profile. + setdefault (Deprecated) Set a profile as the default profile (use `verdi profile set- + default`). + setup Set up a new profile. + show Show details for a profile. .. _reference:command-line:verdi-quicksetup: diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 0d22b9025b..b1922058af 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -78,7 +78,7 @@ def command_create_profile( echo.echo_success(f'Created new profile `{profile.name}`.') if set_as_default: - ctx.invoke(profile_setdefault, profile=profile) + ctx.invoke(profile_set_default, profile=profile) @verdi_profile.group( @@ -147,10 +147,21 @@ def profile_show(profile): echo.echo_dictionary(config, fmt='yaml') -@verdi_profile.command('setdefault') +@verdi_profile.command('setdefault', deprecated='Please use `verdi profile set-default` instead.') @arguments.PROFILE(required=True, default=None) def profile_setdefault(profile): - """Set a profile as the default one.""" + """Set a profile as the default profile (use `verdi profile set-default`).""" + _profile_set_default(profile) + + +@verdi_profile.command('set-default') +@arguments.PROFILE(required=True, default=None) +def profile_set_default(profile): + """Set a profile as the default profile.""" + _profile_set_default(profile) + + +def _profile_set_default(profile): try: config = get_config() except (exceptions.MissingConfigurationError, exceptions.ConfigurationError) as exception: diff --git a/src/aiida/cmdline/commands/cmd_user.py b/src/aiida/cmdline/commands/cmd_user.py index b3505baa51..e01bdbe95b 100644 --- a/src/aiida/cmdline/commands/cmd_user.py +++ b/src/aiida/cmdline/commands/cmd_user.py @@ -60,7 +60,7 @@ def user_list(): @click.option( '--set-default', prompt='Set as default?', - help='Set the user as the default user for the current profile.', + help='Set the user as the default user.', is_flag=True, cls=options.interactive.InteractiveOption, contextual_default=lambda ctx: ctx.params['user'].is_default, diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index b0a83cdc00..a78876ad0c 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -53,7 +53,7 @@ def 
_factory(**kwargs): @pytest.mark.parametrize( 'command', - (cmd_profile.profile_list, cmd_profile.profile_setdefault, cmd_profile.profile_delete, cmd_profile.profile_show), + (cmd_profile.profile_list, cmd_profile.profile_set_default, cmd_profile.profile_delete, cmd_profile.profile_show), ) def test_help(run_cli_command, command): """Tests help text for all ``verdi profile`` commands.""" @@ -73,7 +73,21 @@ def test_list(run_cli_command, mock_profiles): def test_setdefault(run_cli_command, mock_profiles): """Test the ``verdi profile setdefault`` command.""" profile_list = mock_profiles() - run_cli_command(cmd_profile.profile_setdefault, [profile_list[1]], use_subprocess=False) + setdefault_result = run_cli_command(cmd_profile.profile_setdefault, [profile_list[1]], use_subprocess=False) + result = run_cli_command(cmd_profile.profile_list, use_subprocess=False) + + assert 'Report: configuration folder:' in result.output + assert f'* {profile_list[1]}' in result.output + + # test if deprecation warning is printed + assert 'Deprecated:' in setdefault_result.output + assert 'Deprecated:' in setdefault_result.stderr + + +def test_set_default(run_cli_command, mock_profiles): + """Test the ``verdi profile set-default`` command.""" + profile_list = mock_profiles() + run_cli_command(cmd_profile.profile_set_default, [profile_list[1]], use_subprocess=False) result = run_cli_command(cmd_profile.profile_list, use_subprocess=False) assert 'Report: configuration folder:' in result.output From 3dbde9e311781509b738202ad6f1de3bbd4b7a82 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 6 Jun 2024 17:38:24 +0200 Subject: [PATCH 02/82] CLI: Remove the deprecated `verdi database` command (#6460) This was deprecated in `v2.0` and had been slated to be removed ever since `v2.1`. --- docs/source/reference/command_line.rst | 23 ---- src/aiida/cmdline/commands/__init__.py | 1 - src/aiida/cmdline/commands/cmd_database.py | 130 --------------------- 3 files changed, 154 deletions(-) delete mode 100644 src/aiida/cmdline/commands/cmd_database.py diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index 2cb3f4495b..9f8a278760 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -179,29 +179,6 @@ Below is a list with all available subcommands. --help Show this message and exit. -.. _reference:command-line:verdi-database: - -``verdi database`` ------------------- - -.. code:: console - - Usage: [OPTIONS] COMMAND [ARGS]... - - Inspect and manage the database. - - .. deprecated:: v2.0.0 - - Options: - --help Show this message and exit. - - Commands: - integrity Check the integrity of the database and fix potential issues. - migrate Migrate the database to the latest schema version. - summary Summarise the entities in the database. - version Show the version of the database. - - .. 
_reference:command-line:verdi-devel: ``verdi devel`` diff --git a/src/aiida/cmdline/commands/__init__.py b/src/aiida/cmdline/commands/__init__.py index 79f9be05af..ddcefe9249 100644 --- a/src/aiida/cmdline/commands/__init__.py +++ b/src/aiida/cmdline/commands/__init__.py @@ -20,7 +20,6 @@ cmd_config, cmd_daemon, cmd_data, - cmd_database, cmd_devel, cmd_group, cmd_help, diff --git a/src/aiida/cmdline/commands/cmd_database.py b/src/aiida/cmdline/commands/cmd_database.py deleted file mode 100644 index dde0f58d87..0000000000 --- a/src/aiida/cmdline/commands/cmd_database.py +++ /dev/null @@ -1,130 +0,0 @@ -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -"""`verdi database` commands.""" - -import click - -from aiida.cmdline.commands.cmd_verdi import verdi -from aiida.cmdline.params import options -from aiida.cmdline.utils import decorators - - -@verdi.group('database', hidden=True) -def verdi_database(): - """Inspect and manage the database. - - .. deprecated:: v2.0.0 - """ - - -@verdi_database.command('version') -@decorators.deprecated_command( - 'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n' - 'The same information is now available through `verdi storage version`.\n' -) -def database_version(): - """Show the version of the database. - - The database version is defined by the tuple of the schema generation and schema revision. - - .. deprecated:: v2.0.0 - """ - - -@verdi_database.command('migrate') -@options.FORCE() -@click.pass_context -@decorators.deprecated_command( - 'This command has been deprecated and will be removed soon (in v3.0). ' - 'Please call `verdi storage migrate` instead.\n' -) -def database_migrate(ctx, force): - """Migrate the database to the latest schema version. - - .. deprecated:: v2.0.0 - """ - from aiida.cmdline.commands.cmd_storage import storage_migrate - - ctx.forward(storage_migrate) - - -@verdi_database.group('integrity') -def verdi_database_integrity(): - """Check the integrity of the database and fix potential issues. - - .. deprecated:: v2.0.0 - """ - - -@verdi_database_integrity.command('detect-duplicate-uuid') -@click.option( - '-t', - '--table', - default='db_dbnode', - type=click.Choice(('db_dbcomment', 'db_dbcomputer', 'db_dbgroup', 'db_dbnode')), - help='The database table to operate on.', -) -@click.option( - '-a', '--apply-patch', is_flag=True, help='Actually apply the proposed changes instead of performing a dry run.' -) -@decorators.deprecated_command( - 'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n' - 'For remaining available integrity checks, use `verdi storage integrity` instead.\n' -) -def detect_duplicate_uuid(table, apply_patch): - """Detect and fix entities with duplicate UUIDs. - - Before aiida-core v1.0.0, there was no uniqueness constraint on the UUID column of the node table in the database - and a few other tables as well. This made it possible to store multiple entities with identical UUIDs in the same - table without the database complaining. 
This bug was fixed in aiida-core=1.0.0 by putting an explicit uniqueness - constraint on UUIDs on the database level. However, this would leave databases created before this patch with - duplicate UUIDs in an inconsistent state. This command will run an analysis to detect duplicate UUIDs in a given - table and solve it by generating new UUIDs. Note that it will not delete or merge any rows. - - - .. deprecated:: v2.0.0 - """ - - -@verdi_database_integrity.command('detect-invalid-links') -@decorators.with_dbenv() -@decorators.deprecated_command( - 'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n' - 'For remaining available integrity checks, use `verdi storage integrity` instead.\n' -) -def detect_invalid_links(): - """Scan the database for invalid links. - - .. deprecated:: v2.0.0 - """ - - -@verdi_database_integrity.command('detect-invalid-nodes') -@decorators.with_dbenv() -@decorators.deprecated_command( - 'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n' - 'For remaining available integrity checks, use `verdi storage integrity` instead.\n' -) -def detect_invalid_nodes(): - """Scan the database for invalid nodes. - - .. deprecated:: v2.0.0 - """ - - -@verdi_database.command('summary') -@decorators.deprecated_command( - 'This command has been deprecated and no longer has any effect. It will be removed soon from the CLI (in v2.1).\n' - 'Please call `verdi storage info` instead.\n' -) -def database_summary(): - """Summarise the entities in the database. - - .. deprecated:: v2.0.0 - """ From 4c11c0616c583236119f838a1780a606c58b4ee2 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 6 Jun 2024 19:22:18 +0200 Subject: [PATCH 03/82] CLI: Deprecate the `deprecated_command` decorator (#6461) In an ironic turn of events, the `deprecated_command` decorator is itself deprecated. The current way of deprecating `verdi` commands is by passing the deprecation message in the `deprecated` argument in the `command` decorator when the command is declared. New functionality in `VerdiCommandGroup` then ensures that a deprecation message is printed when the command is invoked and the help text is updated accordingly. --- docs/source/reference/command_line.rst | 2 +- src/aiida/cmdline/commands/cmd_archive.py | 8 ++--- .../cmdline/commands/cmd_data/cmd_upf.py | 30 +++++++++---------- src/aiida/cmdline/commands/cmd_rabbitmq.py | 3 +- src/aiida/cmdline/commands/cmd_setup.py | 10 +++---- src/aiida/cmdline/utils/decorators.py | 9 ++++++ 6 files changed, 34 insertions(+), 28 deletions(-) diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index 9f8a278760..530c4e0b66 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -409,7 +409,7 @@ Below is a list with all available subcommands. Usage: [OPTIONS] - Setup a new profile in a fully automated fashion. + (Deprecated) Setup a new profile in a fully automated fashion. 
Options: -n, --non-interactive In non-interactive mode, the CLI never prompts but diff --git a/src/aiida/cmdline/commands/cmd_archive.py b/src/aiida/cmdline/commands/cmd_archive.py index 2db9fbcd62..e6a8d8ff44 100644 --- a/src/aiida/cmdline/commands/cmd_archive.py +++ b/src/aiida/cmdline/commands/cmd_archive.py @@ -78,15 +78,13 @@ def archive_info(path, detailed): echo.echo_dictionary(data, sort_keys=False, fmt='yaml') -@verdi_archive.command('inspect', hidden=True) +@verdi_archive.command( + 'inspect', hidden=True, deprecated='Use `verdi archive version` or `verdi archive info` instead.' +) @click.argument('archive', nargs=1, type=click.Path(exists=True, readable=True)) @click.option('-v', '--version', is_flag=True, help='Print the archive format version and exit.') @click.option('-m', '--meta-data', is_flag=True, help='Print the meta data contents and exit.') @click.option('-d', '--database', is_flag=True, help='Include information on entities in the database.') -@decorators.deprecated_command( - 'This command has been deprecated and will be removed soon. ' - 'Please call `verdi archive version` or `verdi archive info` instead.\n' -) @click.pass_context def inspect(ctx, archive, version, meta_data, database): """Inspect contents of an archive without importing it. diff --git a/src/aiida/cmdline/commands/cmd_data/cmd_upf.py b/src/aiida/cmdline/commands/cmd_data/cmd_upf.py index 64c3e15464..67d3be4594 100644 --- a/src/aiida/cmdline/commands/cmd_data/cmd_upf.py +++ b/src/aiida/cmdline/commands/cmd_data/cmd_upf.py @@ -23,9 +23,9 @@ def upf(): """Manipulate UpfData objects (UPF-format pseudopotentials).""" -@upf.command('uploadfamily') -@decorators.deprecated_command( - 'See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core' +@upf.command( + 'uploadfamily', + deprecated='See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core', ) @click.argument('folder', type=click.Path(exists=True, file_okay=False, resolve_path=True)) @click.argument('group_label', type=click.STRING) @@ -50,9 +50,9 @@ def upf_uploadfamily(folder, group_label, group_description, stop_if_existing): echo.echo_success(f'UPF files found: {files_found}. 
New files uploaded: {files_uploaded}') -@upf.command('listfamilies') -@decorators.deprecated_command( - 'See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core' +@upf.command( + 'listfamilies', + deprecated='See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core', ) @click.option( '-d', @@ -96,9 +96,9 @@ def upf_listfamilies(elements, with_description): echo.echo_warning('No valid UPF pseudopotential family found.') -@upf.command('exportfamily') -@decorators.deprecated_command( - 'See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core' +@upf.command( + 'exportfamily', + deprecated='See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core', ) @click.argument('folder', type=click.Path(exists=True, file_okay=False, resolve_path=True)) @arguments.GROUP() @@ -119,9 +119,9 @@ def upf_exportfamily(folder, group): echo.echo_warning(f'File {node.filename} is already present in the destination folder') -@upf.command('import') -@decorators.deprecated_command( - 'See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core' +@upf.command( + 'import', + deprecated='See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core', ) @click.argument('filename', type=click.Path(exists=True, dir_okay=False, resolve_path=True)) @decorators.with_dbenv() @@ -133,9 +133,9 @@ def upf_import(filename): echo.echo_success(f'Imported: {node}') -@upf.command('export') -@decorators.deprecated_command( - 'See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core' +@upf.command( + 'export', + deprecated='See https://aiida-pseudo.readthedocs.io/en/latest/howto.html#migrate-from-legacy-upfdata-from-aiida-core', ) @arguments.DATUM(type=types.DataParamType(sub_classes=('aiida.data:core.upf',))) @options.EXPORT_FORMAT( diff --git a/src/aiida/cmdline/commands/cmd_rabbitmq.py b/src/aiida/cmdline/commands/cmd_rabbitmq.py index 34f068e97c..c6a66d6da2 100644 --- a/src/aiida/cmdline/commands/cmd_rabbitmq.py +++ b/src/aiida/cmdline/commands/cmd_rabbitmq.py @@ -222,10 +222,9 @@ def cmd_tasks_list(broker): echo.echo(pk) -@cmd_tasks.command('analyze') +@cmd_tasks.command('analyze', deprecated='Use `verdi process repair` instead.') @click.option('--fix', is_flag=True, help='Attempt to fix the inconsistencies if any are detected.') @decorators.only_if_daemon_not_running() -@decorators.deprecated_command('Use `verdi process repair` instead.') @click.pass_context def cmd_tasks_analyze(ctx, fix): """Perform analysis of process tasks. diff --git a/src/aiida/cmdline/commands/cmd_setup.py b/src/aiida/cmdline/commands/cmd_setup.py index 0c18ce4238..93e6162141 100644 --- a/src/aiida/cmdline/commands/cmd_setup.py +++ b/src/aiida/cmdline/commands/cmd_setup.py @@ -13,7 +13,7 @@ from aiida.cmdline.commands.cmd_verdi import verdi from aiida.cmdline.params import options from aiida.cmdline.params.options.commands import setup as options_setup -from aiida.cmdline.utils import decorators, echo +from aiida.cmdline.utils import echo from aiida.manage.configuration import Profile, load_profile @@ -136,10 +136,10 @@ def setup( echo.echo_success(f'created new profile `{profile.name}`.') -@verdi.command('quicksetup') -@decorators.deprecated_command( - 'This command is deprecated. 
For a fully automated alternative, use `verdi presto --use-postgres` instead. ' - 'For full control, use `verdi profile setup core.psql_dos`.' +@verdi.command( + 'quicksetup', + deprecated='This command is deprecated. For a fully automated alternative, use `verdi presto --use-postgres` ' + 'instead. For full control, use `verdi profile setup core.psql_dos`.', ) @options.NON_INTERACTIVE() # Cannot use `default` because that will fail validation of the `ProfileParamType` if the profile already exists and it diff --git a/src/aiida/cmdline/utils/decorators.py b/src/aiida/cmdline/utils/decorators.py index 0a91c2c5e1..84386710f9 100644 --- a/src/aiida/cmdline/utils/decorators.py +++ b/src/aiida/cmdline/utils/decorators.py @@ -248,7 +248,16 @@ def deprecated_command(message): @deprecated_command('This command has been deprecated in AiiDA v1.0, please use 'foo' instead.) def mycommand(): pass + + .. deprecated:: 2.6 + + Ironically, this decorator itself has been deprecated. ``verdi`` commands that should be deprecated should + simply use the ``deprecated`` argument in the ``command`` decorator and specify the deprecation message. + """ + from aiida.common.warnings import warn_deprecation + + warn_deprecation('The `deprecated_command` decorator is deprecated', version=3) @decorator def wrapper(wrapped, _, args, kwargs): From 1b4a19a44461271aea58e54acd93e896220b413d Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 6 Jun 2024 19:22:34 +0200 Subject: [PATCH 04/82] Devops: Remove `verdi tui` from CLI reference documentation (#6464) The `verdi-autodocs` pre-commit hook automatically generates the reference documentation for `verdi`. It does so based on the available commands. The `verdi tui` command is added dynamically if and only if the optional dependency `trogon` is installed. Since having the dependency installed slows down the loading time `verdi` significantly, many dev environments prefer not to install it. However, this leads to the `verdi-autodocs` hook always failing as it removes the `verdi tui` section from the docs. The developer then has to manually reset the changes and run `git commit` with `--no-verify`. This is annoying enough that as a workaround the `verdi-autodocs` hook now skips the `verdi tui` command when generating the docs. --- docs/source/reference/command_line.rst | 15 --------------- utils/validate_consistency.py | 8 ++++++++ 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index 530c4e0b66..50e69919e0 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -613,21 +613,6 @@ Below is a list with all available subcommands. version Print the current version of the storage schema. -.. _reference:command-line:verdi-tui: - -``verdi tui`` -------------- - -.. code:: console - - Usage: [OPTIONS] - - Open Textual TUI. - - Options: - --help Show this message and exit. - - .. _reference:command-line:verdi-user: ``verdi user`` diff --git a/utils/validate_consistency.py b/utils/validate_consistency.py index aa72e23863..297dfccab8 100644 --- a/utils/validate_consistency.py +++ b/utils/validate_consistency.py @@ -119,6 +119,14 @@ def validate_verdi_documentation(): block = [f"{header}\n{'=' * len(header)}\n{message}\n\n"] for name, command in sorted(verdi.commands.items()): + if name == 'tui': + # This command is only generated when the optional dependency ``trogon`` is installed. It provides a TUI + # version of ``verdi``. 
However, since it is optional, if a development environment does not have it + # installed, this check will always fail as the generated docs are different. Since ``trogon`` significantly + # slows down tab-completion of ``verdi``, many dev environments do not want to have it installed. As a + # workaround, we are excluding this command from the automatically generated reference documentation. + continue + ctx = click.Context(command, terminal_width=width) header_label = f'.. _reference:command-line:verdi-{name}:' From 82bba130792f6c965f0ede8b221eee70fd01d9f1 Mon Sep 17 00:00:00 2001 From: Daniel Hollas Date: Fri, 7 Jun 2024 07:33:09 +0200 Subject: [PATCH 05/82] Devops: Do not pin the mamba version (#6466) * Remove pin of mamba version * Set `PIP_USER` as default * Remove double install of `xz-utils` --- .docker/aiida-core-base/Dockerfile | 8 +++----- .docker/aiida-core-with-services/Dockerfile | 3 +-- .docker/docker-bake.hcl | 1 - 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/.docker/aiida-core-base/Dockerfile b/.docker/aiida-core-base/Dockerfile index 4dd66eecfb..87339724bc 100644 --- a/.docker/aiida-core-base/Dockerfile +++ b/.docker/aiida-core-base/Dockerfile @@ -104,9 +104,6 @@ USER ${SYSTEM_UID} # Pin python version here ARG PYTHON_VERSION -# Pin mamba version here -ARG MAMBA_VERSION - # Download and install Micromamba, and initialize Conda prefix. # # Similar projects using Micromamba: @@ -137,7 +134,7 @@ RUN set -x && \ --prefix="${CONDA_DIR}" \ --yes \ "${PYTHON_SPECIFIER}" \ - "mamba=${MAMBA_VERSION}" && \ + mamba && \ rm micromamba && \ # Pin major.minor version of python mamba list python | grep '^python ' | tr -s ' ' | cut -d ' ' -f 1,2 >> "${CONDA_DIR}/conda-meta/pinned" && \ @@ -146,8 +143,9 @@ RUN set -x && \ fix-permissions "/home/${SYSTEM_USER}" # Add ~/.local/bin to PATH where the dependencies get installed via pip -# This require the package installed with `--user` flag in pip +# This require the package installed with `--user` flag in pip, which we set as default. ENV PATH=${PATH}:/home/${SYSTEM_USER}/.local/bin +ENV PIP_USER 1 # Switch to root to install AiiDA and set AiiDA as service # Install AiiDA from source code diff --git a/.docker/aiida-core-with-services/Dockerfile b/.docker/aiida-core-with-services/Dockerfile index 119cf7d0d1..276186175a 100644 --- a/.docker/aiida-core-with-services/Dockerfile +++ b/.docker/aiida-core-with-services/Dockerfile @@ -22,8 +22,7 @@ RUN mamba install --yes \ # Install erlang. RUN apt-get update --yes && \ apt-get install --yes --no-install-recommends \ - erlang \ - xz-utils && \ + erlang && \ apt-get clean && rm -rf /var/lib/apt/lists/* && \ # Install rabbitmq. 
wget -c --no-check-certificate https://github.com/rabbitmq/rabbitmq-server/releases/download/v${RMQ_VERSION}/rabbitmq-server-generic-unix-${RMQ_VERSION}.tar.xz && \ diff --git a/.docker/docker-bake.hcl b/.docker/docker-bake.hcl index 8aa32a2e5e..12938b490c 100644 --- a/.docker/docker-bake.hcl +++ b/.docker/docker-bake.hcl @@ -43,7 +43,6 @@ target "aiida-core-base" { platforms = "${PLATFORMS}" args = { "PYTHON_VERSION" = "${PYTHON_VERSION}" - "MAMBA_VERSION" = "1.5.2" } } target "aiida-core-with-services" { From 0812f4b9eeffdff5a8c3d0802aea94c8919d9922 Mon Sep 17 00:00:00 2001 From: Daniel Hollas Date: Fri, 7 Jun 2024 10:26:05 +0200 Subject: [PATCH 06/82] Devops: Add Dependabot config for maintaining GH actions (#6467) * Add dependabot config for maintaining GH actions * Enable `FORCE_COLOR` environment variable * Update `uv` installer * Remove code coverage in `presto` test suite * Do not install from `requirements.txt` for `verdi` --- .github/actions/install-aiida-core/action.yml | 2 +- .github/dependabot.yml | 11 +++++++++++ .github/workflows/ci-code.yml | 13 ++----------- .github/workflows/ci-style.yml | 3 +++ .github/workflows/docs-build.yml | 3 +++ .github/workflows/release.yml | 8 +++----- .readthedocs.yml | 4 ++-- 7 files changed, 25 insertions(+), 19 deletions(-) create mode 100644 .github/dependabot.yml diff --git a/.github/actions/install-aiida-core/action.yml b/.github/actions/install-aiida-core/action.yml index f51cf4d299..3c82771cdc 100644 --- a/.github/actions/install-aiida-core/action.yml +++ b/.github/actions/install-aiida-core/action.yml @@ -28,7 +28,7 @@ runs: - name: Install uv installer run: curl --proto '=https' --tlsv1.2 -LsSf https://${{ env.UV_URL }} | sh env: - UV_VERSION: 0.2.5 + UV_VERSION: 0.2.9 UV_URL: github.com/astral-sh/uv/releases/download/$UV_VERSION/uv-installer.sh shell: bash diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..8f0af41bb2 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +version: 2 +updates: +# Maintain dependencies for GitHub Actions +- package-ecosystem: github-actions + directory: / + schedule: + interval: monthly + groups: + gha-dependencies: + patterns: + - '*' diff --git a/.github/workflows/ci-code.yml b/.github/workflows/ci-code.yml index bde141594b..60db44dcce 100644 --- a/.github/workflows/ci-code.yml +++ b/.github/workflows/ci-code.yml @@ -139,17 +139,7 @@ jobs: - name: Run test suite env: AIIDA_WARN_v3: 0 - run: pytest -m 'presto' --cov aiida - - - name: Upload coverage report - if: github.repository == 'aiidateam/aiida-core' - uses: codecov/codecov-action@v4 - with: - token: ${{ secrets.CODECOV_TOKEN }} - name: aiida-pytests-presto - flags: presto - file: ./coverage.xml - fail_ci_if_error: false # don't fail job, if coverage upload fails + run: pytest -m 'presto' verdi: @@ -165,6 +155,7 @@ jobs: uses: ./.github/actions/install-aiida-core with: python-version: '3.12' + from-requirements: 'false' - name: Run verdi tests run: | diff --git a/.github/workflows/ci-style.yml b/.github/workflows/ci-style.yml index 83a0e2bd87..1f4b549ad2 100644 --- a/.github/workflows/ci-style.yml +++ b/.github/workflows/ci-style.yml @@ -6,6 +6,9 @@ on: pull_request: branches-ignore: [gh-pages] +env: + FORCE_COLOR: 1 + jobs: pre-commit: diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml index 012a83d3b9..bf1cce6d4c 100644 --- a/.github/workflows/docs-build.yml +++ b/.github/workflows/docs-build.yml @@ -7,6 +7,9 @@ on: branches-ignore: [gh-pages] paths: [docs/**] 
+env: + FORCE_COLOR: 1 + jobs: docs-linkcheck: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0b6b27c979..4a3f0e8a19 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,6 +9,9 @@ on: tags: - v[0-9]+.[0-9]+.[0-9]+* +env: + FORCE_COLOR: 1 + jobs: check-release-tag: @@ -33,11 +36,6 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Install system dependencies - # note libkrb5-dev is required as a dependency for the gssapi pip install - run: | - sudo apt update - sudo apt install libkrb5-dev ruby ruby-dev - name: Install aiida-core and pre-commit uses: ./.github/actions/install-aiida-core diff --git a/.readthedocs.yml b/.readthedocs.yml index 3ceaf73344..8f1e3118d0 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -17,8 +17,8 @@ build: # https://docs.readthedocs.io/en/stable/build-customization.html#install-dependencies-with-uv post_create_environment: - asdf plugin add uv - - asdf install uv 0.1.44 - - asdf global uv 0.1.44 + - asdf install uv 0.2.9 + - asdf global uv 0.2.9 post_install: - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH uv pip install .[docs,tests,rest,atomic_tools] From a56a1389dee5cb9ae70a5511d77aad248ea21731 Mon Sep 17 00:00:00 2001 From: Alexander Goscinski Date: Thu, 6 Jun 2024 23:01:41 +0200 Subject: [PATCH 07/82] CLI: Fix `verdi process [show|report|status|watch|call-root]` no output The commands take one or more processes, but if no processes were provided no output was shown whatsoever which can be confusing to the user. Now an error is now shown if no processes are defined. --- src/aiida/cmdline/commands/cmd_process.py | 36 ++++++++++++++ src/aiida/cmdline/utils/echo.py | 1 + tests/cmdline/commands/test_process.py | 58 ++++++++++++++++++----- 3 files changed, 83 insertions(+), 12 deletions(-) diff --git a/src/aiida/cmdline/commands/cmd_process.py b/src/aiida/cmdline/commands/cmd_process.py index 52b286e795..242e23549b 100644 --- a/src/aiida/cmdline/commands/cmd_process.py +++ b/src/aiida/cmdline/commands/cmd_process.py @@ -188,6 +188,11 @@ def process_show(processes, most_recent_node): """Show details for one or multiple processes.""" from aiida.cmdline.utils.common import get_node_info + if not processes and not most_recent_node: + raise click.UsageError( + 'Please specify one or multiple processes by their identifier (PK, UUID or label) or use an option.' + ) + if processes and most_recent_node: raise click.BadOptionUsage( 'most_recent_node', @@ -206,6 +211,8 @@ def process_show(processes, most_recent_node): @decorators.with_dbenv() def process_call_root(processes): """Show root process of the call stack for the given processes.""" + if not processes: + raise click.UsageError('Please specify one or multiple processes by their identifier (PK, UUID or label).') for process in processes: caller = process.caller @@ -244,6 +251,11 @@ def process_report(processes, most_recent_node, levelname, indent_size, max_dept from aiida.cmdline.utils.common import get_calcjob_report, get_process_function_report, get_workchain_report from aiida.orm import CalcFunctionNode, CalcJobNode, WorkChainNode, WorkFunctionNode + if not processes and not most_recent_node: + raise click.UsageError( + 'Please specify one or multiple processes by their identifier (PK, UUID or label) or use an option.' 
+ ) + if processes and most_recent_node: raise click.BadOptionUsage( 'most_recent_node', @@ -275,6 +287,11 @@ def process_status(call_link_label, most_recent_node, max_depth, processes): """Print the status of one or multiple processes.""" from aiida.cmdline.utils.ascii_vis import format_call_graph + if not processes and not most_recent_node: + raise click.UsageError( + 'Please specify one or multiple processes by their identifier (PK, UUID or label) or use an option.' + ) + if processes and most_recent_node: raise click.BadOptionUsage( 'most_recent_node', @@ -299,6 +316,11 @@ def process_kill(processes, all_entries, timeout, wait): """Kill running processes.""" from aiida.engine.processes import control + if not processes and not all_entries: + raise click.UsageError( + 'Please specify one or multiple processes by their identifier (PK, UUID or label) or use an option.' + ) + if processes and all_entries: raise click.BadOptionUsage('all', 'cannot specify individual processes and the `--all` flag at the same time.') @@ -326,6 +348,11 @@ def process_pause(processes, all_entries, timeout, wait): """Pause running processes.""" from aiida.engine.processes import control + if not processes and not all_entries: + raise click.UsageError( + 'Please specify one or multiple processes by their identifier (PK, UUID or label) or use an option.' + ) + if processes and all_entries: raise click.BadOptionUsage('all', 'cannot specify individual processes and the `--all` flag at the same time.') @@ -350,6 +377,11 @@ def process_play(processes, all_entries, timeout, wait): """Play (unpause) paused processes.""" from aiida.engine.processes import control + if not processes and not all_entries: + raise click.UsageError( + 'Please specify one or multiple processes by their identifier (PK, UUID or label) or use an option.' 
+ ) + if processes and all_entries: raise click.BadOptionUsage('all', 'cannot specify individual processes and the `--all` flag at the same time.') @@ -370,6 +402,10 @@ def process_play(processes, all_entries, timeout, wait): @decorators.only_if_daemon_running(echo.echo_warning, 'daemon is not running, so process may not be reachable') def process_watch(broker, processes): """Watch the state transitions for a process.""" + + if not processes: + raise click.UsageError('Please specify one or multiple processes by their identifier (PK, UUID or label).') + from time import sleep from kiwipy import BroadcastFilter diff --git a/src/aiida/cmdline/utils/echo.py b/src/aiida/cmdline/utils/echo.py index 26e88337da..d8e4bea6b8 100644 --- a/src/aiida/cmdline/utils/echo.py +++ b/src/aiida/cmdline/utils/echo.py @@ -35,6 +35,7 @@ class ExitCode(enum.IntEnum): """Exit codes for the verdi command line.""" CRITICAL = 1 + USAGE_ERROR = 2 DEPRECATED = 80 UNKNOWN = 99 SUCCESS = 0 diff --git a/tests/cmdline/commands/test_process.py b/tests/cmdline/commands/test_process.py index 162a471db2..7982a3346a 100644 --- a/tests/cmdline/commands/test_process.py +++ b/tests/cmdline/commands/test_process.py @@ -206,11 +206,11 @@ def test_process_show(self, run_cli_command): calcjob_one.store() calcjob_two.store() - # Running without identifiers should not except and not print anything + # Running without identifiers should except and print something options = [] - result = run_cli_command(cmd_process.process_show, options) - - assert len(result.output_lines) == 0 + result = run_cli_command(cmd_process.process_show, options, raises=True) + assert result.exit_code == ExitCode.USAGE_ERROR + assert len(result.output_lines) > 0 # Giving a single identifier should print a non empty string message options = [str(workchain_one.pk)] @@ -232,11 +232,11 @@ def test_process_report(self, run_cli_command): """Test verdi process report""" node = WorkflowNode().store() - # Running without identifiers should not except and not print anything + # Running without identifiers should except and print something options = [] - result = run_cli_command(cmd_process.process_report, options) - - assert len(result.output_lines) == 0 + result = run_cli_command(cmd_process.process_report, options, raises=True) + assert result.exit_code == ExitCode.USAGE_ERROR + assert len(result.output_lines) > 0 # Giving a single identifier should print a non empty string message options = [str(node.pk)] @@ -255,11 +255,11 @@ def test_process_status(self, run_cli_command): node = WorkflowNode().store() node.set_process_state(ProcessState.RUNNING) - # Running without identifiers should not except and not print anything + # Running without identifiers should except and print something options = [] - result = run_cli_command(cmd_process.process_status, options) - assert result.exception is None, result.output - assert len(result.output_lines) == 0 + result = run_cli_command(cmd_process.process_status, options, raises=True) + assert result.exit_code == ExitCode.USAGE_ERROR + assert len(result.output_lines) > 0 # Giving a single identifier should print a non empty string message options = [str(node.pk)] @@ -273,6 +273,15 @@ def test_process_status(self, run_cli_command): assert result.exception is None, result.output assert len(result.output_lines) == 0 + @pytest.mark.requires_rmq + def test_process_watch(self, run_cli_command): + """Test verdi process watch""" + # Running without identifiers should except and print something + options = [] + result = 
run_cli_command(cmd_process.process_watch, options, raises=True) + assert result.exit_code == ExitCode.USAGE_ERROR + assert len(result.output_lines) > 0 + def test_process_status_call_link_label(self, run_cli_command): """Test ``verdi process status --call-link-label``.""" node = WorkflowNode().store() @@ -460,6 +469,13 @@ def test_multiple_processes(self, run_cli_command): assert str(self.node_root.pk) in result.output_lines[1] assert str(self.node_root.pk) in result.output_lines[2] + def test_no_process_argument(self, run_cli_command): + # Running without identifiers should except and print something + options = [] + result = run_cli_command(cmd_process.process_call_root, options, raises=True) + assert result.exit_code == ExitCode.USAGE_ERROR + assert len(result.output_lines) > 0 + @pytest.mark.requires_rmq @pytest.mark.usefixtures('started_daemon_client') @@ -471,6 +487,12 @@ def test_process_pause(submit_and_await, run_cli_command): run_cli_command(cmd_process.process_pause, [str(node.pk), '--wait']) await_condition(lambda: node.paused) + # Running without identifiers should except and print something + options = [] + result = run_cli_command(cmd_process.process_pause, options, raises=True) + assert result.exit_code == ExitCode.USAGE_ERROR + assert len(result.output_lines) > 0 + @pytest.mark.requires_rmq @pytest.mark.usefixtures('started_daemon_client') @@ -484,6 +506,12 @@ def test_process_play(submit_and_await, run_cli_command): run_cli_command(cmd_process.process_play, [str(node.pk), '--wait']) await_condition(lambda: not node.paused) + # Running without identifiers should except and print something + options = [] + result = run_cli_command(cmd_process.process_play, options, raises=True) + assert result.exit_code == ExitCode.USAGE_ERROR + assert len(result.output_lines) > 0 + @pytest.mark.requires_rmq @pytest.mark.usefixtures('started_daemon_client') @@ -515,6 +543,12 @@ def test_process_kill(submit_and_await, run_cli_command): await_condition(lambda: node.is_killed) assert node.process_status == 'Killed through `verdi process kill`' + # Running without identifiers should except and print something + options = [] + result = run_cli_command(cmd_process.process_kill, options, raises=True) + assert result.exit_code == ExitCode.USAGE_ERROR + assert len(result.output_lines) > 0 + @pytest.mark.requires_rmq @pytest.mark.usefixtures('started_daemon_client') From 72692fa5cb667e2a7462770af18b7cedeaf8b3f0 Mon Sep 17 00:00:00 2001 From: Alexander Goscinski Date: Tue, 28 May 2024 17:09:21 +0200 Subject: [PATCH 08/82] CLI: Add `--most-recent-node` option to `verdi process watch` --- src/aiida/cmdline/commands/cmd_process.py | 18 +++++++++++++++--- tests/cmdline/commands/test_process.py | 6 ++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/src/aiida/cmdline/commands/cmd_process.py b/src/aiida/cmdline/commands/cmd_process.py index 242e23549b..969cee3593 100644 --- a/src/aiida/cmdline/commands/cmd_process.py +++ b/src/aiida/cmdline/commands/cmd_process.py @@ -397,14 +397,23 @@ def process_play(processes, all_entries, timeout, wait): @verdi_process.command('watch') @arguments.PROCESSES() +@options.MOST_RECENT_NODE() @decorators.with_dbenv() @decorators.with_broker @decorators.only_if_daemon_running(echo.echo_warning, 'daemon is not running, so process may not be reachable') -def process_watch(broker, processes): +def process_watch(broker, processes, most_recent_node): """Watch the state transitions for a process.""" - if not processes: - raise click.UsageError('Please specify one 
or multiple processes by their identifier (PK, UUID or label).') + if not processes and not most_recent_node: + raise click.UsageError( + 'Please specify one or multiple processes by their identifier (PK, UUID or label) or use an option.' + ) + + if processes and most_recent_node: + raise click.BadOptionUsage( + 'most_recent_node', + 'cannot specify individual processes and the `-M/--most-recent-node` flag at the same time.', + ) from time import sleep @@ -423,6 +432,9 @@ def _print(communicator, body, sender, subject, correlation_id): communicator = broker.get_communicator() echo.echo_report('watching for broadcasted messages, press CTRL+C to stop...') + if most_recent_node: + processes = [get_most_recent_node()] + for process in processes: if process.is_terminated: echo.echo_error(f'Process<{process.pk}> is already terminated') diff --git a/tests/cmdline/commands/test_process.py b/tests/cmdline/commands/test_process.py index 7982a3346a..fae9957f80 100644 --- a/tests/cmdline/commands/test_process.py +++ b/tests/cmdline/commands/test_process.py @@ -282,6 +282,12 @@ def test_process_watch(self, run_cli_command): assert result.exit_code == ExitCode.USAGE_ERROR assert len(result.output_lines) > 0 + # Running with both identifiers should raise an error and print something + options = ['--most-recent-node', '1'] + result = run_cli_command(cmd_process.process_watch, options, raises=True) + assert result.exit_code == ExitCode.USAGE_ERROR + assert len(result.output_lines) > 0 + def test_process_status_call_link_label(self, run_cli_command): """Test ``verdi process status --call-link-label``.""" node = WorkflowNode().store() From d91e0a58dabfd242b5f886d692c8761499a6719c Mon Sep 17 00:00:00 2001 From: Alexander Goscinski Date: Tue, 28 May 2024 18:05:44 +0200 Subject: [PATCH 09/82] CLI: Unify help of `verdi process` commands The `verdi process` commands pause, play, kill, wait, show, status, report, call-root had inconsistent help messages so we unified them. --- docs/source/reference/command_line.rst | 12 ++++---- src/aiida/cmdline/commands/cmd_process.py | 36 ++++++++++++++++------- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index 50e69919e0..aa00b218e4 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -363,17 +363,17 @@ Below is a list with all available subcommands. --help Show this message and exit. Commands: - call-root Show root process of the call stack for the given processes. + call-root Show root process of processes. dump Dump process input and output files to disk. kill Kill running processes. - list Show a list of running or terminated processes. + list Show a list of processes. pause Pause running processes. play Play (unpause) paused processes. repair Automatically repair all stuck processes. - report Show the log report for one or multiple processes. - show Show details for one or multiple processes. - status Print the status of one or multiple processes. - watch Watch the state transitions for a process. + report Show the log report of processes. + show Show details of processes. + status Show the status of processes. + watch Watch the state transitions of processes. .. 
_reference:command-line:verdi-profile: diff --git a/src/aiida/cmdline/commands/cmd_process.py b/src/aiida/cmdline/commands/cmd_process.py index 969cee3593..77e14a3300 100644 --- a/src/aiida/cmdline/commands/cmd_process.py +++ b/src/aiida/cmdline/commands/cmd_process.py @@ -93,9 +93,9 @@ def process_list( order_by, order_dir, ): - """Show a list of running or terminated processes. + """Show a list of processes. - By default, only those that are still running are shown, but there are options to show also the finished ones. + By default, only processes that are still running are shown, but there are options to show also the finished ones. """ from tabulate import tabulate @@ -185,7 +185,9 @@ def process_list( @options.MOST_RECENT_NODE() @decorators.with_dbenv() def process_show(processes, most_recent_node): - """Show details for one or multiple processes.""" + """Show details of processes. + + Show details for one or multiple processes.""" from aiida.cmdline.utils.common import get_node_info if not processes and not most_recent_node: @@ -210,7 +212,9 @@ def process_show(processes, most_recent_node): @arguments.PROCESSES() @decorators.with_dbenv() def process_call_root(processes): - """Show root process of the call stack for the given processes.""" + """Show root process of processes. + + Show root process(es) of the call stack for one or multiple processes.""" if not processes: raise click.UsageError('Please specify one or multiple processes by their identifier (PK, UUID or label).') for process in processes: @@ -247,7 +251,9 @@ def process_call_root(processes): ) @decorators.with_dbenv() def process_report(processes, most_recent_node, levelname, indent_size, max_depth): - """Show the log report for one or multiple processes.""" + """Show the log report of processes. + + Show the log report for one or multiple processes.""" from aiida.cmdline.utils.common import get_calcjob_report, get_process_function_report, get_workchain_report from aiida.orm import CalcFunctionNode, CalcJobNode, WorkChainNode, WorkFunctionNode @@ -284,7 +290,9 @@ def process_report(processes, most_recent_node, levelname, indent_size, max_dept ) @arguments.PROCESSES() def process_status(call_link_label, most_recent_node, max_depth, processes): - """Print the status of one or multiple processes.""" + """Show the status of processes. + + Show the status of one or multiple processes.""" from aiida.cmdline.utils.ascii_vis import format_call_graph if not processes and not most_recent_node: @@ -313,7 +321,9 @@ def process_status(call_link_label, most_recent_node, max_depth, processes): @options.WAIT() @decorators.with_dbenv() def process_kill(processes, all_entries, timeout, wait): - """Kill running processes.""" + """Kill running processes. + + Kill one or multiple running processes.""" from aiida.engine.processes import control if not processes and not all_entries: @@ -345,7 +355,9 @@ def process_kill(processes, all_entries, timeout, wait): @options.WAIT() @decorators.with_dbenv() def process_pause(processes, all_entries, timeout, wait): - """Pause running processes.""" + """Pause running processes. + + Pause one or multiple running processes.""" from aiida.engine.processes import control if not processes and not all_entries: @@ -374,7 +386,9 @@ def process_pause(processes, all_entries, timeout, wait): @options.WAIT() @decorators.with_dbenv() def process_play(processes, all_entries, timeout, wait): - """Play (unpause) paused processes.""" + """Play (unpause) paused processes. 
+ + Play (unpause) one or multiple paused processes.""" from aiida.engine.processes import control if not processes and not all_entries: @@ -402,7 +416,9 @@ def process_play(processes, all_entries, timeout, wait): @decorators.with_broker @decorators.only_if_daemon_running(echo.echo_warning, 'daemon is not running, so process may not be reachable') def process_watch(broker, processes, most_recent_node): - """Watch the state transitions for a process.""" + """Watch the state transitions of processes. + + Watch the state transitions for one or multiple running processes.""" if not processes and not most_recent_node: raise click.UsageError( From ccb56286c40f6be0d61a0c62442993e43faf1ba6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Jun 2024 11:26:33 +0200 Subject: [PATCH 10/82] Devops: Bump the gha-dependencies group with 4 updates (#6468) Updates `peter-evans/commit-comment` from 1 to 3 - [Release notes](https://github.com/peter-evans/commit-comment/releases) - [Commits](https://github.com/peter-evans/commit-comment/compare/v1...v3) Updates `conda-incubator/setup-miniconda` from 2 to 3 - [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/v2...v3) Updates `peter-evans/create-pull-request` from 3 to 6 - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/v3...v6) Updates `peter-evans/create-or-update-comment` from 1 to 4 - [Release notes](https://github.com/peter-evans/create-or-update-comment/releases) - [Commits](https://github.com/peter-evans/create-or-update-comment/compare/v1...v4) --- .github/workflows/ci-code.yml | 2 +- .github/workflows/test-install.yml | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci-code.yml b/.github/workflows/ci-code.yml index 60db44dcce..ff95047bb4 100644 --- a/.github/workflows/ci-code.yml +++ b/.github/workflows/ci-code.yml @@ -40,7 +40,7 @@ jobs: - name: Create commit comment if: failure() && steps.check_reqs.outputs.error - uses: peter-evans/commit-comment@v1 + uses: peter-evans/commit-comment@v3 with: path: pyproject.toml body: | diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml index c0d0ab12d9..0baab3c074 100644 --- a/.github/workflows/test-install.yml +++ b/.github/workflows/test-install.yml @@ -81,7 +81,7 @@ jobs: - uses: actions/checkout@v4 - name: Setup Conda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: channels: conda-forge @@ -148,7 +148,7 @@ jobs: - uses: actions/checkout@v4 - name: Setup Conda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: channels: conda-forge @@ -274,7 +274,7 @@ jobs: # - name: Create commit comment if: steps.check_reqs.outcome == 'Failure' # only run if requirements/ are inconsistent - uses: peter-evans/commit-comment@v1 + uses: peter-evans/commit-comment@v3 with: token: ${{ secrets.GITHUB_TOKEN }} path: pyproject.toml @@ -306,7 +306,7 @@ jobs: if: steps.check_reqs.outcome == 'Failure' # only run if requirements/ are inconsistent id: create_update_requirements_pr continue-on-error: true - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v6 with: branch: update-requirements 
commit-message: Automated update of requirements/ files. @@ -324,7 +324,7 @@ jobs: - name: Create PR comment on success if: steps.create_update_requirements_pr.outcome == 'Success' - uses: peter-evans/create-or-update-comment@v1 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ github.event.number }} body: | @@ -333,7 +333,7 @@ jobs: - name: Create PR comment on failure if: steps.create_update_requirements_pr.outcome == 'Failure' - uses: peter-evans/create-or-update-comment@v1 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ github.event.number }} body: | From f553f805e86d766da6208eb1682f7cf12c7907ac Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 7 Jun 2024 13:00:13 +0200 Subject: [PATCH 11/82] CLI: Add RabbitMQ options to `verdi profile setup` (#6453) The `verdi profile setup` command was added to replace the deprecated command `verdi setup`. The new command dynamically generates a subcommand for each installed storage plugin. However, the new command did not allow to configure the connection parameters for the RabbitMQ broker, unlike `verdi setup`. These common options are now added to the subcommands. In addition, a new option is added `--use-rabbitmq/--no-use-rabbitmq`. This flag is on by default, to keep the old behavior of `verdi setup`. When toggled to `--no-use-rabbitmq`, the RabbitMQ configuration options are no longer required and are also not prompted for. The profile is then configured without a broker. --- src/aiida/cmdline/commands/cmd_profile.py | 47 +++++++++++++++---- .../cmdline/params/options/commands/setup.py | 9 ++++ src/aiida/manage/configuration/profile.py | 2 +- src/aiida/storage/sqlite_dos/backend.py | 5 +- tests/cmdline/commands/test_profile.py | 13 +++++ 5 files changed, 64 insertions(+), 12 deletions(-) diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index b1922058af..2de2ce173d 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -52,6 +52,23 @@ def command_create_profile( _, storage_entry_point = get_entry_point_from_class(storage_cls.__module__, storage_cls.__name__) assert storage_entry_point is not None + if kwargs.pop('use_rabbitmq'): + broker_backend = 'core.rabbitmq' + broker_config = { + key: kwargs.get(key) + for key in ( + 'broker_protocol', + 'broker_username', + 'broker_password', + 'broker_host', + 'broker_port', + 'broker_virtual_host', + ) + } + else: + broker_backend = None + broker_config = None + try: profile = create_profile( ctx.obj.config, @@ -62,15 +79,8 @@ def command_create_profile( institution=institution, storage_backend=storage_entry_point.name, storage_config=kwargs, - broker_backend='core.rabbitmq', - broker_config={ - 'broker_protocol': 'amqp', - 'broker_username': 'guest', - 'broker_password': 'guest', - 'broker_host': '127.0.0.1', - 'broker_port': 5672, - 'broker_virtual_host': '', - }, + broker_backend=broker_backend, + broker_config=broker_config, ) except (ValueError, TypeError, exceptions.EntryPointError, exceptions.StorageMigrationError) as exception: echo.echo_critical(str(exception)) @@ -93,6 +103,25 @@ def command_create_profile( setup.SETUP_USER_FIRST_NAME(), setup.SETUP_USER_LAST_NAME(), setup.SETUP_USER_INSTITUTION(), + setup.SETUP_USE_RABBITMQ(), + setup.SETUP_BROKER_PROTOCOL( + prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] + ), + setup.SETUP_BROKER_USERNAME( + prompt_fn=lambda ctx: 
ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] + ), + setup.SETUP_BROKER_PASSWORD( + prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] + ), + setup.SETUP_BROKER_HOST( + prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] + ), + setup.SETUP_BROKER_PORT( + prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] + ), + setup.SETUP_BROKER_VIRTUAL_HOST( + prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] + ), ], ) def profile_setup(): diff --git a/src/aiida/cmdline/params/options/commands/setup.py b/src/aiida/cmdline/params/options/commands/setup.py index 3a0d6c3a67..bbd980c976 100644 --- a/src/aiida/cmdline/params/options/commands/setup.py +++ b/src/aiida/cmdline/params/options/commands/setup.py @@ -323,6 +323,15 @@ def get_quicksetup_password(ctx, param, value): cls=options.interactive.InteractiveOption, ) +SETUP_USE_RABBITMQ = options.OverridableOption( + '--use-rabbitmq/--no-use-rabbitmq', + prompt='Use RabbitMQ?', + is_flag=True, + default=True, + cls=options.interactive.InteractiveOption, + help='Whether to configure the RabbitMQ broker. Required to enable the daemon and submitting processes.', +) + SETUP_BROKER_PROTOCOL = QUICKSETUP_BROKER_PROTOCOL.clone( prompt='Broker protocol', required=True, diff --git a/src/aiida/manage/configuration/profile.py b/src/aiida/manage/configuration/profile.py index 6365e7a1b5..acaca2e892 100644 --- a/src/aiida/manage/configuration/profile.py +++ b/src/aiida/manage/configuration/profile.py @@ -128,7 +128,7 @@ def process_control_backend(self) -> str | None: @property def process_control_config(self) -> Dict[str, Any]: """Return the configuration required by the process control backend.""" - return self._attributes[self.KEY_PROCESS][self.KEY_PROCESS_CONFIG] + return self._attributes[self.KEY_PROCESS][self.KEY_PROCESS_CONFIG] or {} def set_process_controller(self, name: str, config: Dict[str, Any]) -> None: """Set the process control backend and its configuration. diff --git a/src/aiida/storage/sqlite_dos/backend.py b/src/aiida/storage/sqlite_dos/backend.py index 21195d2475..3b13764b3d 100644 --- a/src/aiida/storage/sqlite_dos/backend.py +++ b/src/aiida/storage/sqlite_dos/backend.py @@ -93,9 +93,10 @@ def initialise_database(self) -> None: class SqliteDosStorage(PsqlDosBackend): - """A lightweight backend intended for demos and testing. + """A lightweight storage that is easy to install. - This backend implementation uses an Sqlite database and + This backend implementation uses an SQLite database and a disk-objectstore container as the file repository. As + such, this storage plugin does not require any services, making it easy to install and use on most systems. """ migrator = SqliteDosMigrator diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index a78876ad0c..909562245a 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -256,3 +256,16 @@ def test_setup_email_required(run_cli_command, isolated_config, tmp_path, entry_ else: result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False, raises=True) assert 'Invalid value for --email: The option is required for storages that are not read-only.' 
in result.output + + +def test_setup_no_use_rabbitmq(run_cli_command, isolated_config): + """Test the ``--no-use-rabbitmq`` option.""" + profile_name = 'profile-no-broker' + options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile', profile_name, '--no-use-rabbitmq'] + + result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) + assert f'Created new profile `{profile_name}`.' in result.output + assert profile_name in isolated_config.profile_names + profile = isolated_config.get_profile(profile_name) + assert profile.process_control_backend is None + assert profile.process_control_config == {} From 202a3ece9705289a1f12c85e64cf90307ca85c39 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 7 Jun 2024 14:49:18 +0200 Subject: [PATCH 12/82] CLI: Add the `verdi profile configure-rabbitmq` command (#6454) Now that profiles can be created without defining a broker, a command is needed that can add a RabbitMQ connection configuration. The new command `verdi profile configure-rabbitmq` enables a broker for a profile if it wasn't already, and allows configuring the connection parameters. --- docs/source/reference/command_line.rst | 14 +++++------ src/aiida/cmdline/commands/cmd_profile.py | 22 +++++++++++++++++- .../cmdline/params/options/commands/setup.py | 2 ++ tests/cmdline/commands/test_profile.py | 23 +++++++++++++++++++ 4 files changed, 53 insertions(+), 8 deletions(-) diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index aa00b218e4..0953d027f7 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -391,13 +391,13 @@ Below is a list with all available subcommands. --help Show this message and exit. Commands: - delete Delete one or more profiles. - list Display a list of all available profiles. - set-default Set a profile as the default profile. - setdefault (Deprecated) Set a profile as the default profile (use `verdi profile set- - default`). - setup Set up a new profile. - show Show details for a profile. + configure-rabbitmq Configure RabbitMQ for a profile. + delete Delete one or more profiles. + list Display a list of all available profiles. + set-default Set a profile as the default profile. + setdefault (Deprecated) Set a profile as the default profile. + setup Set up a new profile. + show Show details for a profile. .. _reference:command-line:verdi-quicksetup: diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 2de2ce173d..0b8065a9a8 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -128,6 +128,26 @@ def profile_setup(): """Set up a new profile.""" +@verdi_profile.command('configure-rabbitmq') # type: ignore[arg-type] +@arguments.PROFILE(default=defaults.get_default_profile) +@setup.SETUP_BROKER_PROTOCOL() +@setup.SETUP_BROKER_USERNAME() +@setup.SETUP_BROKER_PASSWORD() +@setup.SETUP_BROKER_HOST() +@setup.SETUP_BROKER_PORT() +@setup.SETUP_BROKER_VIRTUAL_HOST() +@options.NON_INTERACTIVE() +@click.pass_context +def profile_configure_rabbitmq(ctx, profile, **kwargs): + """Configure RabbitMQ for a profile. + + Enable RabbitMQ for a profile that was created without a broker, or reconfigure existing connection details. 
+ """ + profile.set_process_controller(name='core.rabbitmq', config=kwargs) + ctx.obj.config.update_profile(profile) + ctx.obj.config.store() + + @verdi_profile.command('list') def profile_list(): """Display a list of all available profiles.""" @@ -179,7 +199,7 @@ def profile_show(profile): @verdi_profile.command('setdefault', deprecated='Please use `verdi profile set-default` instead.') @arguments.PROFILE(required=True, default=None) def profile_setdefault(profile): - """Set a profile as the default profile (use `verdi profile set-default`).""" + """Set a profile as the default profile.""" _profile_set_default(profile) diff --git a/src/aiida/cmdline/params/options/commands/setup.py b/src/aiida/cmdline/params/options/commands/setup.py index bbd980c976..008f51b3a0 100644 --- a/src/aiida/cmdline/params/options/commands/setup.py +++ b/src/aiida/cmdline/params/options/commands/setup.py @@ -50,6 +50,8 @@ def get_profile_attribute_default(attribute_tuple, ctx): try: data = ctx.params['profile'].dictionary for part in parts: + if data is None: + return default data = data[part] return data except KeyError: diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index 909562245a..51594b6ca7 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -269,3 +269,26 @@ def test_setup_no_use_rabbitmq(run_cli_command, isolated_config): profile = isolated_config.get_profile(profile_name) assert profile.process_control_backend is None assert profile.process_control_config == {} + + +def test_configure_rabbitmq(run_cli_command, isolated_config): + """Test the ``verdi profile configure-rabbitmq`` command.""" + profile_name = 'profile' + + # First setup a profile without a broker configured + options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile', profile_name, '--no-use-rabbitmq'] + run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) + profile = isolated_config.get_profile(profile_name) + assert profile.process_control_backend is None + assert profile.process_control_config == {} + + # Now run the command to configure the broker + options = [profile_name, '-n'] + run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + assert profile.process_control_backend == 'core.rabbitmq' + + # Call it again to check it works to reconfigure existing broker connection parameters + options = [profile_name, '-n', '--broker-host', 'rabbitmq.broker.com'] + run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + assert profile.process_control_backend == 'core.rabbitmq' + assert profile.process_control_config['broker_host'] == 'rabbitmq.broker.com' From cd0f9acb4b932557a91387b7d804cb94ac4dcbb3 Mon Sep 17 00:00:00 2001 From: Marnik Bercx Date: Tue, 18 Jun 2024 13:27:07 +0200 Subject: [PATCH 13/82] =?UTF-8?q?=F0=9F=90=9B=20`RabbitmqBroker`:=20catch?= =?UTF-8?q?=20`ConnectionError`=20for=20`=5F=5Fstr=5F=5F`=20(#6473)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current implementation of the `RabbitmqBroker.__str__()` method always prints both the version and the URL of the RabbitMQ server. However, the `get_rabbitmq_version()` method fails with a `ConnectionError` in case the RabbitMQ broker is not able to connect to the server. This issue would bubble up into the `verdi status` command, since this prints the string representation of the `RabbitmqBroker` in the message that reports the connection failure. 
At this point the `ConnectionError` is no longer caught, and hence the user is exposed to the full traceback. Here we adapt the `RabbitmqBroker.__str__()` method to catch the `ConnectionError` and return the URL with the message that the connection failed. --- src/aiida/brokers/rabbitmq/broker.py | 5 ++++- tests/brokers/test_rabbitmq.py | 13 +++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/aiida/brokers/rabbitmq/broker.py b/src/aiida/brokers/rabbitmq/broker.py index dab19e28b7..5321f6d400 100644 --- a/src/aiida/brokers/rabbitmq/broker.py +++ b/src/aiida/brokers/rabbitmq/broker.py @@ -34,7 +34,10 @@ def __init__(self, profile: Profile) -> None: self._prefix = f'aiida-{self._profile.uuid}' def __str__(self): - return f'RabbitMQ v{self.get_rabbitmq_version()} @ {self.get_url()}' + try: + return f'RabbitMQ v{self.get_rabbitmq_version()} @ {self.get_url()}' + except ConnectionError: + return f'RabbitMQ @ {self.get_url()} ' def close(self): """Close the broker.""" diff --git a/tests/brokers/test_rabbitmq.py b/tests/brokers/test_rabbitmq.py index fc27a3eaf6..00ee662338 100644 --- a/tests/brokers/test_rabbitmq.py +++ b/tests/brokers/test_rabbitmq.py @@ -22,6 +22,19 @@ pytestmark = pytest.mark.requires_rmq +def test_str_method(monkeypatch, manager): + """Test the `__str__` method of the `RabbitmqBroker`.""" + + def raise_connection_error(): + raise ConnectionError + + broker = manager.get_broker() + assert 'RabbitMQ v' in str(broker) + + monkeypatch.setattr(broker, 'get_communicator', raise_connection_error) + assert 'RabbitMQ @' in str(broker) + + @pytest.mark.parametrize( ('version', 'supported'), ( From 022f049bfcf86609daf0c2d9ddc0b1c108b9ea7c Mon Sep 17 00:00:00 2001 From: Marnik Bercx Date: Wed, 19 Jun 2024 09:39:22 +0200 Subject: [PATCH 14/82] CLI: Fix bug with profile name determination in `verdi presto` (#6477) When the user is using `verdi presto` to create more than 11 profiles, the command will fail because `presto-10` already exists. This is due to the fact that the `get_default_presto_profile_name()` function sorts the existing indices as strings, which means `10` will precede `9` and hence the "last index" would be `9`, making the new index `10`, which already exists. Here we fix this issue by casting the extracted existing indices as integers, so the sorting works as intended. 
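As a minimal sketch (not the actual implementation), the difference between sorting the extracted indices as strings and as integers can be reproduced with plain Python; the regex mirrors the one used by the command and the profile names are hypothetical:

    import re

    existing = ['presto', 'presto-9', 'presto-10']

    def last_index(profile_names, cast_to_int):
        indices = []
        for name in profile_names:
            if match := re.search(r'presto[-]?(\d+)?', name):
                value = match.group(1) or '0'
                indices.append(int(value) if cast_to_int else value)
        # Lexicographic sorting puts '10' before '9'; integer sorting does not.
        return sorted(indices)[-1]

    print(last_index(existing, cast_to_int=False))  # '9' -> next name would be `presto-10`, which exists
    print(last_index(existing, cast_to_int=True))   # 10  -> next name becomes `presto-11`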
--- src/aiida/cmdline/commands/cmd_presto.py | 2 +- tests/cmdline/commands/test_presto.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py index b61a8b6cfd..1893a6d461 100644 --- a/src/aiida/cmdline/commands/cmd_presto.py +++ b/src/aiida/cmdline/commands/cmd_presto.py @@ -32,7 +32,7 @@ def get_default_presto_profile_name(): for profile_name in profile_names: if match := re.search(r'presto[-]?(\d+)?', profile_name): - indices.append(match.group(1) or '0') + indices.append(int(match.group(1) or '0')) if not indices: return DEFAULT_PROFILE_NAME_PREFIX diff --git a/tests/cmdline/commands/test_presto.py b/tests/cmdline/commands/test_presto.py index 13760a53b7..8651664f61 100644 --- a/tests/cmdline/commands/test_presto.py +++ b/tests/cmdline/commands/test_presto.py @@ -80,3 +80,11 @@ def test_presto_use_postgres_fail(run_cli_command): options = ['--non-interactive', '--use-postgres', '--postgres-port', str(5000)] result = run_cli_command(verdi_presto, options, raises=True) assert 'Failed to connect to the PostgreSQL server' in result.output + + +@pytest.mark.usefixtures('empty_config') +def test_presto_overdose(run_cli_command, config_with_profile_factory): + """Test that ``verdi presto`` still works for users that have over 10 presto profiles.""" + config_with_profile_factory(name='presto-10') + result = run_cli_command(verdi_presto) + assert 'Created new profile `presto-11`.' in result.output From 57598b16468030bc124846d2d995a3134659375d Mon Sep 17 00:00:00 2001 From: Marnik Bercx Date: Wed, 19 Jun 2024 13:07:30 +0200 Subject: [PATCH 15/82] =?UTF-8?q?=E2=9C=A8=20CLI:=20Make=20`NON=5FINTERACT?= =?UTF-8?q?IVE`=20option=20a=20switch=20instead=20of=20flag?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For many setup/configuration CLI commands, the `NON_INTERACTIVE` option is added to allow the user to run the command without being prompted for input and use the defaults instead. However, new users are often not aware of this option, and will not understand some options as they are prompted. Even when a sensible default is offered by the prompt, users will still want to understand the option and be unsure if the default works for them. Hence, it might be preferable to run it non-interactively by default for some commands. Here we adapt the `NON_INTERACTIVE` option into a switch (`-n/-I` or `--non-interactive`/`--interactive`) that is `--interactive` by default. --- docs/source/reference/command_line.rst | 51 +++++++++++-------- .../cmdline/params/options/interactive.py | 4 +- src/aiida/cmdline/params/options/main.py | 12 +++-- 3 files changed, 40 insertions(+), 27 deletions(-) diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index 0953d027f7..b3edb33a0e 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -329,23 +329,26 @@ Below is a list with all available subcommands. the newly created profile uses the new PostgreSQL database instead of SQLite. Options: - --profile-name TEXT Name of the profile. By default, a unique name starting with - `presto` is automatically generated. [default: (dynamic)] - --email TEXT Email of the default user. [default: (dynamic)] - --use-postgres When toggled on, the profile uses a PostgreSQL database - instead of an SQLite one. The connection details to the - PostgreSQL server can be configured with the relevant options. 
- The command attempts to automatically create a user and - database to use for the profile, but this can fail depending - on the configuration of the server. - --postgres-hostname TEXT The hostname of the PostgreSQL server. - --postgres-port INTEGER The port of the PostgreSQL server. - --postgres-username TEXT The username of the PostgreSQL user that is authorized to - create new databases. - --postgres-password TEXT The password of the PostgreSQL user that is authorized to - create new databases. - -n, --non-interactive Never prompt, such as for sudo password. - --help Show this message and exit. + --profile-name TEXT Name of the profile. By default, a unique name starting + with `presto` is automatically generated. [default: + (dynamic)] + --email TEXT Email of the default user. [default: (dynamic)] + --use-postgres When toggled on, the profile uses a PostgreSQL database + instead of an SQLite one. The connection details to the + PostgreSQL server can be configured with the relevant + options. The command attempts to automatically create a + user and database to use for the profile, but this can + fail depending on the configuration of the server. + --postgres-hostname TEXT The hostname of the PostgreSQL server. + --postgres-port INTEGER The port of the PostgreSQL server. + --postgres-username TEXT The username of the PostgreSQL user that is authorized + to create new databases. + --postgres-password TEXT The password of the PostgreSQL user that is authorized + to create new databases. + -n, --non-interactive / -I, --interactive + Never prompt, such as for sudo password. [default: + (--interactive)] + --help Show this message and exit. .. _reference:command-line:verdi-process: @@ -412,8 +415,11 @@ Below is a list with all available subcommands. (Deprecated) Setup a new profile in a fully automated fashion. Options: - -n, --non-interactive In non-interactive mode, the CLI never prompts but - simply uses default values for options that define one. + -n, --non-interactive / -I, --interactive + In non-interactive mode, the CLI never prompts for + options but simply uses default values for options that + define one. In interactive mode, the CLI will prompt for + each interactive option. [default: (--interactive)] --profile PROFILE The name of the new profile. [required] --email EMAIL Email address associated with the data you generate. The email address is exported along with the data, when @@ -516,8 +522,11 @@ Below is a list with all available subcommands. user has been created. Options: - -n, --non-interactive In non-interactive mode, the CLI never prompts but - simply uses default values for options that define one. + -n, --non-interactive / -I, --interactive + In non-interactive mode, the CLI never prompts for + options but simply uses default values for options that + define one. In interactive mode, the CLI will prompt for + each interactive option. [default: (--interactive)] --profile PROFILE The name of the new profile. [required] --email EMAIL Email address associated with the data you generate. 
The email address is exported along with the data, when diff --git a/src/aiida/cmdline/params/options/interactive.py b/src/aiida/cmdline/params/options/interactive.py index c044d04907..d6c216eca9 100644 --- a/src/aiida/cmdline/params/options/interactive.py +++ b/src/aiida/cmdline/params/options/interactive.py @@ -167,9 +167,9 @@ def get_default(self, ctx: click.Context, call: bool = True) -> t.Optional[t.Uni def is_interactive(ctx: click.Context) -> bool: """Return whether the command is being run non-interactively. - This is the case if the ``non_interactive`` parameter in the context is set to ``True``. + This is the case if the ``non_interactive`` parameter in the context is set to ``False``. - :return: ``True`` if being run non-interactively, ``False`` otherwise. + :return: ``True`` if being run interactively, ``False`` otherwise. """ return not ctx.params.get('non_interactive', False) diff --git a/src/aiida/cmdline/params/options/main.py b/src/aiida/cmdline/params/options/main.py index 85b3090ad5..aa86a1f0dd 100644 --- a/src/aiida/cmdline/params/options/main.py +++ b/src/aiida/cmdline/params/options/main.py @@ -344,11 +344,15 @@ def set_log_level(_ctx, _param, value): ) NON_INTERACTIVE = OverridableOption( - '-n', - '--non-interactive', - is_flag=True, + '-n/-I', + '--non-interactive/--interactive', is_eager=True, - help='In non-interactive mode, the CLI never prompts but simply uses default values for options that define one.', + help=( + 'In non-interactive mode, the CLI never prompts for options but simply uses default values for options that ' + 'define one. In interactive mode, the CLI will prompt for each interactive option. ' + ), + default=False, + show_default='--interactive', ) DRY_RUN = OverridableOption('-n', '--dry-run', is_flag=True, help='Perform a dry run.') From aaada5454ead927a3cd2c3d48788e5730ea53aaf Mon Sep 17 00:00:00 2001 From: Marnik Bercx Date: Wed, 19 Jun 2024 13:09:12 +0200 Subject: [PATCH 16/82] =?UTF-8?q?=F0=9F=91=8C=20CLI:=20Make=20`configure-r?= =?UTF-8?q?abbitmq`=20non-interactive=20by=20default?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `configure-rabbitmq` command was introduced mainly to allow new users that set up their profile with `verdi presto` before they set up RabbitMQ to upgrade their profile to use the message broker. However, since the command is interactive, they would be prompted for each of the options when running the command without `-n`/`--non-interactive`. Users that don't understand these inputs will typically want to set the defaults, so switching the modus operandi of the commmand to be non-interactive will make life easier for these users. Users that _do_ want to set different values than the defaults will understand the options of the command and should be able to be able to provide them directly or via the interactive mode. 
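A minimal sketch of the mechanism relied on here, assuming only that the shared `NON_INTERACTIVE` option is an `OverridableOption` whose keyword arguments can be overridden per command (the command name and echo are illustrative):

    import click
    from aiida.cmdline.params import options

    @click.command('configure-rabbitmq')
    @options.NON_INTERACTIVE(default=True, show_default='--non-interactive')
    def configure_rabbitmq(non_interactive):
        # Runs non-interactively by default; passing `-I/--interactive` re-enables prompting.
        click.echo(f'non_interactive={non_interactive}')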
--- src/aiida/cmdline/commands/cmd_profile.py | 2 +- tests/cmdline/commands/test_profile.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 0b8065a9a8..55d59f706e 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -136,7 +136,7 @@ def profile_setup(): @setup.SETUP_BROKER_HOST() @setup.SETUP_BROKER_PORT() @setup.SETUP_BROKER_VIRTUAL_HOST() -@options.NON_INTERACTIVE() +@options.NON_INTERACTIVE(default=True, show_default='--non-interactive') @click.pass_context def profile_configure_rabbitmq(ctx, profile, **kwargs): """Configure RabbitMQ for a profile. diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index 51594b6ca7..9718bd06f5 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -287,6 +287,13 @@ def test_configure_rabbitmq(run_cli_command, isolated_config): run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) assert profile.process_control_backend == 'core.rabbitmq' + # Verify that running in non-interactive mode is the default + options = [ + profile_name, + ] + run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=True) + assert profile.process_control_backend == 'core.rabbitmq' + # Call it again to check it works to reconfigure existing broker connection parameters options = [profile_name, '-n', '--broker-host', 'rabbitmq.broker.com'] run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) From c2ca6423c1803f75ef198f896cb8ba85339be3ff Mon Sep 17 00:00:00 2001 From: Marnik Bercx Date: Wed, 19 Jun 2024 13:26:55 +0200 Subject: [PATCH 17/82] =?UTF-8?q?=F0=9F=91=8C=20CLI:=20Give=20feedback=20f?= =?UTF-8?q?or=20`configure-rabbitmq`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the `verdi profile configure-rabbitmq` command doesn't give any feedback to the user whether the provided options can successfully connect to the RabbitMQ server. Here we adapt the `detect_rabbitmq_config` function to accept the broker configuration as `**kwargs`, and use it to check if the provided options in the `configure-rabbitmq` can successfully connect to the RabbitMQ server. A "success" message is printed if we can connect to the server, else a warning is printed and the user is asked for confirmation before proceeding to configure the broker. A `--force` flag is also added to avoid asking for confirmation in case the command is unable to connect to the broker. 
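As a rough usage sketch of the updated helper (the connection values below are just the standard defaults, not a recommendation):

    from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config

    # With this change the helper accepts explicit connection parameters; it returns the
    # ``broker_``-prefixed configuration on success and ``None`` when no connection could
    # be established with these parameters.
    config = detect_rabbitmq_config(host='127.0.0.1', port=5672, username='guest', password='guest')
    if config is None:
        print('Unable to connect to RabbitMQ with the given parameters')
    else:
        print(config['broker_host'], config['broker_port'])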
--- src/aiida/brokers/rabbitmq/defaults.py | 24 +++++++++++++++-------- src/aiida/cmdline/commands/cmd_profile.py | 18 ++++++++++++++++- tests/cmdline/commands/test_profile.py | 18 ++++++++++++----- 3 files changed, 46 insertions(+), 14 deletions(-) diff --git a/src/aiida/brokers/rabbitmq/defaults.py b/src/aiida/brokers/rabbitmq/defaults.py index aeeaab578d..b312897f73 100644 --- a/src/aiida/brokers/rabbitmq/defaults.py +++ b/src/aiida/brokers/rabbitmq/defaults.py @@ -29,7 +29,15 @@ ) -def detect_rabbitmq_config() -> dict[str, t.Any] | None: +def detect_rabbitmq_config( + protocol: str | None = None, + username: str | None = None, + password: str | None = None, + host: str | None = None, + port: int | None = None, + virtual_host: str | None = None, + heartbeat: int | None = None, +) -> dict[str, t.Any] | None: """Try to connect to a RabbitMQ server with the default connection parameters. :returns: The connection parameters if the RabbitMQ server was successfully connected to, or ``None`` otherwise. @@ -37,13 +45,13 @@ def detect_rabbitmq_config() -> dict[str, t.Any] | None: from kiwipy.rmq.threadcomms import connect connection_params = { - 'protocol': os.getenv('AIIDA_BROKER_PROTOCOL', BROKER_DEFAULTS['protocol']), - 'username': os.getenv('AIIDA_BROKER_USERNAME', BROKER_DEFAULTS['username']), - 'password': os.getenv('AIIDA_BROKER_PASSWORD', BROKER_DEFAULTS['password']), - 'host': os.getenv('AIIDA_BROKER_HOST', BROKER_DEFAULTS['host']), - 'port': os.getenv('AIIDA_BROKER_PORT', BROKER_DEFAULTS['port']), - 'virtual_host': os.getenv('AIIDA_BROKER_VIRTUAL_HOST', BROKER_DEFAULTS['virtual_host']), - 'heartbeat': os.getenv('AIIDA_BROKER_HEARTBEAT', BROKER_DEFAULTS['heartbeat']), + 'protocol': protocol or os.getenv('AIIDA_BROKER_PROTOCOL', BROKER_DEFAULTS['protocol']), + 'username': username or os.getenv('AIIDA_BROKER_USERNAME', BROKER_DEFAULTS['username']), + 'password': password or os.getenv('AIIDA_BROKER_PASSWORD', BROKER_DEFAULTS['password']), + 'host': host or os.getenv('AIIDA_BROKER_HOST', BROKER_DEFAULTS['host']), + 'port': port or int(os.getenv('AIIDA_BROKER_PORT', BROKER_DEFAULTS['port'])), + 'virtual_host': virtual_host or os.getenv('AIIDA_BROKER_VIRTUAL_HOST', BROKER_DEFAULTS['virtual_host']), + 'heartbeat': heartbeat or int(os.getenv('AIIDA_BROKER_HEARTBEAT', BROKER_DEFAULTS['heartbeat'])), } LOGGER.info(f'Attempting to connect to RabbitMQ with parameters: {connection_params}') diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 55d59f706e..047126a38c 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -130,6 +130,7 @@ def profile_setup(): @verdi_profile.command('configure-rabbitmq') # type: ignore[arg-type] @arguments.PROFILE(default=defaults.get_default_profile) +@options.FORCE() @setup.SETUP_BROKER_PROTOCOL() @setup.SETUP_BROKER_USERNAME() @setup.SETUP_BROKER_PASSWORD() @@ -138,15 +139,30 @@ def profile_setup(): @setup.SETUP_BROKER_VIRTUAL_HOST() @options.NON_INTERACTIVE(default=True, show_default='--non-interactive') @click.pass_context -def profile_configure_rabbitmq(ctx, profile, **kwargs): +def profile_configure_rabbitmq(ctx, profile, non_interactive, force, **kwargs): """Configure RabbitMQ for a profile. Enable RabbitMQ for a profile that was created without a broker, or reconfigure existing connection details. 
""" + from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config + + connection_params = {key.lstrip('broker_'): value for key, value in kwargs.items() if key.startswith('broker_')} + + broker_config = detect_rabbitmq_config(**connection_params) + + if broker_config is None: + echo.echo_warning(f'Unable to connect to RabbitMQ server with configuration: {connection_params}') + if not force: + click.confirm('Do you want to continue with the provided configuration?', abort=True) + else: + echo.echo_success('Connected to RabbitMQ with the provided connection parameters') + profile.set_process_controller(name='core.rabbitmq', config=kwargs) ctx.obj.config.update_profile(profile) ctx.obj.config.store() + echo.echo_success(f'RabbitMQ configuration for `{profile.name}` updated to: {connection_params}') + @verdi_profile.command('list') def profile_list(): diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index 9718bd06f5..781a8b3cfa 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -284,8 +284,9 @@ def test_configure_rabbitmq(run_cli_command, isolated_config): # Now run the command to configure the broker options = [profile_name, '-n'] - run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + cli_result = run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) assert profile.process_control_backend == 'core.rabbitmq' + assert 'Connected to RabbitMQ with the provided connection parameters' in cli_result.stdout # Verify that running in non-interactive mode is the default options = [ @@ -293,9 +294,16 @@ def test_configure_rabbitmq(run_cli_command, isolated_config): ] run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=True) assert profile.process_control_backend == 'core.rabbitmq' + assert 'Connected to RabbitMQ with the provided connection parameters' in cli_result.stdout + + # Verify that configuring with incorrect options and `--force` raises a warning but still configures the broker + options = [profile_name, '-f', '--broker-port', '1234'] + cli_result = run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + assert 'Unable to connect to RabbitMQ server with configuration:' in cli_result.stdout + assert profile.process_control_config['broker_port'] == 1234 # Call it again to check it works to reconfigure existing broker connection parameters - options = [profile_name, '-n', '--broker-host', 'rabbitmq.broker.com'] - run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) - assert profile.process_control_backend == 'core.rabbitmq' - assert profile.process_control_config['broker_host'] == 'rabbitmq.broker.com' + options = [profile_name, '-n', '--broker-port', '5672'] + cli_result = run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) + assert 'Connected to RabbitMQ with the provided connection parameters' in cli_result.stdout + assert profile.process_control_config['broker_port'] == 5672 From 6db2f4060d4ece4552f5fe757c0f7d938810f4d1 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 24 Jun 2024 10:24:13 +0200 Subject: [PATCH 18/82] Dependencies: Update `tabulate>=0.8.0,<0.10.0` (#6472) --- environment.yml | 2 +- pyproject.toml | 2 +- requirements/requirements-py-3.10.txt | 2 +- requirements/requirements-py-3.11.txt | 2 +- requirements/requirements-py-3.12.txt | 2 +- requirements/requirements-py-3.9.txt | 2 
+- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/environment.yml b/environment.yml index ba2bff4c93..98dd997ba1 100644 --- a/environment.yml +++ b/environment.yml @@ -32,7 +32,7 @@ dependencies: - pyyaml~=6.0 - requests~=2.0 - sqlalchemy~=2.0 -- tabulate~=0.8.5 +- tabulate<0.10.0,>=0.8.0 - tqdm~=4.45 - upf_to_json~=0.9.2 - wrapt~=1.11 diff --git a/pyproject.toml b/pyproject.toml index 85b5795a99..c70c7a96de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,7 @@ dependencies = [ 'pyyaml~=6.0', 'requests~=2.0', 'sqlalchemy~=2.0', - 'tabulate~=0.8.5', + 'tabulate>=0.8.0,<0.10.0', 'tqdm~=4.45', 'upf_to_json~=0.9.2', 'wrapt~=1.11' diff --git a/requirements/requirements-py-3.10.txt b/requirements/requirements-py-3.10.txt index f8c0903e2d..3955a57530 100644 --- a/requirements/requirements-py-3.10.txt +++ b/requirements/requirements-py-3.10.txt @@ -190,7 +190,7 @@ sphinxext-rediraffe==0.2.7 sqlalchemy==2.0.23 stack-data==0.6.2 sympy==1.12 -tabulate==0.8.10 +tabulate==0.9.0 tenacity==8.2.2 terminado==0.17.1 textual==0.29.0 diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt index 1de3c788d7..feedaae17a 100644 --- a/requirements/requirements-py-3.11.txt +++ b/requirements/requirements-py-3.11.txt @@ -189,7 +189,7 @@ sphinxext-rediraffe==0.2.7 sqlalchemy==2.0.23 stack-data==0.6.2 sympy==1.12 -tabulate==0.8.10 +tabulate==0.9.0 tenacity==8.2.2 terminado==0.17.1 textual==0.29.0 diff --git a/requirements/requirements-py-3.12.txt b/requirements/requirements-py-3.12.txt index 86d44d4c36..3246ddc471 100644 --- a/requirements/requirements-py-3.12.txt +++ b/requirements/requirements-py-3.12.txt @@ -187,7 +187,7 @@ sqlalchemy==2.0.23 sqlalchemy-utils==0.37.9 stack-data==0.6.3 sympy==1.12 -tabulate==0.8.10 +tabulate==0.9.0 tenacity==8.2.3 terminado==0.17.1 tinycss2==1.2.1 diff --git a/requirements/requirements-py-3.9.txt b/requirements/requirements-py-3.9.txt index 69ffaf4f80..5b0d89b5bc 100644 --- a/requirements/requirements-py-3.9.txt +++ b/requirements/requirements-py-3.9.txt @@ -192,7 +192,7 @@ sphinxext-rediraffe==0.2.7 sqlalchemy==2.0.23 stack-data==0.6.2 sympy==1.12 -tabulate==0.8.10 +tabulate==0.9.0 tenacity==8.2.2 terminado==0.17.1 textual==0.29.0 From 63160995d6078051f0f5e524b6fab9aabb2747ed Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 10:24:28 +0200 Subject: [PATCH 19/82] CLI: Remove the RabbitMQ options from `verdi profile setup` (#6480) For the vast majority of use cases, users will have a default setup for RabbitMQ and so the default configuration will be adequate and so they will not need the options in the command. On the flipside, showing the options by default can makes the command harder to use as users will take pause to think what value to pass. Since there is the `verdi profile configure-rabbitmq` command now that allows to configure or reconfigure the RabbitMQ connection parameters for an existing profile, it is fine to remove these options from the profile setup. Advanced users that need to customize the connection parameters can resort to that separate command. 
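A short sketch of the resulting setup flow, mirroring the `command_create_profile` changes in the diff below, with plain `print` standing in for the CLI echo helpers:

    from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config

    broker_backend = None
    broker_config = None
    try:
        # After this change the helper raises ``ConnectionError`` instead of returning ``None``.
        broker_config = detect_rabbitmq_config()
    except ConnectionError as exception:
        print(f'RabbitMQ server not reachable: {exception}')
    else:
        broker_backend = 'core.rabbitmq'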
--- src/aiida/brokers/rabbitmq/defaults.py | 7 +- src/aiida/cmdline/commands/cmd_presto.py | 40 +++++++--- src/aiida/cmdline/commands/cmd_profile.py | 90 +++++++++++------------ tests/cmdline/commands/test_presto.py | 5 +- tests/cmdline/commands/test_profile.py | 2 +- 5 files changed, 78 insertions(+), 66 deletions(-) diff --git a/src/aiida/brokers/rabbitmq/defaults.py b/src/aiida/brokers/rabbitmq/defaults.py index b312897f73..21a15f1ad0 100644 --- a/src/aiida/brokers/rabbitmq/defaults.py +++ b/src/aiida/brokers/rabbitmq/defaults.py @@ -36,10 +36,10 @@ def detect_rabbitmq_config( host: str | None = None, port: int | None = None, virtual_host: str | None = None, - heartbeat: int | None = None, -) -> dict[str, t.Any] | None: +) -> dict[str, t.Any]: """Try to connect to a RabbitMQ server with the default connection parameters. + :raises ConnectionError: If the connection failed with the provided connection parameters :returns: The connection parameters if the RabbitMQ server was successfully connected to, or ``None`` otherwise. """ from kiwipy.rmq.threadcomms import connect @@ -51,7 +51,6 @@ def detect_rabbitmq_config( 'host': host or os.getenv('AIIDA_BROKER_HOST', BROKER_DEFAULTS['host']), 'port': port or int(os.getenv('AIIDA_BROKER_PORT', BROKER_DEFAULTS['port'])), 'virtual_host': virtual_host or os.getenv('AIIDA_BROKER_VIRTUAL_HOST', BROKER_DEFAULTS['virtual_host']), - 'heartbeat': heartbeat or int(os.getenv('AIIDA_BROKER_HEARTBEAT', BROKER_DEFAULTS['heartbeat'])), } LOGGER.info(f'Attempting to connect to RabbitMQ with parameters: {connection_params}') @@ -59,7 +58,7 @@ def detect_rabbitmq_config( try: connect(connection_params=connection_params) except ConnectionError: - return None + raise ConnectionError(f'Failed to connect with following connection parameters: {connection_params}') # The profile configuration expects the keys of the broker config to be prefixed with ``broker_``. return {f'broker_{key}': value for key, value in connection_params.items()} diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py index 1893a6d461..d0835b8956 100644 --- a/src/aiida/cmdline/commands/cmd_presto.py +++ b/src/aiida/cmdline/commands/cmd_presto.py @@ -50,7 +50,13 @@ def detect_postgres_config( postgres_password: str, non_interactive: bool, ) -> dict[str, t.Any]: - """.""" + """Attempt to connect to the given PostgreSQL server and create a new user and database. + + :raises ConnectionError: If no connection could be established to the PostgreSQL server or a user and database + could not be created. + :returns: The connection configuration for the newly created user and database as can be used directly for the + storage configuration of the ``core.psql_dos`` storage plugin. 
+ """ import secrets from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER @@ -65,7 +71,7 @@ def detect_postgres_config( postgres = Postgres(interactive=not non_interactive, quiet=False, dbinfo=dbinfo) if not postgres.is_connected: - echo.echo_critical(f'Failed to connect to the PostgreSQL server using parameters: {dbinfo}') + raise ConnectionError(f'Failed to connect to the PostgreSQL server using parameters: {dbinfo}') database_name = f'aiida-{profile_name}' database_username = f'aiida-{profile_name}' @@ -76,7 +82,7 @@ def detect_postgres_config( dbname=database_name, dbuser=database_username, dbpass=database_password ) except Exception as exception: - echo.echo_critical(f'Unable to automatically create the PostgreSQL user and database: {exception}') + raise ConnectionError(f'Unable to automatically create the PostgreSQL user and database: {exception}') return { 'database_hostname': postgres_hostname, @@ -175,23 +181,33 @@ def verdi_presto( 'postgres_password': postgres_password, 'non_interactive': non_interactive, } - storage_config: dict[str, t.Any] = detect_postgres_config(**postgres_config_kwargs) if use_postgres else {} - storage_backend = 'core.psql_dos' if storage_config else 'core.sqlite_dos' + + storage_backend: str = 'core.sqlite_dos' + storage_config: dict[str, t.Any] = {} if use_postgres: - echo.echo_report( - '`--use-postgres` enabled and database creation successful: configuring the profile to use PostgreSQL.' - ) + try: + storage_config = detect_postgres_config(**postgres_config_kwargs) + except ConnectionError as exception: + echo.echo_critical(str(exception)) + else: + echo.echo_report( + '`--use-postgres` enabled and database creation successful: configuring the profile to use PostgreSQL.' + ) + storage_backend = 'core.psql_dos' else: echo.echo_report('Option `--use-postgres` not enabled: configuring the profile to use SQLite.') - broker_config = detect_rabbitmq_config() - broker_backend = 'core.rabbitmq' if broker_config is not None else None + broker_backend = None + broker_config = None - if broker_config is None: - echo.echo_report('RabbitMQ server not found: configuring the profile without a broker.') + try: + broker_config = detect_rabbitmq_config() + except ConnectionError as exception: + echo.echo_report(f'RabbitMQ server not found ({exception}): configuring the profile without a broker.') else: echo.echo_report('RabbitMQ server detected: configuring the profile with a broker.') + broker_backend = 'core.rabbitmq' try: profile = create_profile( diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 047126a38c..5e89b72d70 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -27,7 +27,17 @@ def verdi_profile(): def command_create_profile( - ctx: click.Context, storage_cls, non_interactive: bool, profile: Profile, set_as_default: bool = True, **kwargs + ctx: click.Context, + storage_cls, + non_interactive: bool, + profile: Profile, + set_as_default: bool = True, + email: str | None = None, + first_name: str | None = None, + last_name: str | None = None, + institution: str | None = None, + use_rabbitmq: bool = True, + **kwargs, ): """Create a new profile, initialise its storage and create a default user. @@ -37,43 +47,44 @@ def command_create_profile( :param profile: The profile instance. 
This is an empty ``Profile`` instance created by the command line argument which currently only contains the selected profile name for the profile that is to be created. :param set_as_default: Whether to set the created profile as the new default. + :param email: Email for the default user. + :param first_name: First name for the default user. + :param last_name: Last name for the default user. + :param institution: Institution for the default user. + :param use_rabbitmq: Whether to configure RabbitMQ as the broker. :param kwargs: Arguments to initialise instance of the selected storage implementation. """ + from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config from aiida.plugins.entry_point import get_entry_point_from_class - if not storage_cls.read_only and kwargs.get('email', None) is None: + if not storage_cls.read_only and email is None: raise click.BadParameter('The option is required for storages that are not read-only.', param_hint='--email') - email = kwargs.pop('email') - first_name = kwargs.pop('first_name') - last_name = kwargs.pop('last_name') - institution = kwargs.pop('institution') - _, storage_entry_point = get_entry_point_from_class(storage_cls.__module__, storage_cls.__name__) assert storage_entry_point is not None - if kwargs.pop('use_rabbitmq'): - broker_backend = 'core.rabbitmq' - broker_config = { - key: kwargs.get(key) - for key in ( - 'broker_protocol', - 'broker_username', - 'broker_password', - 'broker_host', - 'broker_port', - 'broker_virtual_host', - ) - } + broker_backend = None + broker_config = None + + if use_rabbitmq: + try: + broker_config = detect_rabbitmq_config() + except ConnectionError as exception: + echo.echo_warning(f'RabbitMQ server not reachable: {exception}.') + else: + echo.echo_success(f'RabbitMQ server detected with connection parameters: {broker_config}') + broker_backend = 'core.rabbitmq' + + echo.echo_report('RabbitMQ can be reconfigured with `verdi profile configure-rabbitmq`.') else: - broker_backend = None - broker_config = None + echo.echo_report('Creating profile without RabbitMQ.') + echo.echo_report('It can be configured at a later point in time with `verdi profile configure-rabbitmq`.') try: profile = create_profile( ctx.obj.config, name=profile.name, - email=email, + email=email, # type: ignore[arg-type] first_name=first_name, last_name=last_name, institution=institution, @@ -104,24 +115,6 @@ def command_create_profile( setup.SETUP_USER_LAST_NAME(), setup.SETUP_USER_INSTITUTION(), setup.SETUP_USE_RABBITMQ(), - setup.SETUP_BROKER_PROTOCOL( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_USERNAME( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_PASSWORD( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_HOST( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_PORT( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), - setup.SETUP_BROKER_VIRTUAL_HOST( - prompt_fn=lambda ctx: ctx.params['use_rabbitmq'], required_fn=lambda ctx: ctx.params['use_rabbitmq'] - ), ], ) def profile_setup(): @@ -146,22 +139,23 @@ def profile_configure_rabbitmq(ctx, profile, non_interactive, force, **kwargs): """ from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config 
- connection_params = {key.lstrip('broker_'): value for key, value in kwargs.items() if key.startswith('broker_')} - - broker_config = detect_rabbitmq_config(**connection_params) + broker_config = {key: value for key, value in kwargs.items() if key.startswith('broker_')} + connection_params = {key.lstrip('broker_'): value for key, value in broker_config.items()} - if broker_config is None: - echo.echo_warning(f'Unable to connect to RabbitMQ server with configuration: {connection_params}') + try: + broker_config = detect_rabbitmq_config(**connection_params) + except ConnectionError as exception: + echo.echo_warning(f'Unable to connect to RabbitMQ server: {exception}') if not force: click.confirm('Do you want to continue with the provided configuration?', abort=True) else: echo.echo_success('Connected to RabbitMQ with the provided connection parameters') - profile.set_process_controller(name='core.rabbitmq', config=kwargs) + profile.set_process_controller(name='core.rabbitmq', config=broker_config) ctx.obj.config.update_profile(profile) ctx.obj.config.store() - echo.echo_success(f'RabbitMQ configuration for `{profile.name}` updated to: {connection_params}') + echo.echo_success(f'RabbitMQ configuration for `{profile.name}` updated to: {broker_config}') @verdi_profile.command('list') diff --git a/tests/cmdline/commands/test_presto.py b/tests/cmdline/commands/test_presto.py index 8651664f61..3ec1d1e5da 100644 --- a/tests/cmdline/commands/test_presto.py +++ b/tests/cmdline/commands/test_presto.py @@ -32,8 +32,11 @@ def test_presto_without_rmq(pytestconfig, run_cli_command, monkeypatch): """Test the ``verdi presto`` without RabbitMQ.""" from aiida.brokers.rabbitmq import defaults + def detect_rabbitmq_config(**kwargs): + raise ConnectionError() + # Patch the RabbitMQ detection function to pretend it could not find the service - monkeypatch.setattr(defaults, 'detect_rabbitmq_config', lambda: None) + monkeypatch.setattr(defaults, 'detect_rabbitmq_config', lambda: detect_rabbitmq_config()) result = run_cli_command(verdi_presto, ['--non-interactive']) assert 'Created new profile `presto`.' in result.output diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index 781a8b3cfa..7f89cd2f31 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -299,7 +299,7 @@ def test_configure_rabbitmq(run_cli_command, isolated_config): # Verify that configuring with incorrect options and `--force` raises a warning but still configures the broker options = [profile_name, '-f', '--broker-port', '1234'] cli_result = run_cli_command(cmd_profile.profile_configure_rabbitmq, options, use_subprocess=False) - assert 'Unable to connect to RabbitMQ server with configuration:' in cli_result.stdout + assert 'Unable to connect to RabbitMQ server: Failed to connect' in cli_result.stdout assert profile.process_control_config['broker_port'] == 1234 # Call it again to check it works to reconfigure existing broker connection parameters From 8ea203cd9b1d2fbb4a3b38ba67beec97bb8c7145 Mon Sep 17 00:00:00 2001 From: Julian Geiger Date: Fri, 28 Jun 2024 10:43:17 +0200 Subject: [PATCH 20/82] CLI: Change `--profile` to `-p/--profile-name` for `verdi profile setup` (#6481) This to be consistent with naming of the option for `verdi presto`. 
--- docs/source/howto/archive_profile.md | 2 +- docs/source/reference/command_line.rst | 2 +- docs/source/topics/storage.rst | 4 ++-- src/aiida/cmdline/commands/cmd_presto.py | 1 + src/aiida/cmdline/commands/cmd_profile.py | 2 +- src/aiida/cmdline/params/options/commands/setup.py | 11 +++++++++++ tests/cmdline/commands/test_profile.py | 12 ++++++------ 7 files changed, 23 insertions(+), 11 deletions(-) diff --git a/docs/source/howto/archive_profile.md b/docs/source/howto/archive_profile.md index 5a3e85cee0..d637f62d2b 100644 --- a/docs/source/howto/archive_profile.md +++ b/docs/source/howto/archive_profile.md @@ -25,7 +25,7 @@ See {ref}`how-to:share:archives` for information on how to create and migrate an The easiest way to inspect the contents of an archive is to create a profile that "mounts" the archive as its data storage: ```{code-cell} ipython3 -!verdi profile setup core.sqlite_zip -n --profile archive --filepath process.aiida +!verdi profile setup core.sqlite_zip -n --profile-name archive --filepath process.aiida ``` You can now inspect the contents of the `process.aiida` archive by using the `archive` profile in the same way you would a standard AiiDA profile. diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index b3edb33a0e..c3f3250c9c 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -329,7 +329,7 @@ Below is a list with all available subcommands. the newly created profile uses the new PostgreSQL database instead of SQLite. Options: - --profile-name TEXT Name of the profile. By default, a unique name starting + -p, --profile-name TEXT Name of the profile. By default, a unique name starting with `presto` is automatically generated. [default: (dynamic)] --email TEXT Email of the default user. [default: (dynamic)] diff --git a/docs/source/topics/storage.rst b/docs/source/topics/storage.rst index 7b358a91f9..59d6761360 100644 --- a/docs/source/topics/storage.rst +++ b/docs/source/topics/storage.rst @@ -141,7 +141,7 @@ A fully operational profile using this storage plugin can be created with a sing .. code-block:: console - verdi profile setup core.sqlite_dos -n --profile --email + verdi profile setup core.sqlite_dos -n --profile-name --email replacing ```` with the desired name for the profile and ```` with the email for the default user. @@ -167,7 +167,7 @@ However, since otherwise it functions like normal storage plugins, a profile can .. code-block:: console - verdi profile setup core.sqlite_zip -n --profile --filepath + verdi profile setup core.sqlite_zip -n --profile-name --filepath replacing ```` with the desired name for the profile and ```` the path to the archive file. The created profile can now be loaded like any other profile, and the contents of the provenance graph can be explored as usual. 
diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py index d0835b8956..64a17fdac2 100644 --- a/src/aiida/cmdline/commands/cmd_presto.py +++ b/src/aiida/cmdline/commands/cmd_presto.py @@ -96,6 +96,7 @@ def detect_postgres_config( @verdi.command('presto') @click.option( + '-p', '--profile-name', default=lambda: get_default_presto_profile_name(), show_default=True, diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 5e89b72d70..057f2de5a9 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -108,7 +108,7 @@ def command_create_profile( command=command_create_profile, entry_point_group='aiida.storage', shared_options=[ - setup.SETUP_PROFILE(), + setup.SETUP_PROFILE_NAME(), setup.SETUP_PROFILE_SET_AS_DEFAULT(), setup.SETUP_USER_EMAIL(required=False), setup.SETUP_USER_FIRST_NAME(), diff --git a/src/aiida/cmdline/params/options/commands/setup.py b/src/aiida/cmdline/params/options/commands/setup.py index 008f51b3a0..40df742d4e 100644 --- a/src/aiida/cmdline/params/options/commands/setup.py +++ b/src/aiida/cmdline/params/options/commands/setup.py @@ -181,6 +181,17 @@ def get_quicksetup_password(ctx, param, value): cls=options.interactive.InteractiveOption, ) +SETUP_PROFILE_NAME = options.OverridableOption( + '-p', + '--profile-name', + 'profile', + prompt='Profile name', + help='The name of the new profile.', + required=True, + type=types.ProfileParamType(cannot_exist=True), + cls=options.interactive.InteractiveOption, +) + SETUP_PROFILE_SET_AS_DEFAULT = options.OverridableOption( '--set-as-default/--no-set-as-default', prompt='Set as default?', diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py index 7f89cd2f31..07b45c2818 100644 --- a/tests/cmdline/commands/test_profile.py +++ b/tests/cmdline/commands/test_profile.py @@ -173,7 +173,7 @@ def test_delete_storage(run_cli_command, isolated_config, tmp_path, entry_point) else: filepath = tmp_path / 'storage' - options = [entry_point, '-n', '--filepath', str(filepath), '--profile', profile_name, '--email', 'email@host'] + options = [entry_point, '-n', '--filepath', str(filepath), '--profile-name', profile_name, '--email', 'email@host'] result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) assert filepath.exists() assert profile_name in isolated_config.profile_names @@ -204,7 +204,7 @@ def test_setup(config_psql_dos, run_cli_command, isolated_config, tmp_path, entr options = ['--filepath', str(tmp_path)] profile_name = 'temp-profile' - options = [entry_point, '-n', '--profile', profile_name, '--email', 'email@host', *options] + options = [entry_point, '-n', '--profile-name', profile_name, '--email', 'email@host', *options] result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) assert f'Created new profile `{profile_name}`.' 
in result.output assert profile_name in isolated_config.profile_names @@ -221,7 +221,7 @@ def test_setup_set_as_default(run_cli_command, isolated_config, tmp_path, set_as '-n', '--filepath', str(tmp_path), - '--profile', + '--profile-name', profile_name, '--email', 'email@host', @@ -247,7 +247,7 @@ def test_setup_email_required(run_cli_command, isolated_config, tmp_path, entry_ isolated_config.unset_option('autofill.user.email') - options = [entry_point, '-n', '--filepath', str(tmp_path), '--profile', profile_name] + options = [entry_point, '-n', '--filepath', str(tmp_path), '--profile-name', profile_name] if storage_cls.read_only: result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) @@ -261,7 +261,7 @@ def test_setup_email_required(run_cli_command, isolated_config, tmp_path, entry_ def test_setup_no_use_rabbitmq(run_cli_command, isolated_config): """Test the ``--no-use-rabbitmq`` option.""" profile_name = 'profile-no-broker' - options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile', profile_name, '--no-use-rabbitmq'] + options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile-name', profile_name, '--no-use-rabbitmq'] result = run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) assert f'Created new profile `{profile_name}`.' in result.output @@ -276,7 +276,7 @@ def test_configure_rabbitmq(run_cli_command, isolated_config): profile_name = 'profile' # First setup a profile without a broker configured - options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile', profile_name, '--no-use-rabbitmq'] + options = ['core.sqlite_dos', '-n', '--email', 'a@a', '--profile-name', profile_name, '--no-use-rabbitmq'] run_cli_command(cmd_profile.profile_setup, options, use_subprocess=False) profile = isolated_config.get_profile(profile_name) assert profile.process_control_backend is None From 56995e1c4c7f0ebc87059b33564e84366c81c5ff Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 11:04:45 +0200 Subject: [PATCH 21/82] Engine: Improve error message when submitting without broker (#6465) The `aiida.engine.launch.submit` method was just raising a vague `AssertionError` in case the runner did not have a communicator, which is the case if it was constructed without a communicator which in turn happens for profiles that do not configure a broker. Since profiles without brokers are now supported and users are bound to try to submit anyway, the error message should be clearer. --- src/aiida/engine/launch.py | 10 +++++++++- tests/engine/test_launch.py | 34 +++++++++++++++++++++++++++------- 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/src/aiida/engine/launch.py b/src/aiida/engine/launch.py index 013fd4d690..d37cf46905 100644 --- a/src/aiida/engine/launch.py +++ b/src/aiida/engine/launch.py @@ -111,8 +111,16 @@ def submit( raise InvalidOperation('Cannot use top-level `submit` from within another process, use `self.submit` instead') runner = manager.get_manager().get_runner() + + if runner.controller is None: + raise InvalidOperation( + 'Cannot submit because the runner does not have a process controller, probably because the profile does ' + 'not define a broker like RabbitMQ. If a RabbitMQ server is available, the profile can be configured to ' + 'use it with `verdi profile configure-rabbitmq`. Otherwise, use :meth:`aiida.engine.launch.run` instead to ' + 'run the process in the local Python interpreter instead of submitting it to the daemon.' 
+ ) + assert runner.persister is not None, 'runner does not have a persister' - assert runner.controller is not None, 'runner does not have a controller' process_inited = instantiate_process(runner, process, **inputs) diff --git a/tests/engine/test_launch.py b/tests/engine/test_launch.py index fedd42cd98..92fba4bd6b 100644 --- a/tests/engine/test_launch.py +++ b/tests/engine/test_launch.py @@ -20,6 +20,16 @@ ArithmeticAddCalculation = CalculationFactory('core.arithmetic.add') +@pytest.fixture +def arithmetic_add_builder(aiida_code_installed): + builder = ArithmeticAddCalculation.get_builder() + builder.code = aiida_code_installed(default_calc_job_plugin='core.arithmetic.add', filepath_executable='/bin/bash') + builder.x = orm.Int(1) + builder.y = orm.Int(1) + builder.metadata = {'options': {'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1}}} + return builder + + @calcfunction def add(term_a, term_b): return term_a + term_b @@ -69,18 +79,28 @@ def add(self): @pytest.mark.usefixtures('started_daemon_client') -def test_submit_wait(aiida_code_installed): +def test_submit_wait(arithmetic_add_builder): """Test the ``wait`` argument of :meth:`aiida.engine.launch.submit`.""" - builder = ArithmeticAddCalculation.get_builder() - builder.code = aiida_code_installed(default_calc_job_plugin='core.arithmetic.add', filepath_executable='/bin/bash') - builder.x = orm.Int(1) - builder.y = orm.Int(1) - builder.metadata = {'options': {'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1}}} - node = launch.submit(builder, wait=True, wait_interval=0.1) + node = launch.submit(arithmetic_add_builder, wait=True, wait_interval=0.1) assert node.is_finished, node.process_state assert node.is_finished_ok, node.exit_code +def test_submit_no_broker(arithmetic_add_builder, monkeypatch, manager): + """Test that ``submit`` raises ``InvalidOperation`` if the runner does not have a controller. + + The runner does not have a controller if the runner was not provided a communicator which is the case for profiles + that do not define a broker. + """ + runner = manager.get_runner() + monkeypatch.setattr(runner, '_controller', None) + + with pytest.raises( + exceptions.InvalidOperation, match=r'Cannot submit because the runner does not have a process controller.*' + ): + launch.submit(arithmetic_add_builder) + + def test_await_processes_invalid(): """Test :func:`aiida.engine.launch.await_processes` for invalid inputs.""" with pytest.raises(TypeError): From 6a3a59b29ba64401828d9ab51dc123060868278b Mon Sep 17 00:00:00 2001 From: Alexander Goscinski Date: Fri, 28 Jun 2024 11:07:32 +0200 Subject: [PATCH 22/82] Doc: Fixing several small issues (#6392) The recursive workchain in code snippets can theoretically not run and is just confusing to have as an example for a user. It has been fixed by using different name for the inner workchain. In the classes `NodeCaching` and `ProcessNodeCaching` the `is valid_cache` is a property. To not render it with method brackets, the `:attr:` sphinx directive is used instead of `:meth:`. 
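A small illustration of why `:attr:` is the appropriate directive: `is_valid_cache` is read and set without call brackets. This assumes a loaded profile and that the AiiDA 2.x `node.base.caching` namespace exposes `NodeCaching`:

    from aiida import orm

    node = orm.Int(1).store()
    # ``is_valid_cache`` is a property, not a method, so no parentheses are used.
    print(node.base.caching.is_valid_cache)   # True by default
    node.base.caching.is_valid_cache = False  # exclude this node as a caching source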
--- docs/source/internals/engine.rst | 4 ++-- docs/source/topics/provenance/caching.rst | 8 ++++---- .../snippets/workchains/run_workchain_submit_append.py | 5 ++++- .../snippets/workchains/run_workchain_submit_complete.py | 5 ++++- .../snippets/workchains/run_workchain_submit_parallel.py | 5 ++++- .../workchains/run_workchain_submit_parallel_nested.py | 5 ++++- 6 files changed, 22 insertions(+), 10 deletions(-) diff --git a/docs/source/internals/engine.rst b/docs/source/internals/engine.rst index 42652c3132..3e6f30360b 100644 --- a/docs/source/internals/engine.rst +++ b/docs/source/internals/engine.rst @@ -20,14 +20,14 @@ There are several methods which the internal classes of AiiDA use to control the On the level of the generic :class:`orm.Node ` class: -* The :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` property determines whether a particular node can be used as a cache. +* The :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` property determines whether a particular node can be used as a cache. This is used for example to disable caching from failed calculations. * Node classes have a ``_cachable`` attribute, which can be set to ``False`` to completely switch off caching for nodes of that class. This avoids performing queries for the hash altogether. On the level of the :class:`Process ` and :class:`orm.ProcessNode ` classes: -* The :meth:`ProcessNodeCaching.is_valid_cache ` calls :meth:`Process.is_valid_cache `, passing the node itself. +* The :attr:`ProcessNodeCaching.is_valid_cache ` calls :meth:`Process.is_valid_cache `, passing the node itself. This can be used in :class:`~aiida.engine.processes.process.Process` subclasses (e.g. in calculation plugins) to implement custom ways of invalidating the cache. * The :meth:`ProcessNodeCaching._hash_ignored_inputs ` attribute lists the inputs that should be ignored when creating the hash. This is checked by the :meth:`ProcessNodeCaching.get_objects_to_hash ` method. diff --git a/docs/source/topics/provenance/caching.rst b/docs/source/topics/provenance/caching.rst index 6a923d8931..b15a3cbeda 100644 --- a/docs/source/topics/provenance/caching.rst +++ b/docs/source/topics/provenance/caching.rst @@ -146,9 +146,9 @@ This method calls the iterator :meth:`~aiida.orm.nodes.caching.NodeCaching._iter To find the list of `source` nodes that are equivalent to the `target` that is being stored, :meth:`~aiida.orm.nodes.caching.NodeCaching._iter_all_same_nodes` performs the following steps: 1. It queries the database for all nodes that have the same hash as the `target` node. -2. From the result, only those nodes are returned where the property :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` returns ``True``. +2. From the result, only those nodes are returned where the property :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` returns ``True``. -The property :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` therefore allows to control whether a stored node can be used as a `source` in the caching mechanism. +The property :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` therefore allows to control whether a stored node can be used as a `source` in the caching mechanism. By default, for all nodes, the property returns ``True``. 
However, this can be changed on a per-node basis, by setting it to ``False`` @@ -166,8 +166,8 @@ Setting the property to ``False``, will cause an extra to be stored on the node Through this method, it is possible to guarantee that individual nodes are never used as a `source` for caching. -The :class:`~aiida.engine.processes.process.Process` class overrides the :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` property to give more fine-grained control on process nodes as caching sources. -If either :meth:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` of the base class or :meth:`~aiida.orm.nodes.process.process.ProcessNode.is_finished` returns ``False``, the process node is not a valid source. +The :class:`~aiida.engine.processes.process.Process` class overrides the :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` property to give more fine-grained control on process nodes as caching sources. +If either :attr:`~aiida.orm.nodes.caching.NodeCaching.is_valid_cache` of the base class or :meth:`~aiida.orm.nodes.process.process.ProcessNode.is_finished` returns ``False``, the process node is not a valid source. Likewise, if the process class cannot be loaded from the node, through the :meth:`~aiida.orm.nodes.process.process.ProcessNode.process_class`, the node is not a valid caching source. Finally, if the associated process class implements the :meth:`~aiida.engine.processes.process.Process.is_valid_cache` method, it is called, passing the node as an argument. If that returns ``True``, the node is considered to be a valid caching source. diff --git a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_append.py b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_append.py index 07bb32a7e4..1073e7b59b 100644 --- a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_append.py +++ b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_append.py @@ -1,4 +1,7 @@ from aiida.engine import WorkChain, append_ +from aiida.plugins.factories import CalculationFactory + +SomeOtherWorkChain = CalculationFactory('some.module') class SomeWorkChain(WorkChain): @@ -12,7 +15,7 @@ def define(cls, spec): def submit_workchains(self): for i in range(3): - future = self.submit(SomeWorkChain) + future = self.submit(SomeOtherWorkChain) self.to_context(workchains=append_(future)) def inspect_workchains(self): diff --git a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_complete.py b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_complete.py index 8b7d2f5041..b325ab5c59 100644 --- a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_complete.py +++ b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_complete.py @@ -1,4 +1,7 @@ from aiida.engine import ToContext, WorkChain +from aiida.plugins.factories import CalculationFactory + +SomeOtherWorkChain = CalculationFactory('some.module') class SomeWorkChain(WorkChain): @@ -11,7 +14,7 @@ def define(cls, spec): ) def submit_workchain(self): - future = self.submit(SomeWorkChain) + future = self.submit(SomeOtherWorkChain) return ToContext(workchain=future) def inspect_workchain(self): diff --git a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel.py b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel.py index 1db43470f0..313c1b02e1 100644 --- 
a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel.py +++ b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel.py @@ -1,4 +1,7 @@ from aiida.engine import WorkChain +from aiida.plugins.factories import CalculationFactory + +SomeOtherWorkChain = CalculationFactory('some.module') class SomeWorkChain(WorkChain): @@ -12,7 +15,7 @@ def define(cls, spec): def submit_workchains(self): for i in range(3): - future = self.submit(SomeWorkChain) + future = self.submit(SomeOtherWorkChain) key = f'workchain_{i}' self.to_context(**{key: future}) diff --git a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel_nested.py b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel_nested.py index 7b42042c83..cf80bd02b5 100644 --- a/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel_nested.py +++ b/docs/source/topics/workflows/include/snippets/workchains/run_workchain_submit_parallel_nested.py @@ -1,4 +1,7 @@ from aiida.engine import WorkChain +from aiida.plugins.factories import CalculationFactory + +SomeOtherWorkChain = CalculationFactory('some.module') class SomeWorkChain(WorkChain): @@ -12,7 +15,7 @@ def define(cls, spec): def submit_workchains(self): for i in range(3): - future = self.submit(SomeWorkChain) + future = self.submit(SomeOtherWorkChain) key = f'workchains.sub{i}' self.to_context(**{key: future}) From 4cecda5177c456cee252c16295416c3842bb5d2d Mon Sep 17 00:00:00 2001 From: Daniel Hollas Date: Fri, 28 Jun 2024 10:11:06 +0100 Subject: [PATCH 23/82] Devops: Disable code coverage in `test-install.yml` (#6479) This should cut down the CI time by at least 10 minutes for these tests. --- .github/workflows/test-install.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml index 0baab3c074..637b48a445 100644 --- a/.github/workflows/test-install.yml +++ b/.github/workflows/test-install.yml @@ -229,7 +229,7 @@ jobs: env: AIIDA_TEST_PROFILE: test_aiida AIIDA_WARN_v3: 1 - run: pytest --cov aiida --verbose tests -m 'not nightly' + run: pytest --verbose tests -m 'not nightly' - name: Freeze test environment run: pip freeze | sed '1d' | tee requirements-py-${{ matrix.python-version }}.txt From a6cf7fc7e02a48a7e3b9c4ba6ce5e2cd413e6b23 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 13:21:47 +0200 Subject: [PATCH 24/82] Docs: Customize the color scheme through custom style sheet (#6456) Change the default coloring of the `pydata-sphinx-theme` to use the AiiDA primary colors. 
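The stylesheet only takes effect if Sphinx is told to include it; a minimal sketch of how a custom sheet like this is typically registered in ``docs/source/conf.py`` (the registration itself is not part of this diff and may differ in this repository):

    # Illustrative only: the actual conf.py registration is not shown in this patch.
    html_static_path = ['_static']
    html_css_files = ['aiida-custom.css']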
--- docs/source/_static/aiida-custom.css | 107 +++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/docs/source/_static/aiida-custom.css b/docs/source/_static/aiida-custom.css index 73fcf4945f..7283171f36 100644 --- a/docs/source/_static/aiida-custom.css +++ b/docs/source/_static/aiida-custom.css @@ -1,3 +1,110 @@ +/* AiiDA colors in HTML format +blue: #0096d2 +orange: #fe7d17 +green: #30b808 +*/ + +html[data-theme="light"] { + --pst-color-primary: #0096d2; + --pst-color-secondary: #fe7d17; + --pst-color-surface: #f5f5f5; +} + +html[data-theme="dark"] { + --pst-color-primary: #0096d2; + --pst-color-secondary: #fe7d17; +} + +code { + --pst-color-inline-code: #0096d2; + font-weight: bold; +} + +html[data-theme=light] .highlight .ch, +html[data-theme=light] .highlight .sd { + color: #777777; + font-style: italic +} + +html[data-theme=light] .highlight .s1, +html[data-theme=light] .highlight .si { + color: #30b808; + font-weight: bold; +} + +html[data-theme=light] .highlight .k, +html[data-theme=light] .highlight .kc, +html[data-theme=light] .highlight .kn, +html[data-theme=light] .highlight .ow, +html[data-theme=light] .highlight .mf, +html[data-theme=light] .highlight .mi { + color: #0096d2; + font-weight: bold; +} + +html[data-theme=dark] .highlight .ch, +html[data-theme=dark] .highlight .sd { + color: #999999; + font-style: italic +} + +html[data-theme=dark] .highlight .s1, +html[data-theme=dark] .highlight .si { + color: #30b808; + font-weight: bold; +} + +html[data-theme=dark] .highlight .k, +html[data-theme=dark] .highlight .kc, +html[data-theme=dark] .highlight .kn, +html[data-theme=dark] .highlight .ow, +html[data-theme=dark] .highlight .mf, +html[data-theme=dark] .highlight .mi { + color: #0096d2; + font-weight: bold; +} + +.sd-card-hover:hover { + border-color: var(--pst-color-primary); + transform: none; +} + +.aiida-green { + color: #30b808; +} + +.aiida-blue { + color: #0096d2; +} + +.aiida-orange { + color: #fe7d17; +} + +.aiida-red { + color: rgb(192, 11, 80); +} + +img.logo-shell { + width: 20px; + padding-bottom: 3px; + margin-right: 3px; +} + +.sd-card-footer { + padding-top: 0rem; + border-top: none !important; +} + +.sd-card-footer table { + margin-bottom: 0rem; + border-color: transparent; +} + +.sd-card-footer table td:last-child { + text-align: right; +} + /* Fix CSS of top bar link icons */ a.nav-link.nav-external i { padding-left: 0.3em !important; From 45a8b461a50b90c4df1ab720dff609f72d1a2487 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 15:47:22 +0200 Subject: [PATCH 25/82] CLI: Fail early in `verdi presto` when profile name already exists (#6488) If an explicit profile name is specified with `-p/--profile-name` it should be validated as soon as possible and error if the profile already exists, before anything else is done. This prevents, for example, that a PostgreSQL user and database are created that are then not cleaned up. 
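As the new test added below spells out, reusing an existing profile name now aborts immediately, before any PostgreSQL user or database is created (``custom-presto`` being the profile name used in the test):

    Usage: presto [OPTIONS]
    Try 'presto --help' for help.

    Error: Invalid value for --profile-name: The profile `custom-presto` already exists.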
--- src/aiida/cmdline/commands/cmd_presto.py | 3 +++ tests/cmdline/commands/test_presto.py | 22 +++++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py index 64a17fdac2..6fa9518443 100644 --- a/src/aiida/cmdline/commands/cmd_presto.py +++ b/src/aiida/cmdline/commands/cmd_presto.py @@ -174,6 +174,9 @@ def verdi_presto( from aiida.manage.configuration import create_profile, load_profile from aiida.orm import Computer + if profile_name in ctx.obj.config.profile_names: + raise click.BadParameter(f'The profile `{profile_name}` already exists.', param_hint='--profile-name') + postgres_config_kwargs = { 'profile_name': profile_name, 'postgres_hostname': postgres_hostname, diff --git a/tests/cmdline/commands/test_presto.py b/tests/cmdline/commands/test_presto.py index 3ec1d1e5da..80c61eaa4b 100644 --- a/tests/cmdline/commands/test_presto.py +++ b/tests/cmdline/commands/test_presto.py @@ -1,5 +1,7 @@ """Tests for ``verdi presto``.""" +import textwrap + import pytest from aiida.cmdline.commands.cmd_presto import get_default_presto_profile_name, verdi_presto from aiida.manage.configuration import profile_context @@ -50,7 +52,7 @@ def detect_rabbitmq_config(**kwargs): @pytest.mark.requires_rmq @pytest.mark.usefixtures('empty_config') -def test_presto_with_rmq(pytestconfig, run_cli_command, monkeypatch): +def test_presto_with_rmq(pytestconfig, run_cli_command): """Test the ``verdi presto``.""" result = run_cli_command(verdi_presto, ['--non-interactive']) assert 'Created new profile `presto`.' in result.output @@ -91,3 +93,21 @@ def test_presto_overdose(run_cli_command, config_with_profile_factory): config_with_profile_factory(name='presto-10') result = run_cli_command(verdi_presto) assert 'Created new profile `presto-11`.' in result.output + + +@pytest.mark.requires_psql +@pytest.mark.usefixtures('empty_config') +def test_presto_profile_name_exists(run_cli_command, config_with_profile_factory): + """Test ``verdi presto`` fails early if the specified profile name already exists.""" + profile_name = 'custom-presto' + config_with_profile_factory(name=profile_name) + options = ['--non-interactive', '--use-postgres', '--profile-name', profile_name] + result = run_cli_command(verdi_presto, options, raises=True) + # Matching for the complete literal output as a way to test that nothing else of the command was run, such as + # configuring the broker or creating a database for PostgreSQL + assert result.output == textwrap.dedent("""\ + Usage: presto [OPTIONS] + Try 'presto --help' for help. + + Error: Invalid value for --profile-name: The profile `custom-presto` already exists. + """) From 66a2dcedd0a9428b5b2218b8c82bad9c9aff4956 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 16:48:00 +0200 Subject: [PATCH 26/82] CLI: Only configure logging in `set_log_level` callback once (#6493) All `verdi` commands automatically have the `-v/--verbosity` option added. This option has a callback `set_log_level` that is invoked for each subcommand. The callback is supposed to call `configure_logging` to setup the logging configuration. 
Besides it being unnecessary to call it multiple times for each subcommand, it would actually cause a bug in that once the profile storage would have been loaded (through the callback of the profile option), which would have called `configure_logging` with `with_orm=True` to make sure the `DbLogHandler` was properly configured, another call to `set_log_level` would call `configure_logging` with the default values (where `with_orm=False`) and so the `DbLogHandler` would be removed. This would result in process log messages not being persisted in the database. This would be manifested when running an AiiDA process through a script invoked through `verdi` or any other CLI that uses the verbosity option provided by `aiida-core`. Since the `set_log_level` only has to make sure that the logging is configured at least once, a guard is added to skip the configuration once the `aiida.common.log.CLI_ACTIVE` global has been set by a previous invocation. --- src/aiida/cmdline/params/options/main.py | 7 +++++-- tests/cmdline/params/options/test_verbosity.py | 15 +++++++++------ 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/aiida/cmdline/params/options/main.py b/src/aiida/cmdline/params/options/main.py index aa86a1f0dd..f5eb2d551f 100644 --- a/src/aiida/cmdline/params/options/main.py +++ b/src/aiida/cmdline/params/options/main.py @@ -175,7 +175,7 @@ def decorator(command): return decorator -def set_log_level(_ctx, _param, value): +def set_log_level(ctx, _param, value): """Configure the logging for the CLI command being executed. Note that we cannot use the most obvious approach of directly setting the level on the various loggers. The reason @@ -192,12 +192,15 @@ def set_log_level(_ctx, _param, value): """ from aiida.common import log + if log.CLI_ACTIVE: + return value + log.CLI_ACTIVE = True # If the value is ``None``, it means the option was not specified, but we still configure logging for the CLI # However, we skip this when we are in a tab-completion context. if value is None: - if not _ctx.resilient_parsing: + if not ctx.resilient_parsing: configure_logging() return None diff --git a/tests/cmdline/params/options/test_verbosity.py b/tests/cmdline/params/options/test_verbosity.py index 3544962b38..3573edd54f 100644 --- a/tests/cmdline/params/options/test_verbosity.py +++ b/tests/cmdline/params/options/test_verbosity.py @@ -14,7 +14,7 @@ import pytest from aiida.cmdline.commands.cmd_verdi import verdi from aiida.cmdline.utils import echo -from aiida.common.log import AIIDA_LOGGER, LOG_LEVELS +from aiida.common import log @pytest.fixture @@ -29,10 +29,10 @@ def cmd(): The messages to the ``verdi`` are performed indirect through the utilities of the ``echo`` module. """ - assert 'cli' in [handler.name for handler in AIIDA_LOGGER.handlers] + assert 'cli' in [handler.name for handler in log.AIIDA_LOGGER.handlers] - for log_level in LOG_LEVELS.values(): - AIIDA_LOGGER.log(log_level, 'aiida') + for log_level in log.LOG_LEVELS.values(): + log.AIIDA_LOGGER.log(log_level, 'aiida') echo.echo_debug('verdi') echo.echo_info('verdi') @@ -49,7 +49,7 @@ def verify_log_output(output: str, log_level_aiida: int, log_level_verdi: int): :param log_level_aiida: The expected log level of the ``aiida`` logger. :param log_level_verdi: The expected log level of the ``verdi`` logger. 
""" - for log_level_name, log_level in LOG_LEVELS.items(): + for log_level_name, log_level in log.LOG_LEVELS.items(): prefix = log_level_name.capitalize() if log_level >= log_level_aiida: @@ -73,7 +73,7 @@ def test_default(run_cli_command): verify_log_output(result.output, logging.REPORT, logging.REPORT) -@pytest.mark.parametrize('option_log_level', [level for level in LOG_LEVELS.values() if level != logging.NOTSET]) +@pytest.mark.parametrize('option_log_level', [level for level in log.LOG_LEVELS.values() if level != logging.NOTSET]) @pytest.mark.usefixtures('reset_log_level') def test_explicit(run_cli_command, option_log_level): """Test explicitly settings a verbosity""" @@ -92,6 +92,9 @@ def test_config_option_override(run_cli_command, isolated_config): result = run_cli_command(cmd, raises=True, use_subprocess=False) verify_log_output(result.output, logging.ERROR, logging.WARNING) + # Manually reset the ``aiida.common.log.CLI_ACTIVE`` global otherwise the verbosity callback is a no-op + log.CLI_ACTIVE = None + # If ``--verbosity`` is explicitly defined, it override both both config options. result = run_cli_command(cmd, ['--verbosity', 'INFO'], raises=True, use_subprocess=False) verify_log_output(result.output, logging.INFO, logging.INFO) From 0ee0a0c6ae13588e82edf1cf9e8cb9857c94c31b Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 23:01:36 +0200 Subject: [PATCH 27/82] Docs: Rework the installation section (#6455) The `verdi setup` and `verdi quicksetup` commands have been deprecated and replaced by `verdi profile setup` and `verdi presto`. The installation docs were heavily outdated and the flow was scattered. The biggest change is that there now is a "quick install guide" that relies on `verdi presto` to provide an install route that is fool proof and will work on almost any system in a minimal amount of commands. Then there is the "complete installation guide" that provides all the details necessary to fully customize an installation. 
--- docs/source/conf.py | 6 - docs/source/howto/daemon.rst | 13 + docs/source/howto/index.rst | 1 + docs/source/howto/installation.rst | 10 +- docs/source/howto/interact.rst | 22 +- docs/source/howto/ssh.rst | 2 +- docs/source/index.rst | 45 +- docs/source/installation/docker.rst | 181 ++++++++ docs/source/installation/guide_complete.rst | 408 ++++++++++++++++++ docs/source/installation/guide_quick.rst | 85 ++++ docs/source/installation/index.rst | 97 +++++ .../troubleshooting.rst | 14 +- docs/source/intro/cheatsheet.rst | 16 - docs/source/intro/get_started.rst | 98 ----- docs/source/intro/index.rst | 23 +- docs/source/intro/install_conda.rst | 163 ------- docs/source/intro/install_system.rst | 298 ------------- docs/source/intro/installation.rst | 295 ------------- docs/source/intro/run_docker.rst | 236 ---------- docs/source/redirects.txt | 10 +- docs/source/reference/cheatsheet.rst | 16 + .../cheatsheet}/cheatsheet.png | Bin .../cheatsheet}/cheatsheet.svg | 0 .../cheatsheet}/cheatsheet_v.pdf | Bin docs/source/reference/index.rst | 1 + docs/source/topics/storage.rst | 4 +- docs/source/tutorials/basic.md | 2 +- 27 files changed, 879 insertions(+), 1167 deletions(-) create mode 100644 docs/source/howto/daemon.rst create mode 100644 docs/source/installation/docker.rst create mode 100644 docs/source/installation/guide_complete.rst create mode 100644 docs/source/installation/guide_quick.rst create mode 100644 docs/source/installation/index.rst rename docs/source/{intro => installation}/troubleshooting.rst (97%) delete mode 100644 docs/source/intro/cheatsheet.rst delete mode 100644 docs/source/intro/get_started.rst delete mode 100644 docs/source/intro/install_conda.rst delete mode 100644 docs/source/intro/install_system.rst delete mode 100644 docs/source/intro/installation.rst delete mode 100644 docs/source/intro/run_docker.rst create mode 100644 docs/source/reference/cheatsheet.rst rename docs/source/{intro/_cheatsheet => reference/cheatsheet}/cheatsheet.png (100%) rename docs/source/{intro/_cheatsheet => reference/cheatsheet}/cheatsheet.svg (100%) rename docs/source/{intro/_cheatsheet => reference/cheatsheet}/cheatsheet_v.pdf (100%) diff --git a/docs/source/conf.py b/docs/source/conf.py index 9cfaae4847..745ed79c30 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -49,15 +49,9 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ - 'datatypes/**', 'developer_guide/**', - 'get_started/**', - 'howto/installation_more/index.rst', 'import_export/**', 'internals/global_design.rst', - 'internals/orm.rst', - 'scheduler/index.rst', - 'working_with_aiida/**', ] # The name of the Pygments (syntax highlighting) style to use. diff --git a/docs/source/howto/daemon.rst b/docs/source/howto/daemon.rst new file mode 100644 index 0000000000..3076547206 --- /dev/null +++ b/docs/source/howto/daemon.rst @@ -0,0 +1,13 @@ + +.. _how-to:manage-daemon: + +How to manage the daemon +------------------------ + +The AiiDA daemon process runs in the background and takes care of processing your submitted calculations and workflows, checking their status, retrieving their results once they are finished and storing them in the AiiDA database. 
+ +The AiiDA daemon is controlled using three simple commands: + +* ``verdi daemon start``: start the daemon +* ``verdi daemon status``: check the status of the daemon +* ``verdi daemon stop``: stop the daemon diff --git a/docs/source/howto/index.rst b/docs/source/howto/index.rst index 28fd41d2df..b405bc3f8a 100644 --- a/docs/source/howto/index.rst +++ b/docs/source/howto/index.rst @@ -6,6 +6,7 @@ How-To Guides :maxdepth: 1 interact + daemon plugins_install run_codes run_workflows diff --git a/docs/source/howto/installation.rst b/docs/source/howto/installation.rst index a5dc6c00a1..50547b8f21 100644 --- a/docs/source/howto/installation.rst +++ b/docs/source/howto/installation.rst @@ -14,7 +14,7 @@ Creating profiles ----------------- Each AiiDA installation can have multiple profiles, each of which can have its own individual database and file repository to store the contents of the :ref:`provenance graph`. Profiles allow you to run multiple projects completely independently from one another with just a single AiiDA installation and at least one profile is required to run AiiDA. -A new profile can be created using :ref:`verdi quicksetup` or :ref:`verdi setup`, which works similar to the former but gives more control to the user. +A new profile can be created using :ref:`verdi presto` or :ref:`verdi profile setup`, which works similar to the former but gives more control to the user. Listing profiles ---------------- @@ -296,7 +296,7 @@ Isolating multiple instances An AiiDA instance is defined as the installed source code plus the configuration folder that stores the configuration files with all the configured profiles. It is possible to run multiple AiiDA instances on a single machine, simply by isolating the code and configuration in a virtual environment. -To isolate the code, make sure to install AiiDA into a virtual environment, e.g., with conda or venv, as described :ref:`here `. +To isolate the code, make sure to install AiiDA into a virtual environment, e.g., with conda or venv. Whenever you activate this particular environment, you will be running the particular version of AiiDA (and all the plugins) that you installed specifically for it. This is separate from the configuration of AiiDA, which is stored in the configuration directory which is always named ``.aiida`` and by default is stored in the home directory. @@ -619,12 +619,12 @@ Alternatively to the CLI command, one can also manually create a backup. This re .. _how-to:installation:backup:restore: Restoring data from a backup -================================== +============================ Restoring a backed up AiiDA profile requires: * restoring the profile information in the AiiDA ``config.json`` file. Simply copy the`profiles` entry from - the backed up `config.json`to the one of the running AiiDA instance (see `verdi status` for exact location). + the backed up ``config.json`` to the one of the running AiiDA instance (see ``verdi status`` for exact location). Some information (e.g. the database parameters) might need to be updated. * restoring the data of of the backed up profile according to the ``config.json`` entry. @@ -642,7 +642,7 @@ To test if the restoration worked, run ``verdi -p status`` to ver **PostgreSQL database** - To restore the PostgreSQL database from the ``db.psql`` file that was backed up, first you should create an empty database following the instructions described in :ref:`database ` skipping the ``verdi setup`` phase. 
+ To restore the PostgreSQL database from the ``db.psql`` file that was backed up, first you should create an empty database following the instructions described in :ref:`the installation guide `. The backed up data can then be imported by calling: .. code-block:: console diff --git a/docs/source/howto/interact.rst b/docs/source/howto/interact.rst index 530bcef915..ab6c8c6d85 100644 --- a/docs/source/howto/interact.rst +++ b/docs/source/howto/interact.rst @@ -125,6 +125,18 @@ Interactive notebooks ===================== Similar to :ref:`interactive shells `, AiiDA is also directly compatbile with interactive Python notebooks, such as `Jupyter `_. +To install the required Python packages, install ``aiida-core`` with the ``notebook`` extra, e.g. run: + +.. code-block:: console + + pip install aiida-core[notebook] + +You should now be able to start a Jupyter notebook server: + +.. code-block:: console + + jupyter notebook + To use AiiDA's Python API in a notebook, first a profile has to be loaded: .. code-block:: ipython @@ -142,8 +154,14 @@ The same can be accomplished using the following magic statement: %load_ext aiida %aiida -This magic line will replicate the same environment as :ref:`the interactive shell ` provided by ``verdi shell``. -However, it does require some one-time installation, as detailed in the section on how to :ref:`intro:install:jupyter`. +This magic line replicates the same environment as :ref:`the interactive shell ` provided by ``verdi shell``. + +It is also possible to run ``verdi`` commands inside the notebook, for example: + +.. code-block:: ipython + + %verdi status + .. _how-to:interact-restapi: diff --git a/docs/source/howto/ssh.rst b/docs/source/howto/ssh.rst index 9a2e24a366..59fe9093b6 100644 --- a/docs/source/howto/ssh.rst +++ b/docs/source/howto/ssh.rst @@ -269,6 +269,6 @@ Using kerberos tokens If the remote machine requires authentication through a Kerberos token (that you need to obtain before using ssh), you typically need to * install ``libffi`` (``sudo apt-get install libffi-dev`` under Ubuntu) -* install the ``ssh_kerberos`` extra during the installation of aiida-core (see :ref:`intro:install:setup`). +* install the ``ssh_kerberos`` extra during the installation of aiida-core (see :ref:`installation:guide-complete:python-package:optional-requirements`). If you provide all necessary ``GSSAPI`` options in your ``~/.ssh/config`` file, ``verdi computer configure`` should already pick up the appropriate values for all the gss-related options. diff --git a/docs/source/index.rst b/docs/source/index.rst index b9a218b023..e4c9d7b94d 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,9 +1,5 @@ :sd_hide_title: -################################# -Welcome to AiiDA's documentation! -################################# - .. grid:: :reverse: :gutter: 2 3 3 3 @@ -23,7 +19,8 @@ Welcome to AiiDA's documentation! .. rubric:: AiiDA - An open-source Python infrastructure to help researchers with automating, managing, persisting, sharing and reproducing the complex workflows associated with modern computational science and all associated data (see :ref:`features`). + An open-source Python infrastructure to help researchers with automating, managing, persisting, sharing and + reproducing the complex workflows associated with modern computational science and all associated data (see :ref:`features`). **aiida-core version:** |release| @@ -32,15 +29,32 @@ Welcome to AiiDA's documentation! .. grid:: 1 2 2 2 :gutter: 3 - .. 
grid-item-card:: :fa:`rocket;mr-1` Getting Started + .. grid-item-card:: :fa:`circle-play;mr-1` Introduction + :text-align: center + :shadow: md + + Overview of what AiiDA is and what it can do. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: intro/index + :ref-type: doc + :click-parent: + :expand: + :color: primary + :outline: + + To the introduction + + .. grid-item-card:: :fa:`rocket;mr-1` Installation :text-align: center :shadow: md - AiiDA installation, configuration and troubleshooting. + Installation guides and troubleshooting. +++++++++++++++++++++++++++++++++++++++++++++ - .. button-ref:: intro/get_started + .. button-ref:: installation/index :ref-type: doc :click-parent: :expand: @@ -87,7 +101,7 @@ Welcome to AiiDA's documentation! :text-align: center :shadow: md - Background information on AiiDA's underlying concepts. + Background information on AiiDA concepts. +++++++++++++++++++++++++++++++++++++++++++++ @@ -104,7 +118,7 @@ Welcome to AiiDA's documentation! :text-align: center :shadow: md - Comprehensive documentation of AiiDA components: command-line interface, Python interface, and RESTful API. + Comprehensive documentation of CLI, Python API and REST API. +++++++++++++++++++++++++++++++++++++++++++++ @@ -121,7 +135,7 @@ Welcome to AiiDA's documentation! :text-align: center :shadow: md - Notes on AiiDA's design and architecture aimed at core developers. + Detailed information on AiiDA's design and architecture. +++++++++++++++++++++++++++++++++++++++++++++ @@ -154,15 +168,16 @@ Welcome to AiiDA's documentation! :hidden: intro/index + installation/index tutorials/index howto/index topics/index reference/index internals/index -*********** +=========== How to cite -*********** +=========== If you use AiiDA for your research, please cite the following work: @@ -175,9 +190,9 @@ If the ADES concepts are referenced, please also cite: .. highlights:: Pizzi, Giovanni, Andrea Cepellotti, Riccardo Sabatini, Nicola Marzari, and Boris Kozinsky. *AiiDA: automated interactive infrastructure and database for computational science*, Computational Materials Science **111**, 218-230 (2016); DOI: `10.1016/j.commatsci.2015.09.013 `_ -**************** +================ Acknowledgements -**************** +================ AiiDA is supported by the `MARVEL National Centre of Competence in Research`_, the `MaX European Centre of Excellence`_ and by a number of other supporting projects, partners and institutions, whose complete list is available on the `AiiDA website acknowledgements page`_. diff --git a/docs/source/installation/docker.rst b/docs/source/installation/docker.rst new file mode 100644 index 0000000000..d1a2a2f22e --- /dev/null +++ b/docs/source/installation/docker.rst @@ -0,0 +1,181 @@ +.. _installation:docker: + +====== +Docker +====== + +The AiiDA team maintains a number of `Docker `_ images on `Docker Hub `_. +These images contain a fully pre-configured AiiDA environment which make it easy to get started using AiiDA if you are familiar with Docker. + +Currently, there are three image variants: + +.. grid:: auto + :gutter: 3 + + .. grid-item-card:: :fa:`bullseye;mr-1` aiida-core-base + :text-align: center + :shadow: md + + This is the base image. + It comes just with the ``aiida-core`` package installed. + It expects that the RabbitMQ and PostgreSQL services are provided. + + + .. 
grid-item-card:: :fa:`puzzle-piece;mr-1` aiida-core-with-services
+        :text-align: center
+        :shadow: md
+
+        This image builds on top of ``aiida-core-base`` but also installs RabbitMQ and PostgreSQL as services inside the image.
+        This image is therefore complete and ready to be used.
+
+
+    .. grid-item-card:: :fa:`code;mr-1` aiida-core-dev
+        :text-align: center
+        :shadow: md
+
+        This image builds on top of ``aiida-core-with-services`` with the only difference that the ``aiida-core`` package is installed from source in editable mode.
+        This makes this image suitable for development of the ``aiida-core`` package.
+
+
+Start a container
+=================
+
+To start a container from an image, run:
+
+.. code-block:: console
+
+    docker run -it --name aiida aiidateam/aiida-core-with-services:latest bash
+
+In this example, the ``aiida-core-with-services`` image is started where ``latest`` refers to the latest tag.
+The ``--name`` option is optional but is recommended as it makes it easier to restart the same container at a later point in time.
+The ``-it`` option is used to run the container in interactive mode and to allocate a pseudo-TTY.
+After the container start-up has finished, a bash shell inside the container is opened.
+
+An AiiDA profile is automatically created when the container is started.
+By default the profile is created using the ``core.psql_dos`` storage plugin and a default user is created.
+See section :ref:`container configuration ` for how to customize certain parts of this setup.
+
+To confirm that everything is up and running as required, run:
+
+.. code-block:: console
+
+    verdi status
+
+which should show something like::
+
+    ✔ version: AiiDA v2.5.1
+    ✔ config: /home/aiida/.aiida
+    ✔ profile: default
+    ✔ storage: Storage for 'default' [open] @ postgresql://aiida:***@localhost:5432
+    ✔ rabbitmq: Connected to RabbitMQ v3.10.18 as amqp://guest:guest@127.0.0.1:5672
+    ✔ daemon: Daemon is running with PID 324
+
+If all checks show green check marks, the container is ready to go.
+The container can be shut down by typing ``exit`` or pressing ``CTRL+d``.
+The container can be restarted at a later time; see :ref:`restarting a container ` for details.
+Any data that was created in a previous session is still available.
+
+.. caution::
+
+    When the container is not just stopped but *deleted*, any data stored in the container, including the data stored in the profile's storage, is permanently lost.
+    To ensure the data is not lost, it should be persisted on a volume that is mounted to the container.
+    Refer to the section on :ref:`persisting data ` for more details.
+
+
+.. _installation:docker:restarting-container:
+
+Restarting a container
+======================
+
+After shutting down a container, it can be restarted with:
+
+.. code-block:: console
+
+    docker start -i aiida
+
+The name ``aiida`` here is the reference given with the ``--name`` option when the container was originally created.
+To open an interactive bash shell inside the container, run:
+
+.. code-block:: console
+
+    docker exec -it aiida bash
+
+
+.. _installation:docker:persisting-data:
+
+Persisting data
+===============
+
+The preferred way to persistently store data across Docker containers is to `create a volume `__.
+To create a simple volume, run:
+
+.. code-block:: console
+
+    docker volume create container-home-data
+
+In this case, one needs to specifically mount the volume the very first time that the container is created:
+
+.. 
code-block:: console + + docker run -it --name aiida -v container-home-data:/home/aiida aiidateam/aiida-core-with-services:latest bash + +By mounting the volume, any data that gets stored in the ``/home/aiida`` path within the container is stored in the ``container-home-data`` volume and therefore persists even if the container is deleted. + +When installing packages with pip, use the ``--user`` flag to store the Python packages installed in the mounted volume (if you mount the home specifically to a volume as mentioned above) permanently. +The packages will be installed in the ``/home/aiida/.local`` directory of the container, which is mounted on the ``container-home-data`` volume. + +You can also mount a folder in the container to a local directory, please refer to the `Docker documentation `__ for more information. + + +.. _installation:docker:container-configuration: + +Container configuration +======================= + +Upon container creation, the following environment variables can be set to configure the default profile that is created: + +* ``AIIDA_PROFILE_NAME``: the name of the profile to be created (default: ``default``) +* ``AIIDA_USER_EMAIL``: the email of the default user to be created (default: ``aiida@localhost``) +* ``AIIDA_USER_FIRST_NAME``: the first name of the default user to be created (default: ``Giuseppe``) +* ``AIIDA_USER_LAST_NAME``: the last name of the default user to be created (default: ``Verdi``) +* ``AIIDA_USER_INSTITUTION``: the institution of the default user to be created (default: ``Khedivial``) + +These environment variables can be set when starting the container with the ``-e`` option. + +.. note:: + + The ``AIIDA_CONFIG_FILE`` variable points to a path inside the container. + Therefore, if you want to use a custom configuration file, it needs to be mounted from the host path to the container path. + +.. _installation:docker:container-backup: + +Container backup +================ + +To backup the data of AiiDA, you can follow the instructions in the `Backup and restore `__ section. +However, Docker provides a convenient way to backup the container data by taking a snapshot of the entire container or the mounted volume(s). + +The following is adapted from the `Docker documentation `__. +If you don't have a volume mounted to the container, you can backup the whole container by committing the container to an image: + +.. code-block:: console + + docker container commit aiida aiida-container-backup + +The above command will create from the container ``aiida`` a new image named ``aiida-container-backup``, containing all the data and modifications made in the container. +The container can then be exported to a tarball and for it to be stored permanently: + +.. code-block:: console + + docker save -o aiida-container-backup.tar aiida-container-backup + +To restore the container, pull the image, or load from the tarball: + +.. code-block:: console + + docker load -i aiida-container-backup.tar + +This creates a container that can then be started with ``docker start``. + +Any `named volumes `__, can be backed up independently. +Refer to `Backup, restore, or migrate data volumes `__ for more information. diff --git a/docs/source/installation/guide_complete.rst b/docs/source/installation/guide_complete.rst new file mode 100644 index 0000000000..49136bbba7 --- /dev/null +++ b/docs/source/installation/guide_complete.rst @@ -0,0 +1,408 @@ +.. 
_installation:guide-complete: + +=========================== +Complete installation guide +=========================== + +The :ref:`quick installation guide ` is designed to make the setup as simple and portable as possible. +However, the resulting setup has some :ref:`limitations ` concerning the available functionality and performance. +This guide provides detailed information and instructions to set up a feature-complete and performant installation. + +Setting up a working installation of AiiDA, involves the following steps: + +#. :ref:`Install the Python Package ` +#. :ref:`(Optional) RabbitMQ ` +#. :ref:`Create a profile ` + + +.. _installation:guide-complete:python-package: + +Install Python Package +====================== + +.. important:: + AiiDA requires a recent version of Python. + Please refer to the `Python Package Index (PyPI) `_ for the minimum required version. + +To install AiiDA, the ``aiida-core`` Python package needs to be installed which can be done in a number of ways: + + +.. tab-set:: + + .. tab-item:: pip + + Installing ``aiida-core`` from PyPI. + + #. Install `pip `_. + #. Install ``aiida-core``: + + .. code-block:: console + + pip install aiida-core + + .. tab-item:: conda + + Installing ``aiida-core`` using Conda. + + #. Install `conda `_. + + #. Create an environment and install ``aiida-core``: + + .. code-block:: console + + conda create -n aiida -c conda-forge aiida-core + + .. tip:: + As of conda v23.10, the `dependency solver `_ has been significantly improved. + If you are experiencing long installation times, you may want to consider updating conda. + + .. tab-item:: source + + Installing ``aiida-core`` directory from source. + + #. Install `git `_ + #. Clone the repository from Github + + .. code-block:: console + + git clone https://github.com/aiidateam/aiida-core + + #. Install `pip `_. + #. Install ``aiida-core``: + + .. code-block:: console + + cd aiida-core + pip install -e . + + The ``-e`` flag installs the package in editable mode which is recommended for development. + Any changes made to the source files are automatically picked up by the installation. + + +.. _installation:guide-complete:python-package:optional-requirements: + +Optional requirements +--------------------- + +The ``aiida-core`` Python package defines a number of optional requirements, subdivided in the following categories: + +* ``atomic_tools`` : Requirements to deal with atomic data and structures +* ``docs`` : Requirements to build the documentation +* ``notebook`` : Requirements to run AiiDA in Jupyter notebooks +* ``pre-commit`` : Requirements to automatically format and lint source code for development +* ``rest`` : Requirements to run the REST API +* ``ssh_kerberos`` : Requirements for enabling SSH authentication through Kerberos +* ``tests`` : Requirements to run the test suite +* ``tui`` : Requirements to provide a textual user interface (TUI) + +These optional requirements can be installed using pip by adding them as comma separated list, for example: + +.. code-block:: console + + pip install aiida-core[atomic_tools,docs] + + +.. _installation:guide-complete:rabbitmq: + +RabbitMQ +======== + +`RabbitMQ `_ is an optional but recommended service for AiiDA. +It is a messsage broker that is required to run AiiDA's daemon. +The daemon is a system process that runs in the background that manages one or multiple daemon workers that can run AiiDA processes. 
+This way, the daemon helps AiiDA to scale as it is possible to run many processes in parallel on the daemon workers instead of blockingly in a single Python interpreter. +To facilitate communication with the daemon workers, RabbitMQ is required. + +Although it is possible to run AiiDA without a daemon it does provide significant benefits and therefore it is recommended to install RabbitMQ. + +.. tab-set:: + + .. tab-item:: conda + + #. Install `conda `_. + + #. Create an environment and install ``aiida-core.services``: + + .. code-block:: console + + conda create -n aiida -c conda-forge aiida-core.services + + .. important:: + + The ``aiida-core.services`` package ensures that RabbitMQ is installed in the conda environment. + However, it is not a _service_, in the sense that it is not automatically started, but has to be started manually. + + .. code-block:: console + + rabbitmq-server -detached + + Note that this has to be done each time after the machine has been rebooted. + The server can be stopped with: + + .. code-block:: console + + rabbitmqctl stop + + + .. tab-item:: Ubuntu + + #. Install RabbitMQ through the ``apt`` package manager: + + .. code-block:: console + + sudo apt install rabbitmq-server + + This should automatically install startup scripts such that the server is automatically started when the machine boots. + + + .. tab-item:: MacOS X + + #. Install `Homebrew `. + + #. Install RabbitMQ: + + .. code-block:: console + + brew install rabbitmq + brew services start rabbitmq + + .. important:: + + The service has to manually be started each time the machine reboots. + + .. tab-item:: Other + + For all other cases, please refer to the `official documentation `_ of RabbitMQ. + + + +.. _installation:guide-complete:create-profile: + +Create a profile +================ + +After the ``aiida-core`` package is installed, a profile needs to be created. +A profile defines where the data generated by AiiDA is to be stored. +The data storage can be customized through plugins and so the required configuration changes based on the selected storage plugin. + +To create a new profile, run: + +.. code-block:: console + + verdi profile setup + +where ```` is the entry point name of the storage plugin selected for the profile. +To list the available storage plugins, run: + +.. code-block:: console + + verdi plugin list aiida.storage + +AiiDA ships with a number of storage plugins and it is recommended to select one of the following: + +.. grid:: 1 2 2 2 + :gutter: 3 + + .. grid-item-card:: :fa:`feather;mr-1` ``core.sqlite_dos`` + :text-align: center + :shadow: md + + Use this for use-cases to explore AiiDA where performance is not critical. + + This storage plugin does not require any services, making it easy to install and use. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: installation:guide-complete:create-profile:core-sqlite-dos + :click-parent: + :expand: + :color: primary + :outline: + + Create a ``core.sqlite_dos`` profile + + .. grid-item-card:: :fa:`bolt;mr-1` ``core.psql_dos`` + :text-align: center + :shadow: md + + Use this for production work where database performance is important. + + This storage plugin uses PostgreSQL for the database and provides the greatest performance. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: installation:guide-complete:create-profile:core-psql-dos + :click-parent: + :expand: + :color: primary + :outline: + + Create a ``core.psql_dos`` profile + + +.. 
seealso::
+
+    See the :ref:`topic on storage ` for a more detailed overview of the storage plugins provided by ``aiida-core`` with their strengths and weaknesses.
+
+Other packages may provide additional storage plugins, which are also installable through ``verdi profile setup``.
+
+
+.. _installation:guide-complete:create-profile:common-options:
+
+Common options
+--------------
+
+The exact options available for the ``verdi profile setup`` command depend on the selected storage plugin, but there are a number of common options and shared functionality:
+
+* ``--profile``: The name of the profile.
+* ``--set-as-default``: Whether the new profile should be defined as the new default.
+* ``--email``: Email for the default user that is created.
+* ``--first-name``: First name for the default user that is created.
+* ``--last-name``: Last name for the default user that is created.
+* ``--institution``: Institution for the default user that is created.
+* ``--use-rabbitmq/--no-use-rabbitmq``: Whether to configure the RabbitMQ broker.
+  Required to enable the daemon and to submit processes to it.
+  The default is ``--use-rabbitmq``, in which case the command tries to connect to RabbitMQ running on the localhost with default connection parameters.
+  If this fails, a warning is issued and the profile is configured without a broker.
+  Once the profile is created, RabbitMQ can still be enabled through ``verdi profile configure-rabbitmq``, which allows customizing the connection parameters.
+* ``--non-interactive``: By default, the command prompts to specify a value for all options.
+  Alternatively, the ``--non-interactive`` flag can be specified, in which case the command never prompts and the options need to be specified directly on the command line.
+  This is useful when ``verdi profile setup`` is used in non-interactive environments, such as scripts.
+* ``--config``: Instead of passing all options through command line options, the values can be defined in a YAML file and its filepath passed through this option.
+
+
+.. _installation:guide-complete:create-profile:core-sqlite-dos:
+
+``core.sqlite_dos``
+-------------------
+
+This storage plugin uses `SQLite `_ and the `disk-objectstore `_ to store data.
+The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `.
+The installation instructions for SQLite depend on your system; please visit the `SQLite website `_ for details.
+
+Once the prerequisites are met, create a profile with:
+
+.. code-block:: console
+
+    verdi profile setup core.sqlite_dos
+
+The options specific to the ``core.sqlite_dos`` storage plugin are:
+
+* ``--filepath``: Filepath of the directory in which to store data for this backend.
+
+
+.. _installation:guide-complete:create-profile:core-psql-dos:
+
+``core.psql_dos``
+-----------------
+
+This storage plugin uses `PostgreSQL `_ and the `disk-objectstore `_ to store data.
+The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `.
+The storage plugin can connect to a PostgreSQL instance running on the localhost or on a server that can be reached over the internet.
+Instructions for installing PostgreSQL are beyond the scope of this guide.
+
+.. 
tip:: + + The creation of the PostgreSQL user and database as explained below is implemented in an automated way in the ``verdi presto`` command. + Instead of performing the steps below manually and running ``verdi profile setup core.psql_dos`` manually, it is possible to run: + + .. code-block:: + + verdi presto --use-postgres + +Before creating a profile, a database (and optionally a custom database user) has to be created. +First, connect to PostgreSQL using ``psql``, the `native command line client for PostgreSQL `_: + +.. code-block:: console + + psql -h -U -W + +If PostgreSQL is installed on the localhost, ```` can be replaced with ``localhost``, and the default ```` is ``postgres``. +While possible to use the ``postgres`` default user for the AiiDA profile, it is recommended to create a custom user: + +.. code-block:: sql + + CREATE USER aiida-user WITH PASSWORD ''; + +replacing ```` with a secure password. +The name ``aiida-user`` is just an example name and can be customized. +Note the selected username and password as they are needed when creating the profile later on. + +After the user has been created, create a database: + +.. code-block:: sql + + CREATE DATABASE aiida-database OWNER aiida-user ENCODING 'UTF8' LC_COLLATE='en_US.UTF-8' LC_CTYPE='en_US.UTF-8'; + +Again, the selected database name ``aiida-database`` is purely an example and can be customized. +Make sure that the ``OWNER`` is set to the user that was created in the previous step. +Next, grant all privileges on this database to the user: + +.. code-block:: sql + + GRANT ALL PRIVILEGES ON DATABASE aiida-database to aiida-user; + +After the database has been created, the interactive ``psql`` shell can be closed. +To test if the database was created successfully, run the following command: + +.. code-block:: console + + psql -h -d -U -W + +replacing ```` and ```` with the chosen names for the database and user in the previous steps, and providing the chosen password when prompted. + +Once the database has been created, create a profile with: + +.. code-block:: console + + verdi profile setup core.psql_dos + +The options specific to the ``core.psql_dos`` storage plugin are: + +* ``--database-engine`` The engine to use to connect to the database. +* ``--database-hostname`` The hostname of the PostgreSQL server. +* ``--database-port`` The port of the PostgreSQL server. +* ``--database-username`` The username with which to connect to the PostgreSQL server. +* ``--database-password`` The password with which to connect to the PostgreSQL server. +* ``--database-name`` The name of the database in the PostgreSQL server. +* ``--repository-uri`` URI to the file repository. + +.. _installation:guide-complete:validate-installation: + + +Validate installation +===================== + +Once a profile has been created, validate that everything is correctly set up with: + +.. code-block:: console + + verdi status + +The output should look something like the following:: + + ✔ version: AiiDA v2.5.1 + ✔ config: /path/.aiida + ✔ profile: profile-name + ✔ storage: SqliteDosStorage[/path/.aiida/repository/profile-name]: open, + ✔ broker: RabbitMQ v3.8.2 @ amqp://guest:guest@127.0.0.1:5672?heartbeat=600 + ⏺ daemon: The daemon is not running. + +If no lines show red crosses, AiiDA has been correctly installed and is ready to go. +When a new profile is created, the daemon will not yet be running, but it can be started using: + +.. code-block:: console + + verdi daemon start + +.. 
note:: + + The storage information depends on the storage plugin that was selected. + The broker may be shown as not having been configured which occurs for profiles created with the :ref:`quick installation method `. + This is fine, however, :ref:`some functionality is not supported ` for broker-less profiles. + + +.. admonition:: Not all green? + :class: warning + + If the status reports any problems, please refer to the :ref:`troubleshooting section `. diff --git a/docs/source/installation/guide_quick.rst b/docs/source/installation/guide_quick.rst new file mode 100644 index 0000000000..af9aaa8dc0 --- /dev/null +++ b/docs/source/installation/guide_quick.rst @@ -0,0 +1,85 @@ +.. _installation:guide-quick: + +======================== +Quick installation guide +======================== + +First, install the ``aiida-core`` Python package: + +.. code-block:: console + + pip install aiida-core + +.. attention:: + + AiiDA requires a recent version of Python. + Please refer to the `Python Package Index `_ for the minimum required version. + +Next, set up a profile where all data is stored: + +.. code-block:: console + + verdi presto + +Verify that the installation was successful: + +.. code-block:: console + + verdi status + +If none of the lines show a red cross, indicating a problem, the installation was successful and you are good to go. + +.. admonition:: What next? + :class: hint + + If you are a new user, we recommend to start with the :ref:`basic tutorial `. + Alternatively, check out the :ref:`next steps guide `. + +.. admonition:: Problems during installation? + :class: warning + + If you encountered any issues, please refer to the :ref:`troubleshooting section `. + +.. warning:: + + Not all AiiDA functionality is supported by the quick installation. + Please refer to the :ref:`section below ` for more information. + + +.. _installation:guide-quick:limitations: + +Quick install limitations +========================= + +Functionality +------------- + +Part of AiiDA's functionality requires a `message broker `_, with the default implementation using `RabbitMQ `_. +The message broker is used to allow communication with the :ref:`daemon `. +Since RabbitMQ is a separate service and is not always trivial to install, the quick installation guide sets up a profile that does not require it. +As a result, the daemon cannot be started and processes cannot be submitted to it but can only be run locally. + +.. note:: + The ``verdi presto`` command automatically checks if RabbitMQ is running on the localhost. + If it can successfully connect, it configures the profile with the message broker and therefore the daemon functionality will be available. + +.. tip:: + The connection parameters of RabbitMQ can be (re)configured after the profile is set up with ``verdi profile configure-rabbitmq``. + This can be useful when the RabbitMQ setup is different from the default that AiiDA checks for and the automatic configuration of ``verdi presto`` failed. + + +Performance +----------- + +The quick installation guide by default creates a profile that uses `SQLite `_ for the database. +Since SQLite does not require running a service, it is easy to install and use on essentially any system. +However, for certain use cases it is not going to be the most performant solution. +AiiDA also supports `PostgreSQL `_ which is often going to be more performant compared to SQLite. + +.. 
tip:: + If a PostgreSQL service is available, run ``verdi presto --use-postgres`` to set up a profile that uses PostgreSQL instead of SQLite. + The command tries to connect to the service and automatically create a user account and database to use for the new profile. + AiiDA provides defaults that work for most setups where PostgreSQL is installed on the localhost. + Should this fail, the connection parameters can be customized using the ``--postgres-hostname``, ``--postgres-port``, ``--postgres-username``, ``--postgres-password`` options. + +Please refer to the :ref:`complete installation guide ` for instructions to set up a feature-complete and performant installation. diff --git a/docs/source/installation/index.rst b/docs/source/installation/index.rst new file mode 100644 index 0000000000..38e350ab7a --- /dev/null +++ b/docs/source/installation/index.rst @@ -0,0 +1,97 @@ +.. _installation: + +.. toctree:: + :maxdepth: 2 + :hidden: + + guide_quick + guide_complete + docker + troubleshooting + +============ +Installation +============ + +.. grid:: 1 2 2 2 + :gutter: 3 + + .. grid-item-card:: :fa:`rocket;mr-1` Quick install + :text-align: center + :shadow: md + + Install AiiDA in the most simple way that should work on most systems. + Choose this method if you are new to AiiDA and simply want to try it out. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: guide_quick + :ref-type: doc + :click-parent: + :expand: + :color: primary + :outline: + + To the quick installation guide + + .. grid-item-card:: :fa:`info-circle;mr-1` Complete installation guide + :text-align: center + :shadow: md + + Install AiiDA with full control over the configuration. + Choose this method if you are an advanced user or you want to optimize the setup for your system. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: guide_complete + :ref-type: doc + :click-parent: + :expand: + :color: primary + :outline: + + To the complete installation guide + + +Preinstalled environments +========================= + +Instead of installing AiiDA manually, there are also solutions that provide an environment with AiiDA and its requirements installed and pre-configured: + +.. grid:: 1 2 2 2 + :gutter: 3 + + .. grid-item-card:: :fa:`cube;mr-1` Docker + :text-align: center + :shadow: md + + AiiDA provides a number of Docker containers that come with the batteries included. + This is a great option if you are already familiar with Docker. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-ref:: docker + :ref-type: doc + :click-parent: + :expand: + :color: primary + :outline: + + To the Docker installation guide + + .. grid-item-card:: :fa:`cloud;mr-1` Virtual machine + :text-align: center + :shadow: md + + Quantum Mobile is a Virtual Machine for computational materials science. + It comes with AiiDA installed as well as several materials simulation codes. + + +++++++++++++++++++++++++++++++++++++++++++++ + + .. button-link:: https://quantum-mobile.readthedocs.io/en/latest/ + :click-parent: + :expand: + :color: primary + :outline: + + To the Quantum Mobile website diff --git a/docs/source/intro/troubleshooting.rst b/docs/source/installation/troubleshooting.rst similarity index 97% rename from docs/source/intro/troubleshooting.rst rename to docs/source/installation/troubleshooting.rst index b90232fc2a..e70b82a17b 100644 --- a/docs/source/intro/troubleshooting.rst +++ b/docs/source/installation/troubleshooting.rst @@ -1,4 +1,4 @@ -.. _intro:troubleshooting: +.. 
_installation:troubleshooting: *************** Troubleshooting @@ -21,12 +21,12 @@ In the example output, all service have a green check mark and so should be runn If all services are up and running and you are still experiencing problems or if you have trouble with the installation of aiida-core and related services, consider the commonly encountered problems below. In case you are still experiencing problems, you can request support by opening a post on the `Discourse server `_. -.. _intro:troubleshooting:installation: +.. _installation:troubleshooting:installation: Installation issues ------------------- -.. _intro:troubleshooting:installation:rabbitmq: +.. _installation:troubleshooting:installation:rabbitmq: RabbitMQ incompatibility ........................ @@ -136,16 +136,16 @@ A way to do it is to add a line similar to the following to the ``~/.bashrc`` an .. _Stackoverflow link: http://stackoverflow.com/questions/21079820/how-to-find-pg-config-pathlink -.. _intro:troubleshooting:installation:postgresql-autodetect-issues: +.. _installation:troubleshooting:installation:postgresql-autodetect-issues: Autodetection of the PostgreSQL setup ..................................... -Sometimes AiiDA fails to autodetect the local configuration of PostgreSQL when running ``verdi quicksetup``. +Sometimes AiiDA fails to autodetect the local configuration of PostgreSQL when running ``verdi presto --use-postgres``. In that case try to: - 1. Create the database manually in PostgreSQL (see :ref:`here`). - 2. Then run the full ``verdi setup`` command (see :ref:`here`). + 1. Create the database manually in PostgreSQL (see :ref:`here`). + 2. Then run the full ``verdi profile setup core.psql_dos``. RabbitMQ Installation (Unix) diff --git a/docs/source/intro/cheatsheet.rst b/docs/source/intro/cheatsheet.rst deleted file mode 100644 index f91c7cc698..0000000000 --- a/docs/source/intro/cheatsheet.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. _intro:cheatsheet: - -===================== -The AiiDA cheat sheet -===================== - -The AiiDA cheat sheet gives a broad overview of the most commonly used `verdi` commands, the inheritance hierarchy of the main AiiDA classes, their attributes and methods, as well as a showcase of the `QueryBuilder`. - -When clicking on the embedded image, the pdf version will be opened in the browser. Where applicable, text elements contain hyperlinks to the relevant sections of the documentation. - -The file can also be :download:`downloaded <_cheatsheet/cheatsheet_v.pdf>` in two-page layout for printing. - -Happy exploring! - -.. image:: ./_cheatsheet/cheatsheet.png - :target: ../_static/cheatsheet_h.pdf diff --git a/docs/source/intro/get_started.rst b/docs/source/intro/get_started.rst deleted file mode 100644 index 52d1bb1e89..0000000000 --- a/docs/source/intro/get_started.rst +++ /dev/null @@ -1,98 +0,0 @@ -.. _intro:get_started: - -**************** -Getting started -**************** - -An AiiDA installation consists of three core components (plus any external codes you wish to run): - -* |aiida-core|: The main Python package and the associated ``verdi`` command line interface -* |PostgreSQL|: The service that manages the database that AiiDA uses to store data. -* |RabbitMQ|: The message broker used for communication within AiiDA. - -.. toctree:: - :maxdepth: 1 - :hidden: - - install_system - install_conda - run_docker - installation - -.. _intro:install:setup: -.. _intro:get_started:setup: - -Setup -===== - -There are multiple routes to setting up a working AiiDA environment. 
-Which of those is optimal depends on your environment and use case. -If you are unsure, use the :ref:`system-wide installation ` method. - -.. grid:: 1 2 2 2 - :gutter: 3 - - .. grid-item-card:: :fa:`desktop;mr-1` System-wide installation - - .. button-ref:: intro:get_started:system-wide-install - :ref-type: ref - :click-parent: - :class: btn-link - - Install all software directly on your workstation or laptop. - - Install the prerequisite services using standard package managers (apt, homebrew, etc.) with administrative privileges. - - .. grid-item-card:: :fa:`folder;mr-1` Installation into Conda environment - - .. button-ref:: intro:get_started:conda-install - :ref-type: ref - :click-parent: - :class: btn-link - - Install all software into an isolated conda environment. - - This method does not require administrative privileges, but involves manual management of start-up and shut-down of services. - - .. grid-item-card:: :fa:`cube;mr-1` Run via docker container - - .. button-ref:: intro:get_started:docker - :ref-type: ref - :click-parent: - :class: btn-link - - Run AiiDA and prerequisite services as a single docker container. - - Does not require the separate installation of prerequisite services. - Especially well-suited to get directly started on the **tutorials**. - - .. grid-item-card:: :fa:`cloud;mr-1` Run via virtual machine - - .. button-link:: https://quantum-mobile.readthedocs.io/ - :click-parent: - :class: btn-link - - Use a virtual machine with all the required software pre-installed. - - `Materials Cloud `__ provides both downloadable and web based VMs, - also incorporating pre-installed Materials Science codes. - -.. _intro:get_started:next: - -What's next? -============ - -After successfully completing one of the above outlined setup routes, if you are new to AiiDA, we recommed you go through the :ref:`Basic Tutorial `, -or see our :ref:`Next steps guide `. - -If however, you encountered some issues, proceed to the :ref:`troubleshooting section `. - -.. admonition:: In-depth instructions - :class: seealso title-icon-read-more - - For more detailed instructions on configuring AiiDA, :ref:`see the configuration how-to `. - -.. |aiida-core| replace:: `aiida-core `__ -.. |PostgreSQL| replace:: `PostgreSQL `__ -.. |RabbitMQ| replace:: `RabbitMQ `__ -.. |Homebrew| replace:: `Homebrew `__ diff --git a/docs/source/intro/index.rst b/docs/source/intro/index.rst index 779def1ae1..eb62171c21 100644 --- a/docs/source/intro/index.rst +++ b/docs/source/intro/index.rst @@ -1,15 +1,13 @@ +.. _intro: + +.. toctree:: + :maxdepth: 1 + :hidden: + ============ Introduction ============ -.. _intro:about: - - -************* -What is AiiDA -************* - - AiiDA is an open-source Python infrastructure to help researchers with automating, managing, persisting, sharing and reproducing the complex workflows associated with modern computational science and all associated data. AiiDA is built to support and streamline the four core pillars of the ADES model: Automation, Data, Environment, and Sharing (described `here `__). Some of the key features of AiiDA include: @@ -24,12 +22,3 @@ AiiDA is built to support and streamline the four core pillars of the ADES model * **Open source:** AiiDA is released under the `MIT open-source license `__. See also the `list of AiiDA-powered scientific publications `__ and `testimonials from AiiDA users `__. - - -.. 
toctree:: - :maxdepth: 1 - - get_started - ../tutorials/index - cheatsheet - troubleshooting diff --git a/docs/source/intro/install_conda.rst b/docs/source/intro/install_conda.rst deleted file mode 100644 index 1f8a3bfd82..0000000000 --- a/docs/source/intro/install_conda.rst +++ /dev/null @@ -1,163 +0,0 @@ -.. _intro:get_started:conda-install: - -*********************************** -Installation into Conda environment -*********************************** - -This installation route installs all necessary software -- including the prerequisite services PostgreSQL and RabbitMQ -- into a Conda environment. -This is the recommended method for users on shared systems and systems where the user has no administrative privileges. -If you want to install AiiDA onto you own personal workstation/laptop, it is recommanded to use the :ref:`system-wide installation `. - -.. important:: - - This installation method installs **all** software into a conda environment, including PostgreSQL and RabbitMQ. - See the :ref:`system-wide installation ` to use Conda only to install the AiiDA (core) Python package. - -.. grid:: 1 - :gutter: 3 - - .. grid-item-card:: Install prerequisite services + AiiDA (core) - - *Install the aiida-core package and all required services in a Conda environment.* - - #. We strongly recommend using ``mamba`` instead of the default ``conda`` (or environment resolution may time out). - Consider using `Mambaforge `_ when starting from scratch, or ``conda install -c conda-forge mamba``. - - #. Open a terminal and execute: - - .. code-block:: console - - $ mamba create -n aiida -c conda-forge aiida-core aiida-core.services - $ mamba activate aiida - - .. grid-item-card:: Start-up services and initialize data storage - - Before working with AiiDA, you must first initialize a database storage area on disk. - - .. code-block:: console - - (aiida) $ initdb -D mylocal_db - - This *database cluster* (located inside a folder named ``mylocal_db``) may contain a collection of databases (one per profile) that is managed by a single running server process. - We start this process with: - - .. code-block:: console - - (aiida) $ pg_ctl -D mylocal_db -l logfile start - - .. tip:: - - The default port ``5432`` may already be in use by another process. - In this case, you can pass the ``-o "-F -p "`` option to the ``pg_ctl`` command, ```` being the desired port number. - Then for the ``psql`` command, you can pass the ``-p `` option. - - .. admonition:: Further Reading - :class: seealso title-icon-read-more - - - `Creating a Database Cluster `__. - - `Starting the Database Server `__. - - - - Then, start the RabbitMQ server: - - .. code-block:: console - - (aiida) $ rabbitmq-server -detached - - .. important:: - - The services started this way will use the default ports on the machine. - Conflicts may happen if there are more than one user running AiiDA this way on the same machine, or you already have the server running in a system-wide installation. - To get around this issue, you can explicitly define the ports to be used. - - .. grid-item-card:: Setup profile - - Next, set up an AiiDA configuration profile and related data storage, with the ``verdi quicksetup`` command. - - .. code-block:: console - - (aiida) $ verdi quicksetup - Info: enter "?" for help - Info: enter "!" to ignore the default and set no value - Profile name: me - Email Address (for sharing data): me@user.com - First name: my - Last name: name - Institution: where-i-work - - .. 
tip:: - - In case of non-default ports are used for the *database cluster* and the RabbitMQ server, you can pass them using ``--db-port`` and ``--broker-port`` options respectively. - - - .. admonition:: Is AiiDA unable to auto-detect the PostgreSQL setup? - :class: attention title-icon-troubleshoot - - If you get an error saying that AiiDA has trouble autodetecting the PostgreSQL setup, you will need to do the manual setup explained in the :ref:`troubleshooting section`. - - Once the profile is up and running, you can start the AiiDA daemon(s): - - .. code-block:: console - - (aiida) $ verdi daemon start 2 - - .. important:: - - The verdi daemon(s) must be restarted after a system reboot. - - .. tip:: - - Do not start more daemons then there are physical processors on your system. - - .. grid-item-card:: Check setup - - To check that everything is set up correctly, execute: - - .. code-block:: console - - (aiida) $ verdi status - ✓ version: AiiDA v2.0.0 - ✓ config: /path/to/.aiida - ✓ profile: default - ✓ storage: Storage for 'default' @ postgresql://username:***@localhost:5432/db_name / file:///path/to/repository - ✓ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 - ✓ daemon: Daemon is running as PID 2809 since 2019-03-15 16:27:52 - - .. admonition:: Missing a checkmark or ecountered some other issue? - :class: attention title-icon-troubleshoot - - :ref:`See the troubleshooting section `. - - .. button-ref:: intro:get_started:next - :ref-type: ref - :expand: - :color: primary - :outline: - :class: sd-font-weight-bold - - What's next? - - .. grid-item-card:: Shut-down services - - After finishing with your aiida session, particularly if switching between profiles, you may wish to power down the daemon and the services: - - .. code-block:: console - - (aiida) $ verdi daemon stop - (aiida) $ pg_ctl -D mylocal_db stop - (aiida) $ rabbitmqctl stop - - .. grid-item-card:: Restart the services - - If you want to restart the services and the daemon: - - .. code-block:: console - - (aiida) $ pg_ctl -D mylocal_db start - (aiida) $ rabbitmq-server -detached - (aiida) $ verdi daemon start - - .. tip:: - - If different ports are used, you have to pass them here as well. diff --git a/docs/source/intro/install_system.rst b/docs/source/intro/install_system.rst deleted file mode 100644 index 48464cc981..0000000000 --- a/docs/source/intro/install_system.rst +++ /dev/null @@ -1,298 +0,0 @@ -.. _intro:get_started:system-wide-install: - -************************ -System-wide installation -************************ - -The system-wide installation will install the prerequisite services (PostgreSQL and RabbitMQ) via standard package managers such that their startup and shut-down is largely managed by the operating system. -The AiiDA (core) Python package is then installed either with Conda or pip. - -.. warning:: RabbitMQ v3.5 and below are EOL and not supported at all. For versions RabbitMQ v3.8.15 and up, AiiDA is not compatible with default server configurations. For details refer to the :ref:`dedicated troubleshooting section`. - -This is the *recommended* installation method to setup AiiDA on a personal laptop or workstation for the majority of users. - -.. grid:: 1 - :gutter: 3 - - .. grid-item-card:: Install prerequisite services - - AiiDA is designed to run on `Unix `_ operating systems and requires a `bash `_ or `zsh `_ shell, and Python >= 3.7. - - .. tab-set:: - - .. tab-item:: Ubuntu - - *AiiDA is tested on Ubuntu versions 16.04, 18.04, and 20.04.* - - Open a terminal and execute: - - .. 
code-block:: console - - $ sudo apt install git python3-dev python3-pip postgresql postgresql-server-dev-all postgresql-client rabbitmq-server - - .. tab-item:: MacOS X (Homebrew) - - The recommended installation method for Mac OS X is to use `Homebrew `__. - - #. Follow `this guide `__ to install Homebrew on your system if not installed yet. - - #. Open a terminal and execute: - - .. code-block:: console - - $ brew install postgresql rabbitmq git python - $ brew services start postgresql - $ brew services start rabbitmq - - .. tab-item:: Windows Subsystem for Linux - - *The following instructions are for setting up AiiDA on WSL 1/2 in combination with Ubuntu.* - - #. Installing RabbitMQ: - - * (WSL 1) Install and start the `Windows native RabbitMQ `_. - - * (WSL 2) Install RabbitMQ inside the the WSL: - - .. code-block:: console - - $ sudo apt install rabbitmq-server - - then start the ``rabbitmq`` server: - - .. code-block:: console - - $ sudo service rabbitmq-server start - - #. Install Python and PostgreSQL: - - .. code-block:: console - - $ sudo apt install postgresql postgresql-server-dev-all postgresql-client git python3-dev python-pip - - then start the PostgreSQL server: - - .. code-block:: console - - $ sudo service postgresql start - - .. dropdown:: How to setup WSL to automatically start services after system boot. - - Create a file ``start_aiida_services.sh`` containing the following lines: - - .. code-block:: console - - $ service postgresql start - $ service rabbitmq-server start # Only for WSL 2! - - and store it in your preferred location, e.g., the home directory. - Then make the file executable, and editable only by root users with: - - .. code-block:: console - - $ chmod a+x,go-w /path/to/start_aiida_services.sh - $ sudo chown root:root /path/to/start_aiida_services.sh - - Next, run - - .. code-block:: console - - $ sudo visudo - - and add the line - - .. code-block:: sh - - ALL=(root) NOPASSWD: /path/to/start_aiida_services.sh - - replacing ```` with your Ubuntu username. - This will allow you to run *only* this specific ``.sh`` file with ``root`` access (without password), without lowering security on the rest of your system. - - Now you can use the Windows Task Scheduler to automatically execute this file on startup: - - #. Open Task Scheduler. - - #. In the "Actions" menu, click "Create Task". - - #. In "General/Security options", select "Run whether user is logged on or not". - - #. In the "Triggers" tab, click "New...". - - #. In the "Begin the task:" dropdown, select "At startup". - - #. Click "OK" to confirm. - - #. In the "Actions" tab, click "New...". - - #. In the "Action" dropdown, select "Start a program". - - #. In the "Program/script" text field, add ``C:\Windows\System32\bash.exe``. - - #. In the "Add arguments (optional)" text field, add ``-c "sudo /path/to/start_aiida_services.sh"``. - - #. Click "OK" to confirm. - - #. Click "OK" to confirm the task. - - You can tweak other details of this task to fit your needs. - - .. tab-item:: Other - - #. Install RabbitMQ following the `instructions applicable to your system `__. - #. Install PostgreSQL following the `instructions applicable to your system `__. - - .. tip:: - - Alternatively use the :ref:`pure conda installation method `. - - .. grid-item-card:: Install AiiDA (core) - - .. tab-set:: - - .. tab-item:: pip + venv - - *Install the aiida-core package from PyPI into a virtual environment.* - - Open a terminal and execute: - - .. 
code-block:: console - - $ python -m venv ~/envs/aiida - $ source ~/envs/aiida/bin/activate - (aiida) $ pip install aiida-core - - .. important:: - - Make sure the ``python`` executable is for a Python version that is supported by AiiDA. - You can see the version using: - - .. code-block:: console - - $ python --version - - You can find the supported Python versions for the latest version of AiiDA `on the PyPI page `__. - - .. tip:: - - See the `venv documentation `__ if the activation command fails. - The exact command for activating a virtual environment differs slightly based on the used shell. - - .. dropdown:: :fa:`plus-circle` Installation extras - - There are additional optional packages that you may want to install, which are grouped in the following categories: - - * ``atomic_tools``: packages that allow importing and manipulating crystal structure from various formats - * ``ssh_kerberos``: adds support for ssh transport authentication through Kerberos - * ``REST``: allows a REST server to be ran locally to serve AiiDA data - * ``docs``: tools to build the documentation - * ``notebook``: jupyter notebook - to allow it to import AiiDA modules - * ``tests``: python modules required to run the automatic unit tests - * ``pre-commit``: pre-commit tools required for developers to enable automatic code linting and formatting - - In order to install any of these package groups, simply append them as a comma separated list in the ``pip`` install command, for example: - - .. code-block:: console - - (aiida) $ pip install aiida-core[atomic_tools,docs] - - .. dropdown:: :fa:`wrench` Kerberos on Ubuntu - - If you are installing the optional ``ssh_kerberos`` and you are on Ubuntu you might encounter an error related to the ``gss`` package. - To fix this you need to install the ``libffi-dev`` and ``libkrb5-dev`` packages: - - .. code-block:: console - - $ sudo apt-get install libffi-dev libkrb5-dev - - .. tab-item:: Conda - - *Install the aiida-core package in a Conda environment.* - - #. Make sure that conda is installed, e.g., by following `the instructions on installing Miniconda `__. - - #. Open a terminal and execute: - - .. code-block:: console - - $ conda create -yn aiida -c conda-forge aiida-core - $ conda activate aiida - - .. tab-item:: From source - - *Install the aiida-core package directly from the cloned repository.* - - Open a terminal and execute: - - .. code-block:: console - - $ git clone https://github.com/aiidateam/aiida-core.git - $ cd aiida-core/ - $ python -m venv ~/envs/aiida - $ source ~/envs/aiida/bin/activate - (aiida) $ pip install . - - .. grid-item-card:: Setup profile - - Next, set up an AiiDA configuration profile and related data storage, with the ``verdi quicksetup`` command. - - .. code-block:: console - - (aiida) $ verdi quicksetup - Info: enter "?" for help - Info: enter "!" to ignore the default and set no value - Profile name: me - Email Address (for sharing data): me@user.com - First name: my - Last name: name - Institution: where-i-work - - .. admonition:: Is AiiDA unable to auto-detect the PostgreSQL setup? - :class: attention title-icon-troubleshoot - - If you get an error saying that AiiDA has trouble autodetecting the PostgreSQL setup, you will need to do the manual setup explained in the :ref:`troubleshooting section`. - - .. grid-item-card:: Start verdi daemons - - Start the verdi daemon(s) that are used to run AiiDA workflows. - - .. code-block:: console - - (aiida) $ verdi daemon start 2 - - .. 
important:: - - The verdi daemon(s) must be restarted after a system reboot. - - .. tip:: - - Do not start more daemons then there are physical processors on your system. - - .. grid-item-card:: Check setup - - To check that everything is set up correctly, execute: - - .. code-block:: console - - (aiida) $ verdi status - ✓ version: AiiDA v2.0.0 - ✓ config: /path/to/.aiida - ✓ profile: default - ✓ storage: Storage for 'default' @ postgresql://username:***@localhost:5432/db_name / file:///path/to/repository - ✓ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 - ✓ daemon: Daemon is running as PID 2809 since 2019-03-15 16:27:52 - - At this point you should now have a working AiiDA environment, from which you can add and retrieve data. - - .. admonition:: Missing a checkmark or encountered some other issue? - :class: attention title-icon-troubleshoot - - :ref:`See the troubleshooting section `. - - .. button-ref:: intro:get_started:next - :ref-type: ref - :expand: - :color: primary - :outline: - :class: sd-font-weight-bold - - What's next? diff --git a/docs/source/intro/installation.rst b/docs/source/intro/installation.rst deleted file mode 100644 index 479ba53666..0000000000 --- a/docs/source/intro/installation.rst +++ /dev/null @@ -1,295 +0,0 @@ -.. _intro:install: -.. _intro:advanced-config: - -********************** -Advanced configuration -********************** - -This chapter covers topics that go beyond the :ref:`standard setup of AiiDA `. -If you are new to AiiDA, we recommend you first go through the :ref:`Basic Tutorial `, -or see our :ref:`Next steps guide `. - -.. _intro:install:database: - -Creating the database ---------------------- - -AiiDA uses a database to store the nodes, node attributes and other information, allowing the end user to perform fast queries of the results. -Currently, the highly performant `PostgreSQL`_ database is supported as a database backend. - -.. _PostgreSQL: https://www.postgresql.org/downloads - -.. admonition:: Find out more about the database - :class: seealso title-icon-read-more - - - `Creating a Database Cluster `__. - - `Starting the Database Server `__. - - :ref:`The database topic `. - -To manually create the database for AiiDA, you need to run the program ``psql`` to interact with postgres. -On most operating systems, you need to do so as the ``postgres`` user that was created upon installing the software. -To assume the role of ``postgres`` run as root: - -.. code-block:: console - - $ su - postgres - -(or, equivalently, type ``sudo su - postgres``, depending on your distribution) and launch the postgres program: - -.. code-block:: console - - $ psql - -.. tip:: - - If you have installed PostgreSQL through Conda and you see an error like ``psql: FATAL: role "" does not exist`` or ``psql: FATAL: database "" does not exist``, the default role and database apparently do no exist. - The command ``psql -l`` prints the list of existing databases and the associated roles. - You can try connecting to one of those by using the ``-d`` and ``-U`` option to specify the database and role, respectively, for example, ``psql -d template0 -U some-role``. - -Create a new database user account for AiiDA by running: - -.. code-block:: sql - - CREATE USER aiida WITH PASSWORD ''; - -replacing ```` with a password of your choice. - -You will need to provide the password again when you configure AiiDA to use this database through ``verdi setup``. -If you want to change the password you just created use the command: - -.. 
code-block:: sql - - ALTER USER aiida PASSWORD ''; - -Next, we create the database itself. We enforce the UTF-8 encoding and specific locales: - -.. code-block:: sql - - CREATE DATABASE aiidadb OWNER aiida ENCODING 'UTF8' LC_COLLATE='en_US.UTF-8' LC_CTYPE='en_US.UTF-8' TEMPLATE=template0; - -and grant all privileges on this DB to the previously-created ``aiida`` user: - -.. code-block:: sql - - GRANT ALL PRIVILEGES ON DATABASE aiidadb to aiida; - -You have now created a database for AiiDA and you can close the postgres shell by typing ``\q``. -To test if the database was created successfully, you can run the following command as a regular user in a bash terminal: - -.. code-block:: console - - $ psql -h localhost -d aiidadb -U aiida -W - -and type the password you inserted before, when prompted. -If everything worked well, you should get no error and see the prompt of the ``psql`` shell. - -If you use the same names as in the example commands above, then during the ``verdi setup`` phase the following parameters will apply to the newly created database: - -.. code-block:: console - - $ Database engine: postgresql_psycopg2 - $ Database host: localhost - $ Database port: 5432 - $ AiiDA Database name: aiidadb - $ AiiDA Database user: aiida - $ AiiDA Database password: - -.. admonition:: Don't forget to backup your database! - :class: tip title-icon-tip - - See the :ref:`Database backup how-to `), and :ref:`how to move your database `. - -Database setup using 'peer' authentication ------------------------------------------- - -On Ubuntu Linux, the default PostgreSQL setup is configured to use ``peer`` authentication, which allows password-less login via local Unix sockets. -In this mode, PostgreSQL compares the Unix user connecting to the socket with its own database of users and allows a connection if a matching user exists. - -.. note:: - - This is an alternative route to set up your database - the standard approach will work on Ubuntu just as well. - -Below we are going to take advantage of the command-line utilities shipped on Ubuntu to simplify creating users and databases compared to issuing the SQL commands directly. - -Assume the role of ``postgres``: - -.. code-block:: console - - $ sudo su postgres - -Create a database user with the **same name** as the UNIX user who will be running AiiDA (usually your login name): - -.. code-block:: console - - $ createuser - -replacing ```` with your username. - -Next, create the database itself with your user as the owner: - -.. code-block:: console - - $ createdb -O aiidadb - -Exit the shell to go back to your login user. -To test if the database was created successfully, try: - -.. code-block:: console - - $ psql aiidadb - -During the ``verdi setup`` phase, use ``!`` to leave host empty and specify your Unix user name as the *AiiDA Database user*.: - -.. code-block:: console - - $ Database engine: postgresql_psycopg2 - $ Database host: ! - $ Database port: 5432 - $ AiiDA Database name: aiidadb - $ AiiDA Database user: - $ AiiDA Database password: "" - - -RabbitMQ configuration ----------------------- - -In most normal setups, RabbitMQ will be installed and run as a service on the same machine that hosts AiiDA itself. -In that case, using the default configuration proposed during a profile setup will work just fine. -However, when the installation of RabbitMQ is not standard, for example it runs on a different port, or even runs on a completely different machine, all relevant connection details can be configured with ``verdi setup``. 
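For instance, a broker that runs on a different host might be configured along these lines (a sketch only; the option names are those listed in the table below, while the profile name, hostname, port and credentials are purely illustrative):

.. code-block:: console

    $ verdi setup --profile my-profile \
        --broker-protocol amqps \
        --broker-username aiida \
        --broker-password my-secret \
        --broker-host rmq.example.com \
        --broker-port 5671 \
        --broker-virtual-host aiida-vhost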
- -The following parameters can be configured: - -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Parameter | Option | Default | Explanation | -+==============+===========================+===============+=========================================================================================================================+ -| Protocol | ``--broker-protocol`` | ``amqp`` | The protocol to use, can be either ``amqp`` or ``amqps`` for SSL enabled connections. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Username | ``--broker-username`` | ``guest`` | The username with which to connect. The ``guest`` account is available and usable with a default RabbitMQ installation. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Password | ``--broker-password`` | ``guest`` | The password with which to connect. The ``guest`` account is available and usable with a default RabbitMQ installation. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Host | ``--broker-host`` | ``127.0.0.1`` | The hostname of the RabbitMQ server. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Port | ``--broker-port`` | ``5672`` | The port to which the server listens. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Virtual host | ``--broker-virtual-host`` | ``''`` | Optional virtual host. Should not contain the leading forward slash, this will be added automatically by AiiDA. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ -| Parameters | not available | n.a. | These are additional broker parameters that are typically encoded as URL parameters, for example, to specify SSL | -| | | | parameters such as the filepath to the certificate that is to be used. The parameters are currently not definable | -| | | | through the CLI but have to be added manually in the ``config.json``. A key ``broker_parameters`` should be added that | -| | | | is a dictionary, which can contain fields: ``cafile``, ``capath``, ``cadata``, ``certfile``, ``keyfile`` and | -| | | | ``no_verify_ssl``. | -+--------------+---------------------------+---------------+-------------------------------------------------------------------------------------------------------------------------+ - - -.. _intro:install:verdi_setup: - -verdi setup ------------ - -After the database has been created, do: - -.. code-block:: console - - $ verdi setup --profile - -where `` is a profile name of your choosing. -The ``verdi setup`` command will guide you through the setup process through a series of prompts. - -The first information asked is your email, which will be used to associate the calculations to you. 
-In AiiDA, the email is your username, and acts as a unique identifier when importing/exporting data from AiiDA. - -.. note:: - - The password, in the current version of AiiDA, is not used (it will be used only in the REST API and in the web interface). - If you leave the field empty, no password will be set and no access will be granted to the user via the REST API and the web interface. - -Then, the following prompts will help you configure the database. Typical settings are: - -.. code-block:: console - - $ Default user email: richard.wagner@leipzig.de - $ Database engine: postgresql_psycopg2 - $ PostgreSQL host: localhost - $ PostgreSQL port: 5432 - $ AiiDA Database name: aiida_dev - $ AiiDA Database user: aiida - $ AiiDA Database password: - $ AiiDA repository directory: /home/wagner/.aiida/repository/ - [...] - Configuring a new user with email 'richard.wagner@leipzig.de' - $ First name: Richard - $ Last name: Wagner - $ Institution: BRUHL, LEIPZIG - $ The user has no password, do you want to set one? [y/N] y - $ Insert the new password: - $ Insert the new password (again): - -.. admonition:: Don't forget to backup your data! - :class: tip title-icon-tip - - See the :ref:`installation backup how-to `. - -.. _intro:install:start_daemon: - -Managing the daemon -------------------- - -The AiiDA daemon process runs in the background and takes care of processing your submitted calculations and workflows, checking their status, retrieving their results once they are finished and storing them in the AiiDA database. - -The AiiDA daemon is controlled using three simple commands: - -* ``verdi daemon start``: start the daemon -* ``verdi daemon status``: check the status of the daemon -* ``verdi daemon stop``: stop the daemon - -.. note:: - - While operational, the daemon logs its activity to a file in ``~/.aiida/daemon/log/`` (or, more generally, ``$AIIDA_PATH/.aiida/daemon/log``). - Get the latest log messages via ``verdi daemon logshow``. - -.. _intro:install:jupyter: - -Using AiiDA in Jupyter ----------------------- - - 1. Install the AiiDA ``notebook`` extra **inside** the AiiDA python environment, e.g. by running ``pip install aiida-core[notebook]``. - - -With this setup, you're ready to use AiiDA in Jupyter notebooks. - -Start a Jupyter notebook server: - -.. code-block:: console - - $ jupyter notebook - -This will open a tab in your browser. Click on ``New -> Python``. - -To load the `aiida` magics extension, simply run: - -.. code-block:: ipython - - %load_ext aiida - -Now you can load a profile (the default unless specified) by: - -.. code-block:: ipython - - %aiida - -After executing the cell by ``Shift-Enter``, you should receive the message "Loaded AiiDA DB environment." -Otherwise, you can load the profile manually as you would in a Python script: - -.. code-block:: python - - from aiida import load_profile, orm - load_profile() - qb = orm.QueryBuilder() - # ... - -You can also run `verdi` CLI commands, using the currently loaded profile, by: - -.. code-block:: ipython - - %verdi status diff --git a/docs/source/intro/run_docker.rst b/docs/source/intro/run_docker.rst deleted file mode 100644 index 227dd4b263..0000000000 --- a/docs/source/intro/run_docker.rst +++ /dev/null @@ -1,236 +0,0 @@ -.. _intro:get_started:docker: -.. _intro:install:docker: - -**************************** -Run AiiDA via a Docker image -**************************** - -The AiiDA team maintains a `Docker `__ image on `Docker Hub `__. 
-This image contains a fully pre-configured AiiDA environment which makes it particularly useful for learning and developing purposes. - -.. caution:: - - All data stored in a container will persist only over the lifetime of that particular container (i.e., removing the container will also purge the data) unless you use volumes to persist the data, see :ref:`Advanced usage ` for more details. - -.. grid:: 1 - :gutter: 3 - - .. grid-item-card:: Install Docker on your PC - - Docker is available for Windows, Mac and Linux and can be installed in different ways. - - .. tab-set:: - - .. tab-item:: Colima on MacOS - - `Colima `_ is a new open-source project that makes it easy to run Docker on MacOS. - It is a lightweight alternative to Docker Engine with a focus on simplicity and performance. - - Colima is the recommended way. - With colima, you can have multiple Docker environments running at the same time, each with its own Docker daemon and resource allocation thus avoiding conflicts. - - To install the colima, on MacOS run: - - .. code-block:: console - - $ brew install colima - - Or check Check `here `__ for other installation options. - - After installation, start the docker daemon by: - - .. code-block:: console - - $ colima start - - .. tab-item:: Docker CE on Linux - - The bare minimum to run Docker on Linux is to install the `Docker Engine `_. - If you don't need a graphical user interface, this is the recommended way to install Docker. - - .. note:: - - You will need `root` privileges to perform the `post-installation steps `_. - Otherwise, you will need to use `sudo` for every Docker command. - - - - .. grid-item-card:: Start/stop container and use AiiDA interactively - - Start the image with the `docker command line interface (docker CLI) `_. - - There are differnt tags available for the AiiDA image, the ``latest`` tag is the image with the most recent stable version of ``aiida-core`` installed in the container. - You can replace the ``latest`` tag with the ``aiida-core`` or services version you want to use, check the `Docker Hub `_ for available tags. - - .. tab-set:: - - .. tab-item:: Docker CLI - - Use the Docker CLI to run the AiiDA container. - - .. code-block:: console - - $ docker run -it --name aiida-container-demo aiidateam/aiida-core-with-services:latest bash - - The ``-it`` option is used to run the container in interactive mode and to allocate a pseudo-TTY. - You will be dropped into a bash shell inside the container. - - You can specify a name for the container with the ``--name`` option for easier reference later on. - For the quick test, you can also use the ``--rm`` option to remove the container when it exits. - In the following examples, we will use the name ``aiida-container-demo`` for the container. - - To exit and stop the container, type ``exit`` or press ``Ctrl+D``. - - Please note the ``run`` sub-command is used to both create and start a container. - In order to start a container which is already created, you should use ``start``, by running: - - .. code-block:: console - - $ docker start -i aiida-container-demo - - If you need another shell inside the container, run: - - .. code-block:: console - - $ docker exec -it aiida-container-demo bash - - By default, an AiiDA profile is automatically set up inside the container. - To disable this default profile being created, set the ``SETUP_DEFAULT_AIIDA_PROFILE`` environment variable to ``false``. 
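For example, a throwaway container that skips the automatic profile creation could be started as follows (a sketch only; the container name is arbitrary and the image tag follows the conventions shown above):

.. code-block:: console

    $ docker run -it --rm --name aiida-container-demo \
        -e SETUP_DEFAULT_AIIDA_PROFILE=false \
        aiidateam/aiida-core-with-services:latest bash

The profile-related environment variables listed below can be passed in the same way with additional ``-e`` flags.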
- - The following environment variables can be set to configure the default AiiDA profile: - - * ``AIIDA_PROFILE_NAME``: the name of the profile to be created (default: ``default``) - * ``AIIDA_USER_EMAIL``: the email of the default user to be created (default: ``aiida@localhost``) - * ``AIIDA_USER_FIRST_NAME``: the first name of the default user to be created (default: ``Giuseppe``) - * ``AIIDA_USER_LAST_NAME``: the last name of the default user to be created (default: ``Verdi``) - * ``AIIDA_USER_INSTITUTION``: the institution of the default user to be created (default: ``Khedivial``) - * ``AIIDA_CONFIG_FILE``: the path to the AiiDA configuration file used for other profile configuration parameters (default: ``/aiida/assets/config-quick-setup.yaml``). - - These environment variables can be set when starting the container with the ``-e`` option. - - Please note that the ``AIIDA_CONFIG_FILE`` variable points to a path inside the container. - Therefore, if you want to use a custom configuration file, it needs to be mounted from the host path to the container path. - - .. grid-item-card:: Check setup - - The profile named ``default`` is created under the ``aiida`` user. - - To check the status of AiiDA environment setup, execute the following command inside the container shell: - - .. code-block:: console - - $ verdi status - ✓ config dir: /home/aiida/.aiida - ✓ profile: On profile default - ✓ repository: /home/aiida/.aiida/repository/default - ✓ postgres: Connected as aiida_qs_aiida_477d3dfc78a2042156110cb00ae3618f@localhost:5432 - ✓ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 - ✓ daemon: Daemon is running as PID 1795 since 2020-05-20 02:54:00 - - -Advanced usage -============== - -.. _intro:install:docker:advanced_usage: - -Congratulations! You have a working AiiDA environment, and can start using it. - -If you use the Docker image for development or production, you will likely need additional settings such as clone the repository and install `aiida-core` in the editable mode to make it work as expected. -See `development wiki `_ for more details. - -.. dropdown:: Copy files from your computer to the container - - .. tab-set:: - - .. tab-item:: Docker CLI - - Use the ``docker cp`` command if you need to copy files from your computer to the container or vice versa. - - For example, to copy a file named ``test.txt`` from your current working directory to the ``/home/aiida`` path in the container, run: - - .. code-block:: console - - $ docker cp test.txt aiida-container-demo:/home/aiida - - -.. dropdown:: Persist data across different containers - - The lifetime of the data stored in a container is limited to the lifetime of that particular container. - - If you stop the container (``docker stop`` or simply ``Ctrl+D`` from the container) and start it again, any data you created will persist. - However, if you remove the container, **all data will be removed as well**. - - .. code-block:: console - - $ docker rm aiida-container-demo - - The preferred way to persistently store data across Docker containers is to `create a volume `__. - - .. tab-set:: - - .. tab-item:: Docker CLI - - To create a simple volume, run: - - .. code-block:: console - - $ docker volume create container-home-data - - In this case, one needs to specifically mount the volume very first time that the container is being created: - - .. 
code-block:: console - - $ docker run -it --name aiida-container-demo -v container-home-data:/home/aiida aiidateam/aiida-core-with-services:latest bash - - Starting the container with the above command ensures that any data stored in the ``/home/aiida`` path within the container is stored in the ``container-home-data`` volume and therefore persists even if the container is removed. - - When installing packages with pip, use the ``--user`` flag to store the Python packages installed in the mounted volume (if you mount the home specifically to a volume as mentioned above) permanently. - The packages will be installed in the ``/home/aiida/.local`` directory of the container, which is mounted on the ``container-home-data`` volume. - - You can also mount a folder in container to a local directory, please refer to the `Docker documentation `__ for more information. - -.. dropdown:: Backup the container - - To backup the data of AiiDA, you can follow the instructions in the `Backup and restore `__ section. - However, Docker provides a convenient way to backup the container data by taking a snapshot of the entire container or the mounted volume(s). - - The following is adapted from the `Docker documentation `__. - - If you don't have a volume mounted to the container, you can backup the whole container by committing the container to an image: - - .. code-block:: console - - $ docker container commit aiida-container-demo aiida-container-backup - - The above command will create a new image named ``aiida-container-backup`` containing all the data and modifications you made in the container. - - Then, you can export the container to a local tarball and store it permanently: - - .. code-block:: console - - $ docker save -o aiida-container-backup.tar aiida-container-backup - - To restore the container, pull the image, or load from the tarball, run: - - .. code-block:: console - - $ docker load -i aiida-container-backup.tar - - You'll find a container in the list and you can then start it with ``docker start``. - - If you used a `named volume `__, you can backup the volume independently. - - .. tab-set:: - - .. tab-item:: Docker CLI - - Please check `Backup, restore, or migrate data volumes `__ for more information. - -.. button-ref:: intro:get_started:next - :ref-type: ref - :expand: - :color: primary - :outline: - :class: sd-font-weight-bold - - What's next? 
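As a supplement to the named-volume backup linked in the dropdown above, one common approach is to archive the volume with ``tar`` from a throwaway container (a sketch only; the volume and archive names are illustrative and assume the ``container-home-data`` volume created earlier):

.. code-block:: console

    $ docker run --rm -v container-home-data:/data -v $(pwd):/backup busybox \
        tar czf /backup/container-home-data.tar.gz -C /data .

Restoring works the same way in reverse, extracting the archive into a freshly created volume before starting a new container that mounts it.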
diff --git a/docs/source/redirects.txt b/docs/source/redirects.txt index 855d62ec73..50f8ed2029 100644 --- a/docs/source/redirects.txt +++ b/docs/source/redirects.txt @@ -1,18 +1,18 @@ developer_guide/core/transport.rst topics/transport.rst developer_guide/core/extend_restapi.rst internals/rest_api.rst -get_started/index.rst intro/get_started.rst +get_started/index.rst installation/index.rst get_started/computers.rst howto/run_codes.rst get_started/codes.rst howto/run_codes.rst howto/plugins.rst howto/plugins_develop.rst howto/exploring.rst howto/query.rst import_export/main.rst internals/storage/sqlite_zip.rst internals/data_storage.rst internals/storage/sqlite_zip.rst -install/quick_installation.rst intro/get_started.rst -install/prerequisites.rst intro/get_started.rst -install/installation.rst intro/get_started.rst +install/quick_installation.rst installation/index.rst +install/prerequisites.rst installation/index.rst +install/installation.rst installation/index.rst install/configuration.rst howto/installation.rst install/updating_installation.rst howto/installation.rst -install/troubleshooting.rst intro/troubleshooting.rst +install/troubleshooting.rst installation/troubleshooting.rst restapi/index.rst reference/rest_api.rst verdi/verdi_user_guide.rst topics/cli.rst working_with_aiida/index.rst howto/index.rst diff --git a/docs/source/reference/cheatsheet.rst b/docs/source/reference/cheatsheet.rst new file mode 100644 index 0000000000..7f636c882e --- /dev/null +++ b/docs/source/reference/cheatsheet.rst @@ -0,0 +1,16 @@ +.. _reference:cheatsheet: + +================= +AiiDA cheat sheet +================= + +The AiiDA cheat sheet gives a broad overview of the most commonly used ``verdi`` commands, the inheritance hierarchy of the main AiiDA classes, their attributes and methods, as well as a showcase of the ``QueryBuilder``. + +When clicking on the embedded image, the pdf version will be opened in the browser. Where applicable, text elements contain hyperlinks to the relevant sections of the documentation. + +The file can also be :download:`downloaded ` in two-page layout for printing. + +Happy exploring! + +.. image:: ./cheatsheet/cheatsheet.png + :target: ../_static/cheatsheet_h.pdf diff --git a/docs/source/intro/_cheatsheet/cheatsheet.png b/docs/source/reference/cheatsheet/cheatsheet.png similarity index 100% rename from docs/source/intro/_cheatsheet/cheatsheet.png rename to docs/source/reference/cheatsheet/cheatsheet.png diff --git a/docs/source/intro/_cheatsheet/cheatsheet.svg b/docs/source/reference/cheatsheet/cheatsheet.svg similarity index 100% rename from docs/source/intro/_cheatsheet/cheatsheet.svg rename to docs/source/reference/cheatsheet/cheatsheet.svg diff --git a/docs/source/intro/_cheatsheet/cheatsheet_v.pdf b/docs/source/reference/cheatsheet/cheatsheet_v.pdf similarity index 100% rename from docs/source/intro/_cheatsheet/cheatsheet_v.pdf rename to docs/source/reference/cheatsheet/cheatsheet_v.pdf diff --git a/docs/source/reference/index.rst b/docs/source/reference/index.rst index f0814e41bc..8553b84363 100644 --- a/docs/source/reference/index.rst +++ b/docs/source/reference/index.rst @@ -8,4 +8,5 @@ Reference command_line api/index rest_api + cheatsheet _changelog.md diff --git a/docs/source/topics/storage.rst b/docs/source/topics/storage.rst index 59d6761360..0d3b68d656 100644 --- a/docs/source/topics/storage.rst +++ b/docs/source/topics/storage.rst @@ -122,9 +122,9 @@ The command requires the PostgreSQL database to already exist and to be able to .. 
tip:: - Try the ``verdi quicksetup`` command to have the PostgreSQL database automatically created. + Try the ``verdi presto --use-postgres`` command to have the PostgreSQL database automatically created. Certain systems require root access to do so, causing the command to fail if it cannot obtain root access. - In this case, the database should be created manually (see :ref:`intro:install:database` for details). + In this case, the database should be created manually (see :ref:`installation:guide-complete:create-profile:core-psql-dos` for details). Once created, a profile can be created using the database with the command ``verdi profile setup core.psql_dos``. diff --git a/docs/source/tutorials/basic.md b/docs/source/tutorials/basic.md index 05bede4d50..f446cf2169 100644 --- a/docs/source/tutorials/basic.md +++ b/docs/source/tutorials/basic.md @@ -31,7 +31,7 @@ At the end of this tutorial, you will know how to: :::{important} If you are working on your own machine, note that the tutorial assumes that you have a working AiiDA installation and have set up your AiiDA profile in the current Python environment. -If this is not the case, consult the {ref}`getting started page`. +If this is not the case, consult the {ref}`getting started page`. ::: :::{tip} From 737da386b8564bc78e2f4a7fd9e2ae312accf03c Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 28 Jun 2024 23:43:57 +0200 Subject: [PATCH 28/82] Daemon: Fix `DbLogHandler` not being configured (#6491) Processes run through the daemon would no longer have their log messages attached to the database. This would result in `verdi process report` returning nothing. The problem is that the `start_worker` function would call `configure_logging` without setting `with_orm=True` and so the `DbLogHandler` would essentially be undone. An integration test is added as a regression test. --- src/aiida/engine/daemon/worker.py | 2 +- src/aiida/workflows/arithmetic/multiply_add.py | 2 +- tests/engine/daemon/test_worker.py | 16 ++++++++++++++++ tests/tools/dumping/test_processes.py | 2 +- 4 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/aiida/engine/daemon/worker.py b/src/aiida/engine/daemon/worker.py index 172155f078..913e44d9b7 100644 --- a/src/aiida/engine/daemon/worker.py +++ b/src/aiida/engine/daemon/worker.py @@ -44,7 +44,7 @@ def start_daemon_worker(foreground: bool = False) -> None: write to the daemon log file. 
""" daemon_client = get_daemon_client() - configure_logging(daemon=not foreground, daemon_log_file=daemon_client.daemon_log_file) + configure_logging(with_orm=True, daemon=not foreground, daemon_log_file=daemon_client.daemon_log_file) LOGGER.debug(f'sys.executable: {sys.executable}') LOGGER.debug(f'sys.path: {sys.path}') diff --git a/src/aiida/workflows/arithmetic/multiply_add.py b/src/aiida/workflows/arithmetic/multiply_add.py index c4f2e1eeda..b1d29b9aae 100644 --- a/src/aiida/workflows/arithmetic/multiply_add.py +++ b/src/aiida/workflows/arithmetic/multiply_add.py @@ -49,7 +49,7 @@ def add(self): """Add two numbers using the `ArithmeticAddCalculation` calculation job plugin.""" inputs = {'x': self.ctx.product, 'y': self.inputs.z, 'code': self.inputs.code} future = self.submit(ArithmeticAddCalculation, **inputs) - + self.report(f'Submitted the `ArithmeticAddCalculation`: {future}') return ToContext(addition=future) def validate_result(self): diff --git a/tests/engine/daemon/test_worker.py b/tests/engine/daemon/test_worker.py index 6b923e403a..f8807fccab 100644 --- a/tests/engine/daemon/test_worker.py +++ b/tests/engine/daemon/test_worker.py @@ -10,6 +10,8 @@ import pytest from aiida.engine.daemon.worker import shutdown_worker +from aiida.orm import Log +from aiida.workflows.arithmetic.multiply_add import MultiplyAddWorkChain @pytest.mark.requires_rmq @@ -24,3 +26,17 @@ async def test_shutdown_worker(manager): finally: # Reset the runner of the manager, because once closed it cannot be reused by other tests. manager._runner = None + + +@pytest.mark.usefixtures('aiida_profile_clean', 'started_daemon_client') +def test_logging_configuration(aiida_code_installed, submit_and_await): + """Integration test to verify that the daemon has the logging properly configured including the ``DbLogHandler``. + + This is a regression test to make sure that the ``DbLogHandler`` is properly configured for daemon workers, which + ensures that log messages are written to the log table in the database for the corresponding node. + """ + code = aiida_code_installed('add') + node = submit_and_await(MultiplyAddWorkChain, x=1, y=2, z=3, code=code) + logs = Log.collection.get_logs_for(node) + assert len(logs) == 1 + assert 'Submitted the `ArithmeticAddCalculation`' in next(log.message for log in logs) diff --git a/tests/tools/dumping/test_processes.py b/tests/tools/dumping/test_processes.py index 371dcb80a9..aab1a48abb 100644 --- a/tests/tools/dumping/test_processes.py +++ b/tests/tools/dumping/test_processes.py @@ -465,4 +465,4 @@ def test_generate_parent_readme(tmp_path, generate_workchain_multiply_add): # Check for outputs of `verdi process status/report/show` assert 'Finished [0] [3:result]' in contents assert 'Property Value' in contents - assert 'No log messages' in contents + assert 'There are 1 log messages for this calculation' in contents From 04926fe20da15065f8f086f1ff3cb14cc163aa08 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Sat, 29 Jun 2024 09:20:33 +0200 Subject: [PATCH 29/82] Engine: Catch `NotImplementedError`in `get_process_state_change_timestamp` (#6489) The `get_process_state_change_timestamp` utility calls the method `get_global_variable` on the storage backend to get the timestamp of the latest process state change, which is typically stored in the `db_dbsetting` table. However, not all storage plugins implement, most notably the `core.sqlite_zip` plugin. 
Since this is read-only, the settings table is never used and requesting the timestamp of the last process state change does not make sense. Since this utility is used in `verdi process list`, the command would error since the `NotImplementedError` was not caught. This is now the case and `verdi process list` will show "never" as the last state change. --- src/aiida/engine/utils.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/aiida/engine/utils.py b/src/aiida/engine/utils.py index 44b8319ddc..888089dc64 100644 --- a/src/aiida/engine/utils.py +++ b/src/aiida/engine/utils.py @@ -316,9 +316,13 @@ def get_process_state_change_timestamp(process_type: Optional[str] = None) -> Op for process_type_key in process_types: key = PROCESS_STATE_CHANGE_KEY.format(process_type_key) try: - time_stamp = backend.get_global_variable(key) - if time_stamp is not None: - timestamps.append(datetime.fromisoformat(str(time_stamp))) + try: + timestamp = backend.get_global_variable(key) + except NotImplementedError: + pass + else: + if timestamp is not None: + timestamps.append(datetime.fromisoformat(str(timestamp))) except KeyError: continue From 1d104d06b95da36c71cab132c7b6fec52a005e18 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Sat, 29 Jun 2024 22:00:24 +0200 Subject: [PATCH 30/82] ORM: Cache the logger adapter for `ProcessNode` (#6492) The logger adapter was recreated each time the `logger` property of the `ProcessNode` was invoked. It is now created once in the `logger` property. The created logger adapter is assigned to the `_logger_adapter` attribute such that it can simply be returned at the next invocation. The initialization of the adapter cannot be done in the constructor as that route is not taken if an existing node is loaded from the database. Finally, the `logger` property only creates and returns the adapter when the node is stored. Otherwise it simply returns the base logger instance. This is because the logger adapter only works for stored nodes and if it were instantiated at the point when the node is unstored, it would not be regenerated once the node is stored, and so the `DbLogHandler` will never be able to persist log messages to the database. --- src/aiida/orm/nodes/process/process.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/aiida/orm/nodes/process/process.py b/src/aiida/orm/nodes/process/process.py index a1223a86fb..003aa231e4 100644 --- a/src/aiida/orm/nodes/process/process.py +++ b/src/aiida/orm/nodes/process/process.py @@ -251,7 +251,16 @@ def logger(self): """ from aiida.orm.utils.log import create_logger_adapter - return create_logger_adapter(self._logger, self) + # If the node is not yet stored, there is no point in creating the logger adapter yet, as the ``DbLogHandler`` + # it configures, is only triggered for stored nodes, otherwise it cannot link the log message to the node. 
+ if not self.pk: + return self._logger + + # First time the property is called after the node is stored, create the logger adapter + if not hasattr(self, '_logger_adapter'): + self._logger_adapter = create_logger_adapter(self._logger, self) + + return self._logger_adapter @classmethod def recursive_merge(cls, left: dict[Any, Any], right: dict[Any, Any]) -> None: From 310ff1db77bc75b6cadedf77394b96af05456f43 Mon Sep 17 00:00:00 2001 From: Ali Khosravi Date: Sat, 29 Jun 2024 22:02:52 +0200 Subject: [PATCH 31/82] Docs: Clarify `Transport.copy` requires `recursive=True` if source is a directory (#6495) --- src/aiida/transports/plugins/ssh.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/aiida/transports/plugins/ssh.py b/src/aiida/transports/plugins/ssh.py index 7035290a61..c62f17a67b 100644 --- a/src/aiida/transports/plugins/ssh.py +++ b/src/aiida/transports/plugins/ssh.py @@ -1118,11 +1118,13 @@ def copy(self, remotesource, remotedestination, dereference=False, recursive=Tru Flags used: ``-r``: recursive copy; ``-f``: force, makes the command non interactive; ``-L`` follows symbolic links - :param remotesource: file to copy from + :param remotesource: file to copy from :param remotedestination: file to copy to :param dereference: if True, copy content instead of copying the symlinks only Default = False. - :param recursive: if True copy directories recursively, otherwise only copy the specified file(s) + :param recursive: if True copy directories recursively. + Note that if the `remotesource` is a directory, `recursive` should always be set to True. + Default = True. :type recursive: bool :raise OSError: if the cp execution failed. From a44e6433d197244589c12c49031abfb442ec809f Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Sun, 30 Jun 2024 08:38:14 +0200 Subject: [PATCH 32/82] Tests: Fix `tests.orm.nodes.test_node:test_delete_through_backend` (#6496) The test was failing with a `core.sqlite_dos` storage plugin for the test profile. The problem is that the last assert was checking that the logs for `data_two` were deleted because `data_two` itself had been deleted. However, since it was deleted, the ORM instance can no longer be used either, which was causing an exception. Instead, its pk should be recorded before deleting the node, and the final check should just use the pk directly. It is not quite clear why this test was not failing for the default `core.psql_dos` storage plugin that is used for tests. It should not be backend specific since both use SQLAlchemy for the ORM. --- tests/orm/nodes/test_node.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/orm/nodes/test_node.py b/tests/orm/nodes/test_node.py index 6b757d995a..ddcc586b84 100644 --- a/tests/orm/nodes/test_node.py +++ b/tests/orm/nodes/test_node.py @@ -837,17 +837,13 @@ def test_tab_completable_properties(self): class TestNodeDelete: """Tests for deleting nodes.""" - # TODO: Why is this failing for SQLite?? - # sqlalchemy.orm.exc.ObjectDeletedError: Instance '' has been deleted, - # or its row is otherwise not present. 
- # https://github.com/aiidateam/aiida-core/issues/6436 - @pytest.mark.requires_psql def test_delete_through_backend(self): """Test deletion works correctly through the backend.""" backend = get_manager().get_profile_storage() data_one = Data().store() data_two = Data().store() + data_two_pk = data_two.pk calculation = CalculationNode() calculation.base.links.add_incoming(data_one, LinkType.INPUT_CALC, 'input_one') calculation.base.links.add_incoming(data_two, LinkType.INPUT_CALC, 'input_two') @@ -866,7 +862,7 @@ def test_delete_through_backend(self): assert len(Log.collection.get_logs_for(data_one)) == 1 assert Log.collection.get_logs_for(data_one)[0].pk == log_one.pk - assert len(Log.collection.get_logs_for(data_two)) == 0 + assert len(Log.collection.find({'dbnode_id': data_two_pk})) == 0 def test_delete_collection_logs(self): """Test deletion works correctly through objects collection.""" From 24cfbe27e7408b78fca8e6f69799ebad3659400b Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Sun, 30 Jun 2024 08:39:28 +0200 Subject: [PATCH 33/82] `QueryBuilder`: Remove implementation for `has_key` in SQLite storage (#6497) The SQLite based storage plugins implemented the `has_key` operator for the `QueryBuilder`, however, the implementation is incorrect. At the very least the negation operator does not work. Since this can silently return incorrect query results, it is best to remove the implementation and raise an `NotImplementedError`. The same is done for `contains` which was not yet implemented but also didn't yet raise an explicit exception. --- src/aiida/storage/sqlite_zip/orm.py | 13 ++++--------- tests/cmdline/commands/test_calcjob.py | 12 +++++------- tests/orm/test_querybuilder.py | 1 + tests/storage/sqlite/test_orm.py | 5 +++-- tests/test_nodes.py | 1 + 5 files changed, 14 insertions(+), 18 deletions(-) diff --git a/src/aiida/storage/sqlite_zip/orm.py b/src/aiida/storage/sqlite_zip/orm.py index 494a733afb..ad0412f006 100644 --- a/src/aiida/storage/sqlite_zip/orm.py +++ b/src/aiida/storage/sqlite_zip/orm.py @@ -284,17 +284,12 @@ def _cast_json_type(comparator: JSON.Comparator, value: Any) -> Tuple[ColumnElem type_filter, casted_entity = _cast_json_type(database_entity, value) return case((type_filter, casted_entity.ilike(value, escape='\\')), else_=False) - # if operator == 'contains': - # to-do, see: https://github.com/sqlalchemy/sqlalchemy/discussions/7836 + if operator == 'contains': + # to-do, see: https://github.com/sqlalchemy/sqlalchemy/discussions/7836 + raise NotImplementedError('The operator `contains` is not implemented for SQLite-based storage plugins.') if operator == 'has_key': - return case( - ( - func.json_type(database_entity) == 'object', - func.json_each(database_entity).table_valued('key', joins_implicitly=True).c.key == value, - ), - else_=False, - ) + raise NotImplementedError('The operator `has_key` is not implemented for SQLite-based storage plugins.') if operator == 'in': type_filter, casted_entity = _cast_json_type(database_entity, value[0]) diff --git a/tests/cmdline/commands/test_calcjob.py b/tests/cmdline/commands/test_calcjob.py index 0b42ac0096..9fa6467d7f 100644 --- a/tests/cmdline/commands/test_calcjob.py +++ b/tests/cmdline/commands/test_calcjob.py @@ -241,7 +241,10 @@ def test_calcjob_outputcat(self): retrieved.base.repository._repository.put_object_from_filelike(io.BytesIO(b'5\n'), 'aiida.out') retrieved.base.repository._update_repository_metadata() - def test_calcjob_cleanworkdir_basic(self, pytestconfig): + # This currently fails with sqlite 
backend since the filtering relies on the `has_key` filter which is not + # implemented in SQLite, see https://github.com/aiidateam/aiida-core/pull/6497 + @pytest.mark.requires_psql + def test_calcjob_cleanworkdir_basic(self): """Test verdi calcjob cleanworkdir""" # Specifying no filtering options and no explicit calcjobs should exit with non-zero status options = [] @@ -261,17 +264,12 @@ def test_calcjob_cleanworkdir_basic(self, pytestconfig): # The flag should have been set assert self.result_job.outputs.remote_folder.base.extras.get('cleaned') is True - # TODO: This currently fails with sqlite backend, - # since the filtering relies on the `has_key` filter which is not implemented in SQLite. - # https://github.com/aiidateam/aiida-core/issues/6256 - marker_opt = pytestconfig.getoption('-m') - if 'not requires_psql' in marker_opt or 'presto' in marker_opt: - pytest.xfail('Known sqlite backend failure') # Do it again should fail as the calcjob has been cleaned options = ['-f', str(self.result_job.uuid)] result = self.cli_runner.invoke(command.calcjob_cleanworkdir, options) assert result.exception is not None, result.output + @pytest.mark.requires_psql def test_calcjob_cleanworkdir_advanced(self): # Check applying both p and o filters for flag_p in ['-p', '--past-days']: diff --git a/tests/orm/test_querybuilder.py b/tests/orm/test_querybuilder.py index e39f20a7b9..862474bc76 100644 --- a/tests/orm/test_querybuilder.py +++ b/tests/orm/test_querybuilder.py @@ -1537,6 +1537,7 @@ def test_iterall_with_store_group(self): for pk, pk_clone in zip(pks, [e[1] for e in sorted(pks_clone)]): assert orm.load_node(pk) == orm.load_node(pk_clone) + @pytest.mark.requires_psql @pytest.mark.usefixtures('aiida_profile_clean') def test_iterall_persistence(self, manager): """Test that mutations made during ``QueryBuilder.iterall`` context are automatically committed and persisted. 
diff --git a/tests/storage/sqlite/test_orm.py b/tests/storage/sqlite/test_orm.py index 549c433511..21c75f1302 100644 --- a/tests/storage/sqlite/test_orm.py +++ b/tests/storage/sqlite/test_orm.py @@ -89,8 +89,9 @@ ({'attributes.integer': {'in': [5, 6, 7]}}, 0), ({'attributes.integer': {'in': [1, 2, 3]}}, 1), # object operators - ({'attributes.dict': {'has_key': 'k'}}, 0), - ({'attributes.dict': {'has_key': 'key1'}}, 1), + # Reenable when ``has_key`` operator is implemented, see https://github.com/aiidateam/aiida-core/issues/6498 + # ({'attributes.dict': {'has_key': 'k'}}, 0), + # ({'attributes.dict': {'has_key': 'key1'}}, 1), ), ids=json.dumps, ) diff --git a/tests/test_nodes.py b/tests/test_nodes.py index 6f64ab6d2d..bd971d37db 100644 --- a/tests/test_nodes.py +++ b/tests/test_nodes.py @@ -162,6 +162,7 @@ def init_profile(self, aiida_localhost): """Initialize the profile.""" self.computer = aiida_localhost + @pytest.mark.requires_psql def test_with_subclasses(self): from aiida.plugins import DataFactory From 297c3c9821b22c511e80b82e504064eff326130a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 07:46:55 +0200 Subject: [PATCH 34/82] Devops: Bump `docker/bake-action` from 4 to 5 (#6500) Updates `docker/bake-action` from 4 to 5 - [Release notes](https://github.com/docker/bake-action/releases) - [Commits](https://github.com/docker/bake-action/compare/v4...v5) --- .github/workflows/docker-build-test.yml | 2 +- .github/workflows/docker-build.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-build-test.yml b/.github/workflows/docker-build-test.yml index a30ce9de63..7c48b38493 100644 --- a/.github/workflows/docker-build-test.yml +++ b/.github/workflows/docker-build-test.yml @@ -41,7 +41,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build images - uses: docker/bake-action@v4 + uses: docker/bake-action@v5 with: # Load to Docker engine for testing load: true diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 6201b93776..b278ec8349 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -54,7 +54,7 @@ jobs: - name: Build and upload to ghcr.io 📤 id: build - uses: docker/bake-action@v4 + uses: docker/bake-action@v5 with: push: true workdir: .docker/ From 61ae1a55b94c50979b4e47bb7572e1d4c9b2391f Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 1 Jul 2024 11:12:25 +0200 Subject: [PATCH 35/82] Dependencies: Update the requirements files (#6501) --- requirements/requirements-py-3.10.txt | 17 +++++++---------- requirements/requirements-py-3.11.txt | 19 ++++++++----------- requirements/requirements-py-3.12.txt | 7 ++++--- requirements/requirements-py-3.9.txt | 21 +++++++++------------ 4 files changed, 28 insertions(+), 36 deletions(-) diff --git a/requirements/requirements-py-3.10.txt b/requirements/requirements-py-3.10.txt index 3955a57530..7bd1c23ce4 100644 --- a/requirements/requirements-py-3.10.txt +++ b/requirements/requirements-py-3.10.txt @@ -4,14 +4,15 @@ # # pip-compile --extra=atomic_tools --extra=docs --extra=notebook --extra=rest --extra=tests --no-annotate --output-file=requirements/requirements-py-3.10.txt pyproject.toml # +accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 aio-pika==6.8.1 aiormq==3.3.1 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 +annotated-types==0.7.0 anyio==3.7.0 -appnope==0.1.3 archive-path==0.4.2 argon2-cffi==21.3.0 
argon2-cffi-bindings==21.2.0 @@ -34,7 +35,7 @@ click==8.1.3 click-spinner==0.1.10 comm==0.1.3 contourpy==1.1.0 -coverage==7.4.1 +coverage[toml]==7.4.1 cryptography==41.0.1 cycler==0.11.0 debugpy==1.6.7 @@ -44,7 +45,6 @@ deprecation==2.1.0 disk-objectstore==1.1.0 docstring-parser==0.15 docutils==0.20.1 -emmet-core==0.57.1 exceptiongroup==1.1.1 executing==1.2.0 fastjsonschema==2.17.1 @@ -66,6 +66,7 @@ ipywidgets==8.0.6 itsdangerous==2.1.2 jedi==0.18.2 jinja2==3.1.2 +joblib==1.4.2 jsonschema[format-nongpl]==3.2.0 jupyter==1.0.0 jupyter-cache==0.6.1 @@ -80,9 +81,8 @@ jupyterlab-widgets==3.0.7 kiwipy[rmq]==0.7.7 kiwisolver==1.4.4 latexcodec==2.0.1 -linkify-it-py==2.0.2 mako==1.2.4 -markdown-it-py[linkify,plugins]==3.0.0 +markdown-it-py==3.0.0 markupsafe==2.1.3 matplotlib==3.7.1 matplotlib-inline==0.1.6 @@ -91,7 +91,6 @@ mdurl==0.1.2 mistune==3.0.1 monty==2023.9.25 mpmath==1.3.0 -msgpack==1.0.5 multidict==6.0.4 myst-nb==1.0.0 myst-parser==2.0.0 @@ -133,6 +132,7 @@ pybtex==0.24.0 pycifrw==4.4.5 pycparser==2.21 pydantic==2.4.0 +pydantic-core==2.10.0 pydata-sphinx-theme==0.15.1 pygments==2.15.1 pymatgen==2023.9.25 @@ -147,7 +147,7 @@ pytest-benchmark==4.0.0 pytest-cov==4.1.0 pytest-datadir==1.4.1 pytest-regressions==2.4.2 -pytest-rerunfailures==12.0.0 +pytest-rerunfailures==12.0 pytest-timeout==2.2.0 python-dateutil==2.8.2 python-json-logger==2.0.7 @@ -161,7 +161,6 @@ qtpy==2.3.1 requests==2.31.0 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 -rich==13.4.2 ruamel-yaml==0.17.32 ruamel-yaml-clib==0.2.7 scipy==1.10.1 @@ -193,7 +192,6 @@ sympy==1.12 tabulate==0.9.0 tenacity==8.2.2 terminado==0.17.1 -textual==0.29.0 tinycss2==1.2.1 tomli==2.0.1 tornado==6.3.2 @@ -201,7 +199,6 @@ tqdm==4.65.0 traitlets==5.9.0 typing-extensions==4.6.3 tzdata==2023.3 -uc-micro-py==1.0.2 uncertainties==3.1.7 upf-to-json==0.9.5 urllib3==2.0.3 diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt index feedaae17a..db6593c6ba 100644 --- a/requirements/requirements-py-3.11.txt +++ b/requirements/requirements-py-3.11.txt @@ -2,16 +2,17 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --extra=atomic_tools --extra=docs --extra=notebook --extra=rest --extra=tests --no-annotate --output-file=requirements/requirements-py-3.11.txt pyproject.toml +# pip-compile --extra=atomic_tools --extra=docs --extra=notebook --extra=rest --extra=tests --no-annotate --output-file=requirements/requirements-py-3.9.txt pyproject.toml # +accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 aio-pika==6.8.1 aiormq==3.3.1 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 +annotated-types==0.7.0 anyio==3.7.0 -appnope==0.1.3 archive-path==0.4.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 @@ -34,7 +35,7 @@ click==8.1.3 click-spinner==0.1.10 comm==0.1.3 contourpy==1.1.0 -coverage==7.4.1 +coverage[toml]==7.4.1 cryptography==41.0.1 cycler==0.11.0 debugpy==1.6.7 @@ -44,7 +45,6 @@ deprecation==2.1.0 disk-objectstore==1.1.0 docstring-parser==0.15 docutils==0.20.1 -emmet-core==0.57.1 executing==1.2.0 fastjsonschema==2.17.1 flask==2.3.2 @@ -65,6 +65,7 @@ ipywidgets==8.0.6 itsdangerous==2.1.2 jedi==0.18.2 jinja2==3.1.2 +joblib==1.4.2 jsonschema[format-nongpl]==3.2.0 jupyter==1.0.0 jupyter-cache==0.6.1 @@ -79,9 +80,8 @@ jupyterlab-widgets==3.0.7 kiwipy[rmq]==0.7.7 kiwisolver==1.4.4 latexcodec==2.0.1 -linkify-it-py==2.0.2 mako==1.2.4 -markdown-it-py[linkify,plugins]==3.0.0 +markdown-it-py==3.0.0 markupsafe==2.1.3 matplotlib==3.7.1 matplotlib-inline==0.1.6 
@@ -90,7 +90,6 @@ mdurl==0.1.2 mistune==3.0.1 monty==2023.9.25 mpmath==1.3.0 -msgpack==1.0.5 multidict==6.0.4 myst-nb==1.0.0 myst-parser==2.0.0 @@ -132,6 +131,7 @@ pybtex==0.24.0 pycifrw==4.4.5 pycparser==2.21 pydantic==2.4.0 +pydantic-core==2.10.0 pydata-sphinx-theme==0.15.1 pygments==2.15.1 pymatgen==2023.9.25 @@ -146,7 +146,7 @@ pytest-benchmark==4.0.0 pytest-cov==4.1.0 pytest-datadir==1.4.1 pytest-regressions==2.4.2 -pytest-rerunfailures==12.0.0 +pytest-rerunfailures==12.0 pytest-timeout==2.2.0 python-dateutil==2.8.2 python-json-logger==2.0.7 @@ -160,7 +160,6 @@ qtpy==2.3.1 requests==2.31.0 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 -rich==13.4.2 ruamel-yaml==0.17.32 ruamel-yaml-clib==0.2.7 scipy==1.10.1 @@ -192,14 +191,12 @@ sympy==1.12 tabulate==0.9.0 tenacity==8.2.2 terminado==0.17.1 -textual==0.29.0 tinycss2==1.2.1 tornado==6.3.2 tqdm==4.65.0 traitlets==5.9.0 typing-extensions==4.6.3 tzdata==2023.3 -uc-micro-py==1.0.2 uncertainties==3.1.7 upf-to-json==0.9.5 urllib3==2.0.3 diff --git a/requirements/requirements-py-3.12.txt b/requirements/requirements-py-3.12.txt index 3246ddc471..78f4e3a8f5 100644 --- a/requirements/requirements-py-3.12.txt +++ b/requirements/requirements-py-3.12.txt @@ -11,6 +11,7 @@ aiormq==3.3.1 alabaster==0.7.13 alembic==1.12.0 aniso8601==9.0.1 +annotated-types==0.7.0 anyio==4.0.0 archive-path==0.4.2 argon2-cffi==23.1.0 @@ -34,7 +35,7 @@ click==8.1.7 click-spinner==0.1.10 comm==0.1.4 contourpy==1.1.1 -coverage==7.4.1 +coverage[toml]==7.4.1 cryptography==41.0.5 cycler==0.12.1 debugpy==1.8.0 @@ -130,6 +131,7 @@ pybtex==0.24.0 pycifrw==4.4.5 pycparser==2.21 pydantic==2.4.0 +pydantic-core==2.10.0 pydata-sphinx-theme==0.15.1 pygments==2.16.1 pymatgen==2023.10.11 @@ -144,7 +146,7 @@ pytest-benchmark==4.0.0 pytest-cov==4.1.0 pytest-datadir==1.5.0 pytest-regressions==2.5.0 -pytest-rerunfailures==12.0.0 +pytest-rerunfailures==12.0 pytest-timeout==2.2.0 python-dateutil==2.8.2 python-json-logger==2.0.7 @@ -184,7 +186,6 @@ sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.9 sphinxext-rediraffe==0.2.7 sqlalchemy==2.0.23 -sqlalchemy-utils==0.37.9 stack-data==0.6.3 sympy==1.12 tabulate==0.9.0 diff --git a/requirements/requirements-py-3.9.txt b/requirements/requirements-py-3.9.txt index 5b0d89b5bc..a576ca238d 100644 --- a/requirements/requirements-py-3.9.txt +++ b/requirements/requirements-py-3.9.txt @@ -4,14 +4,15 @@ # # pip-compile --extra=atomic_tools --extra=docs --extra=notebook --extra=rest --extra=tests --no-annotate --output-file=requirements/requirements-py-3.9.txt pyproject.toml # +accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 aio-pika==6.8.1 aiormq==3.3.1 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 +annotated-types==0.7.0 anyio==3.7.0 -appnope==0.1.3 archive-path==0.4.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 @@ -34,7 +35,7 @@ click==8.1.3 click-spinner==0.1.10 comm==0.1.3 contourpy==1.1.0 -coverage==7.4.1 +coverage[toml]==7.4.1 cryptography==41.0.1 cycler==0.11.0 debugpy==1.6.7 @@ -44,8 +45,7 @@ deprecation==2.1.0 disk-objectstore==1.1.0 docstring-parser==0.15 docutils==0.20.1 -emmet-core==0.57.1 -exceptiongroup==1.1.1 +exceptiongroup==1.2.1 executing==1.2.0 fastjsonschema==2.17.1 flask==2.3.2 @@ -59,7 +59,7 @@ greenlet==2.0.2 idna==3.4 imagesize==1.4.1 importlib-metadata==6.8.0 -importlib-resources==5.12.0 +importlib-resources==6.4.0 iniconfig==2.0.0 ipykernel==6.23.2 ipython==8.14.0 @@ -68,6 +68,7 @@ ipywidgets==8.0.6 itsdangerous==2.1.2 jedi==0.18.2 jinja2==3.1.2 +joblib==1.4.2 jsonschema[format-nongpl]==3.2.0 
jupyter==1.0.0 jupyter-cache==0.6.1 @@ -82,9 +83,8 @@ jupyterlab-widgets==3.0.7 kiwipy[rmq]==0.7.7 kiwisolver==1.4.4 latexcodec==2.0.1 -linkify-it-py==2.0.2 mako==1.2.4 -markdown-it-py[linkify,plugins]==3.0.0 +markdown-it-py==3.0.0 markupsafe==2.1.3 matplotlib==3.7.1 matplotlib-inline==0.1.6 @@ -93,7 +93,6 @@ mdurl==0.1.2 mistune==3.0.1 monty==2023.9.25 mpmath==1.3.0 -msgpack==1.0.5 multidict==6.0.4 myst-nb==1.0.0 myst-parser==2.0.0 @@ -135,6 +134,7 @@ pybtex==0.24.0 pycifrw==4.4.5 pycparser==2.21 pydantic==2.4.0 +pydantic-core==2.10.0 pydata-sphinx-theme==0.15.1 pygments==2.15.1 pymatgen==2023.9.25 @@ -149,7 +149,7 @@ pytest-benchmark==4.0.0 pytest-cov==4.1.0 pytest-datadir==1.4.1 pytest-regressions==2.4.2 -pytest-rerunfailures==12.0.0 +pytest-rerunfailures==12.0 pytest-timeout==2.2.0 python-dateutil==2.8.2 python-json-logger==2.0.7 @@ -163,7 +163,6 @@ qtpy==2.3.1 requests==2.31.0 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 -rich==13.4.2 ruamel-yaml==0.17.32 ruamel-yaml-clib==0.2.7 scipy==1.10.1 @@ -195,7 +194,6 @@ sympy==1.12 tabulate==0.9.0 tenacity==8.2.2 terminado==0.17.1 -textual==0.29.0 tinycss2==1.2.1 tomli==2.0.1 tornado==6.3.2 @@ -203,7 +201,6 @@ tqdm==4.65.0 traitlets==5.9.0 typing-extensions==4.6.3 tzdata==2023.3 -uc-micro-py==1.0.2 uncertainties==3.1.7 upf-to-json==0.9.5 urllib3==2.0.3 From 076cd79bfb584872d6d1796d68b856ac8e5a5085 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 1 Jul 2024 12:03:55 +0200 Subject: [PATCH 36/82] Devops: Update pre-commit step in CD workflow to match CI The CI workflow updated the Python version and dependency requirements for the `pre-commit` job, however, did not apply the same changes to the CD workflow. This can cause differences in pre-commit causing the CI to pass but the CD to fail. --- .github/workflows/release.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4a3f0e8a19..80f7e35326 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -40,8 +40,9 @@ jobs: - name: Install aiida-core and pre-commit uses: ./.github/actions/install-aiida-core with: - python-version: '3.10' + python-version: '3.11' extras: '[pre-commit]' + from-requirements: 'false' - name: Run pre-commit run: pre-commit run --all-files || ( git status --short ; git diff ; exit 1 ) From 2ccfeeebb1fff157e87599855704b25b17d64f22 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Wed, 17 Apr 2024 23:32:57 +0200 Subject: [PATCH 37/82] Release `v2.6.0` --- CHANGELOG.md | 253 ++++++++++++++++++++++++++++++++++++++++++ docs/source/conf.py | 2 +- src/aiida/__init__.py | 2 +- 3 files changed, 255 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a51a1320b0..b40c27e1dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,258 @@ # Changelog +## v2.6.0 - 2024-07-01 + +This minor release comes with a number of features that are focused on user friendliness and ease-of-use of the CLI and the API. +The caching mechanism has received a number of improvements guaranteeing even greater savings of computational time. +For existing calculations to be valid cache sources in the new version, their hash has to be regenerated (see [Improvements and changes to caching](#improvements-and-changes-to-caching) for details). 
+ +- [Making RabbitMQ optional](#making-rabbitmq-optional) +- [Simplifying profile setup](#simplifying-profile-setup) +- [Improved test fixtures without services](#improved-test-fixtures-without-services) +- [Improvements and changes to caching](#improvements-and-changes-to-caching) +- [Programmatic syntax for query builder filters and projections](#programmatic-syntax-for-query-builder-filters-and-projections) +- [Automated profile storage backups](#automated-profile-storage-backups) +- [Full list of changes](#full-list-of-changes) + - [Features](#features) + - [Performance](#performance) + - [Changes](#changes) + - [Fixes](#fixes) + - [Deprecations](#deprecations) + - [Dependencies](#dependencies) + - [Refactoring](#refactoring) + - [Documentation](#documentation) + - [Devops](#devops) + + +### Making RabbitMQ optional + +The RabbitMQ message broker service is now optional for running AiiDA. +The requirement was added in AiiDA v1.0 when the engine was completely overhauled. +Although it significantly improved the scaling and responsiveness, it also made it more difficult to start using AiiDA. +As of v2.6, profiles can now be configured without RabbitMQ, at the cost that the daemon cannot be used and all processes have to be run locally. + +### Simplifying profile setup + +With the removal of RabbitMQ as a hard requirement, combined with the storage plugins introduced in v2.5 that replace PostgreSQL with serverless SQLite, it is now possible to set up a profile that requires no services. +A new command is introduced, `verdi presto`, that automatically creates a profile with sensible defaults. +In principle, this makes it possible to run just the following two commands on any operating system: +``` +pip install aiida-core +verdi presto +``` +and get a working AiiDA installation that is ready to go. +As a bonus, it also configures the localhost as a `Computer`. +See the [documentation for more details](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/installation/guide_quick.html). + +### Improved test fixtures without services + +Until now, running tests would always require a fully functional profile, which meant that PostgreSQL and RabbitMQ had to be available. +As described in the section above, it is now possible to set up a profile without these services. +This new feature is leveraged to provide a set of `pytest` fixtures that supply a test profile that can be used on essentially any system that just has AiiDA installed. +To start writing tests, simply create a `conftest.py` and import the fixtures with: +```python +pytest_plugins = 'aiida.tools.pytest_fixtures' +``` +The new fixtures include the `aiida_profile` fixture, which is session-scoped and automatically loaded. +The fixture creates a temporary test profile at the start of the test session and automatically deletes it when the session ends. +For more information and an overview of all available fixtures, please refer to [the documentation on `pytest` fixtures](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/topics/plugins.html#plugin-test-fixtures). + +### Improvements and changes to caching + +A number of fixes and changes to the caching mechanism were introduced (see the [changes](#changes) subsection of the [full list of changes](#full-list-of-changes) for a more detailed overview). +For existing calculations to be valid cache sources in the new version, their hash has to be regenerated by running `verdi node rehash`. +Note that this can take a while for large databases.
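+In its simplest form this is a single command that recomputes the hash of every node; the invocation below is a minimal sketch with no filtering options (see `verdi node rehash --help` for ways to restrict the set of nodes): +``` +verdi node rehash +```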
+ +Since its introduction, the cache would essentially be reset each time AiiDA or any of the plugin packages was updated, since the versions of these packages were included in the calculation of the node hashes. +This was originally done as a precaution, to err on the safe side and limit the possibility of false positives in cache hits. +However, this strategy has turned out to be unnecessarily cautious and severely limited the effectiveness of caching. + +The package version information is no longer included in the hash and therefore no longer impacts the caching. +This change does, however, make false positives possible if the implementation of a `CalcJob` or `Parser` plugin changes significantly. +Therefore, a mechanism is introduced that gives these plugins control to effectively reset the cache of existing nodes. +Please refer to the [documentation on controlling caching](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/topics/provenance/caching.html#calculation-jobs-and-parsers) for more details. + +### Programmatic syntax for query builder filters and projections + +In the `QueryBuilder`, fields to filter on or project always had to be provided with strings: +```python +QueryBuilder().append(Node, filters={'label': 'some-label'}, project=['id', 'ctime']) +``` +and it is not always trivial to know which fields exist that _can_ be filtered on and projected. +In addition, there was a discrepancy for some fields, most notably the `pk` property, which had to be converted to `id` in the query builder syntax. + +These limitations have been solved as each class in AiiDA's ORM now defines the `fields` property, which allows these fields to be discovered programmatically. +The example above would convert to: +```python +QueryBuilder().append(Node, filters={Node.fields.label: 'some-label'}, project=[Node.fields.pk, Node.fields.ctime]) +``` +The `fields` property provides tab-completion, allowing easy discovery of the available fields for an ORM class in IDEs and interactive shells. +The fields also allow logical conditions to be expressed programmatically, and more. +For more details, please refer to the [documentation on programmatic field syntax](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/howto/query.html#programmatic-syntax-for-filters). + +Data plugins can also define custom fields, adding on top of the fields inherited from their base class(es). +The [documentation on data plugin fields](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/topics/data_types.html#fields) provides more information, but the API is currently in beta and guaranteed to change in an upcoming version. +It is therefore recommended that plugin developers hold off on making use of this new API. + +### Automated profile storage backups + +A generic mechanism has been implemented to make it easy to back up the data of a profile. +The command `verdi storage backup` automatically maintains a directory structure of previous backups, allowing efficient incremental backups. +Note that the exact details of the backup mechanism are dependent on the storage plugin that is used by the profile, and not all storage plugins necessarily implement it. +For now, the storage plugins `core.psql_dos` and `core.sqlite_dos` implement the functionality. +For more information, please refer [to the documentation](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/howto/installation.html#backing-up-your-installation).
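+As an illustration, a backup is created by pointing the command at a destination folder; the path below is purely illustrative and the available options depend on the storage plugin of the profile: +``` +verdi storage backup /path/to/backups +```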
+Please refer to [this section of the documentation](https://aiida.readthedocs.io/projects/aiida-core/en/v2.6.0/howto/installation.html#restoring-data-from-a-backup) for instructions to restore from a backup. + +### Full list of changes + +#### Features +- `CalcJob`: Allow to define order of copying of input files [[6898ff4d8]](https://github.com/aiidateam/aiida-core/commit/6898ff4d8c263cf08707c61411a005f6a7f731dd) +- `SqliteDosStorage`: Implement the backup functionality [[18e447c77]](https://github.com/aiidateam/aiida-core/commit/18e447c77f48a18f361e458186cd87b2355aea75) +- `SshTransport`: Return `FileNotFoundError` if destination parent does not exist [[d86bb38bf]](https://github.com/aiidateam/aiida-core/commit/d86bb38bf9a0ced8029f8a4b895e1a6be1ccb339) +- Add improved more configurable versions of `pytest` fixtures [[e3a60460e]](https://github.com/aiidateam/aiida-core/commit/e3a60460ef1208a5c46ecd6af35d891a88ee784e) +- Add the `orm.Entity.fields` interface for `QueryBuilder` [[4b9abe2bd]](https://github.com/aiidateam/aiida-core/commit/4b9abe2bd0bb82449547a3377c2b6dbc7c174123) +- CLI: `verdi computer test` make unexpected output check optional [[589a3b2c0]](https://github.com/aiidateam/aiida-core/commit/589a3b2c03d44cebd26e88243ca34fcdb0e23ff4) +- CLI: `verdi node graph generate` root nodes as arguments [[06f8f4cfb]](https://github.com/aiidateam/aiida-core/commit/06f8f4cfb0731ff699d5c01ad85418b6db0f6778) +- CLI: Add `--most-recent-node` option to `verdi process watch` [[72692fa5c]](https://github.com/aiidateam/aiida-core/commit/72692fa5cb667e2a7462770af18b7cedeaf8b3f0) +- CLI: Add `--sort/--no-sort` to `verdi code export` [[80c606890]](https://github.com/aiidateam/aiida-core/commit/80c60689063f1517c3de91d86eef80f7852667e3) +- CLI: Add `verdi process dump` and the `ProcessDumper` [[6291accf0]](https://github.com/aiidateam/aiida-core/commit/6291accf0538eafe7426e89bc4c1e9eb90ce0385) +- CLI: Add RabbitMQ options to `verdi profile setup` [[f553f805e]](https://github.com/aiidateam/aiida-core/commit/f553f805e86d766da6208eb1682f7cf12c7907ac) +- CLI: Add the `-M/--most-recent-node` option [[5aae874aa]](https://github.com/aiidateam/aiida-core/commit/5aae874aaa44459ce8cf3ddd3bf1a82d8a2e8d37) +- CLI: Add the `verdi computer export` command [[9e3ebf6ea]](https://github.com/aiidateam/aiida-core/commit/9e3ebf6ea55d883c7857a1dbafe398b9579cca03) +- CLI: Add the `verdi node list` command [[cf091e80f]](https://github.com/aiidateam/aiida-core/commit/cf091e80ff2b6aa03f41b56ba1976abb97298972) +- CLI: Add the `verdi presto` command [[6b6e1520f]](https://github.com/aiidateam/aiida-core/commit/6b6e1520f2d3807e366dd672e7917f381ea7b524) +- CLI: Add the `verdi profile configure-rabbitmq` command [[202a3ece9]](https://github.com/aiidateam/aiida-core/commit/202a3ece9705289a1f12c85e64cf90307ca85c39) +- CLI: Allow `verdi computer delete` to delete associated nodes [[348777571]](https://github.com/aiidateam/aiida-core/commit/3487775711e7412fb2cb82600fb266316d6ce12a) +- CLI: Allow multiple root nodes in `verdi node graph generate` [[f16c432af]](https://github.com/aiidateam/aiida-core/commit/f16c432af107b1f9c01a12e03cbd0a9ecc2744ad) +- Engine: Allow `CalcJob` monitors to return outputs [[b7e59a0db]](https://github.com/aiidateam/aiida-core/commit/b7e59a0dbc0dd629be5c8178e98c70e7a2c116e9) +- Make `postgres_cluster` and `config_psql_dos` fixtures configurable [[35d7ca63b]](https://github.com/aiidateam/aiida-core/commit/35d7ca63b44f051a26d3f96d84e043919eb3f101) +- Process: Add the `metadata.disable_cache` input 
[[4626b11f8]](https://github.com/aiidateam/aiida-core/commit/4626b11f85cd0d95a17d8f5766a90b88ddddd689) +- Storage: Add backup mechanism to the interface [[bf79f23ee]](https://github.com/aiidateam/aiida-core/commit/bf79f23eef66d362a34aac170577ba8f5c2088ba) +- Transports: fix overwrite behaviour for `puttree`/`gettree` [[a55451703]](https://github.com/aiidateam/aiida-core/commit/a55451703aa8f8d330b25bc5da95d41caf0db9ac) + +#### Performance +- CLI: Speed up tab-completion by lazily importing `Config` [[9524cda0b]](https://github.com/aiidateam/aiida-core/commit/9524cda0b8c742fb5bf740d7b0035e326eace28f) +- Improve import time of `aiida.orm` and `aiida.storage` [[fb9b6cc3b]](https://github.com/aiidateam/aiida-core/commit/fb9b6cc3b3df244549fdd78576c34f6d9dfd4568) +- ORM: Cache the logger adapter for `ProcessNode` [[1d104d06b]](https://github.com/aiidateam/aiida-core/commit/1d104d06b95da36c71cab132c7b6fec52a005e18) + +#### Changes +- Caching: `NodeCaching._get_objects_to_hash` return type to `dict` [[c9c7c4bd8]](https://github.com/aiidateam/aiida-core/commit/c9c7c4bd8e1cd306271b5cf267095d3cbd8aafe2) +- Caching: Add `CACHE_VERSION` attribute to `CalcJob` and `Parser` [[39d0f312d]](https://github.com/aiidateam/aiida-core/commit/39d0f312d212a642d1537ca89e7622e48a23e701) +- Caching: Include the node's class in objects to hash [[68ce11161]](https://github.com/aiidateam/aiida-core/commit/68ce111610c40e3d9146e128c0a698fc60b6e5e5) +- Caching: Make `NodeCaching._get_object_to_hash` public [[e33000402]](https://github.com/aiidateam/aiida-core/commit/e330004024ad5121f9bc82cbe972cd283f25fec8) +- Caching: Remove core and plugin information from hash calculation [[4c60bbef8]](https://github.com/aiidateam/aiida-core/commit/4c60bbef852eef55a06b48b813d3fbcc8fb5a43f) +- Caching: Rename `get_hash` to `compute_hash` [[b544f7cf9]](https://github.com/aiidateam/aiida-core/commit/b544f7cf95a0e6e698224f36c1bea57d1cd99e7d) +- CLI: Always do hard reset in `verdi daemon restart` [[8ac642410]](https://github.com/aiidateam/aiida-core/commit/8ac6424108d1528bd3279c81da62dd44855b6ebc) +- CLI: Change `--profile` to `-p/--profile-name` for `verdi profile setup` [[8ea203cd9]](https://github.com/aiidateam/aiida-core/commit/8ea203cd9b1d2fbb4a3b38ba67beec97bb8c7145) +- CLI: Let `-v/--verbosity` only affect `aiida` and `verdi` loggers [[487c6bf04]](https://github.com/aiidateam/aiida-core/commit/487c6bf047030ee19deed49d5fbf9a093253538e) +- Engine: Set the `to_aiida_type` as default inport port serializer [[2fa7a5305]](https://github.com/aiidateam/aiida-core/commit/2fa7a530511a94ead83d79669efed71706a0a472) +- `QueryBuilder`: Remove implementation for `has_key` in SQLite storage [[24cfbe27e]](https://github.com/aiidateam/aiida-core/commit/24cfbe27e7408b78fca8e6f69799ebad3659400b) + +#### Fixes +- `BandsData`: Use f-strings in `_prepare_gnuplot` [[dba117437]](https://github.com/aiidateam/aiida-core/commit/dba117437782abc6d11f9ef208923f7e70f79ed2) +- `BaseRestartWorkChain`: Fix handler overrides used only first iteration [[65786a6bd]](https://github.com/aiidateam/aiida-core/commit/65786a6bda1c74dfb4aea90becd0664de6b1abde) +- `SlurmScheduler`: Make detailed job info fields dynamic [[4f9774a68]](https://github.com/aiidateam/aiida-core/commit/4f9774a689b81a446fac37ad8281b2d854eefa7a) +- `SqliteDosStorage`: Fix exception when importing archive [[af0c260bb]](https://github.com/aiidateam/aiida-core/commit/af0c260bb1c32c3b33c50175d790907774561b3e) +- `StructureData`: Fix the pbc constraints of `get_pymatgen_structure` 
[[adcce4bcd]](https://github.com/aiidateam/aiida-core/commit/adcce4bcd0b59c8371be73058a060bedcaba40f6) +- Archive: Automatically create nested output directories [[212f6163b]](https://github.com/aiidateam/aiida-core/commit/212f6163b03b8762509ae2230c30172af8c02fed) +- Archive: Respect `filter_size` in query for existing nodes [[ef60b66aa]](https://github.com/aiidateam/aiida-core/commit/ef60b66aa3ce76d654abe5e7caafef3f221defd0) +- CLI: Ensure deprecation warnings are printed before any prompts [[deb293d0e]](https://github.com/aiidateam/aiida-core/commit/deb293d0e6a566256fac5069881de4846d77f6d1) +- CLI: Fix `verdi archive create --dry-run` for empty file repository [[cc96c9d04]](https://github.com/aiidateam/aiida-core/commit/cc96c9d043c6616a068a5498f557fa21a728eb96) +- CLI: Fix `verdi plugin list` incorrectly not displaying description [[e952d7717]](https://github.com/aiidateam/aiida-core/commit/e952d7717c1d8001555e8d19f54f4fa349da6c6e) +- CLI: Fix `verdi process [show|report|status|watch|call-root]` no output [[a56a1389d]](https://github.com/aiidateam/aiida-core/commit/a56a1389dee5cb9ae70a5511d77aad248ea21731) +- CLI: Fix `verdi process list` if no available workers [[b44afcb3c]](https://github.com/aiidateam/aiida-core/commit/b44afcb3c1a7efa452d4e72aa6f8a615f652aaa4) +- CLI: Fix `verdi quicksetup` when profiles exist where storage is not `core.psql_dos` [[6cb91c181]](https://github.com/aiidateam/aiida-core/commit/6cb91c18163ac6228ed4a64c1c467dfd0398a624) +- CLI: Fix dry-run resulting in critical error in `verdi archive import` [[36991c6c8]](https://github.com/aiidateam/aiida-core/commit/36991c6c84f4ba0b4553e8cd6689bbc1815dbd35) +- CLI: Fix logging not showing in `verdi daemon worker` [[9bd8585bd]](https://github.com/aiidateam/aiida-core/commit/9bd8585bd5e7989e24646a0018710e86836e5a9f) +- CLI: Fix the `ctx.obj.profile` attribute not being initialized [[8a286f26e]](https://github.com/aiidateam/aiida-core/commit/8a286f26e8d303c498ac2eabd49be5f1f4ced9ef) +- CLI: Hide misleading message for `verdi archive create --test-run` [[7e42d7aa7]](https://github.com/aiidateam/aiida-core/commit/7e42d7aa7d16fa9e81cbd300ada14e4dea2426ce) +- CLI: Improve error message of `PathOrUrl` and `FileOrUrl` [[ffc6e4f70]](https://github.com/aiidateam/aiida-core/commit/ffc6e4f706277854dbd454d6f3164cec31e7819a) +- CLI: Only configure logging in `set_log_level` callback once [[66a2dcedd]](https://github.com/aiidateam/aiida-core/commit/66a2dcedd0a9428b5b2218b8c82bad9c9aff4956) +- CLI: Unify help of `verdi process` commands [[d91e0a58d]](https://github.com/aiidateam/aiida-core/commit/d91e0a58dabfd242b5f886d692c8761499a6719c) +- Config: Set existing user as default for read-only storages [[e66592509]](https://github.com/aiidateam/aiida-core/commit/e665925097bb3344fde4bcc66ee185a2d9207ac3) +- Config: Use UUID in `Manager.load_profile` to identify profile [[b01038bf1]](https://github.com/aiidateam/aiida-core/commit/b01038bf1fca7d33c4915aee904acea89a847614) +- Daemon: Log the worker's path and Python interpreter [[ae2094169]](https://github.com/aiidateam/aiida-core/commit/ae209416996ec361c474aeaf0fa06f49dd59f296) +- Docker: Start and stop daemon only when a profile exists [[0a5b20023]](https://github.com/aiidateam/aiida-core/commit/0a5b200236419d8caf8e05bb04ba80d03a438e03) +- Engine: Add positional inputs for `Process.submit` [[d1131fe94]](https://github.com/aiidateam/aiida-core/commit/d1131fe9450972080207db6e9615784490b3252b) +- Engine: Catch `NotImplementedError`in `get_process_state_change_timestamp` 
[[04926fe20]](https://github.com/aiidateam/aiida-core/commit/04926fe20da15065f8f086f1ff3cb14cc163aa08) +- Engine: Fix paused work chains not showing it in process status [[40b22d593]](https://github.com/aiidateam/aiida-core/commit/40b22d593875b97355996bbfc15e2850ad1f0495) +- Fix passwords containing `@` not being accepted for Postgres databases [[d14c14db2]](https://github.com/aiidateam/aiida-core/commit/d14c14db2f82d3a678e9747bd463ec1a61642120) +- ORM: Correct field type of `InstalledCode` and `PortableCode` models [[0079cc1e4]](https://github.com/aiidateam/aiida-core/commit/0079cc1e4b46c61edf2323b2d42af46367fe04b6) +- ORM: Fix `ProcessNode.get_builder_restart` [[0dee9d8ef]](https://github.com/aiidateam/aiida-core/commit/0dee9d8efba5c48615e8510f5ada706724b4a2e8) +- ORM: Fix deprecation warning being shown for new code types [[a9155713b]](https://github.com/aiidateam/aiida-core/commit/a9155713bbb10e57fe91cd320e2a12391d098a46) +- Runner: Close event loop in `Runner.close()` [[53cc45837]](https://github.com/aiidateam/aiida-core/commit/53cc458377685e54179eb1e1b73bb0383c8dae13) + +#### Deprecations +- CLI: Deprecate `verdi profile setdefault` and rename to `verdi profile set-default` [[ab48a4f62]](https://github.com/aiidateam/aiida-core/commit/ab48a4f627b4c9eec9133b5efa9fb888ce2c4914) +- CLI: Deprecate accepting `0` for `default_mpiprocs_per_machine` [[acec0c190]](https://github.com/aiidateam/aiida-core/commit/acec0c190cbb45ba267c6eb8ee7ceba18cf3302b) +- CLI: Deprecate the `deprecated_command` decorator [[4c11c0616]](https://github.com/aiidateam/aiida-core/commit/4c11c0616c583236119f838a1780a606c58b4ee2) +- CLI: Remove the deprecated `verdi database` command [[3dbde9e31]](https://github.com/aiidateam/aiida-core/commit/3dbde9e311781509b738202ad6f1de3bbd4b7a82) +- ORM: Undo deprecation of `Code.get_description` [[1b13014b1]](https://github.com/aiidateam/aiida-core/commit/1b13014b14274024dcb6bb0a721eb62665567987) + +### Dependencies +- Update `tabulate>=0.8.0,<0.10.0` [[6db2f4060]](https://github.com/aiidateam/aiida-core/commit/6db2f4060d4ece4552f5fe757c0f7d938810f4d1) + +#### Refactoring +- Abstract message broker functionality [[69389e038]](https://github.com/aiidateam/aiida-core/commit/69389e0387369d8437e1219487b88430b7b2e679) +- Config: Refactor `get_configuration_directory_from_envvar` [[65739f524]](https://github.com/aiidateam/aiida-core/commit/65739f52446087439ba93158eb948b58ed081ce5) +- Config: Refactor the `create_profile` function and method [[905e93444]](https://github.com/aiidateam/aiida-core/commit/905e93444cf996461e679cd458511d1c471a7e02) +- Engine: Refactor handling of `remote_folder` and `retrieved` outputs [[28adacaf8]](https://github.com/aiidateam/aiida-core/commit/28adacaf8ae21357bf6e5a2a48c43ed56d3bd78b) +- ORM: Switch to `pydantic` for code schema definition [[06189d528]](https://github.com/aiidateam/aiida-core/commit/06189d528c2362516f42e0d48840882812b97fe4) +- Replace deprecated `IOError` with `OSError` [[7f9129fd1]](https://github.com/aiidateam/aiida-core/commit/7f9129fd193374bdbeaa7ba4dd8c3cdf706db97d) +- Storage: Move profile locking to the abstract base class [[ea5f51bcb]](https://github.com/aiidateam/aiida-core/commit/ea5f51bcb6af172eb1a754df3981003bf7bad959) + +#### Documentation +- Add more instructions on how to use docker image [[aaf44afcc]](https://github.com/aiidateam/aiida-core/commit/aaf44afcce0f90fff2eb38bc47d28b4adf87db24) +- Add the updated cheat sheet [[09f9058a7]](https://github.com/aiidateam/aiida-core/commit/09f9058a7444f3ac1d3f243b608fa3f24f771f27) +- 
Add tips for common problems with conda PostgreSQL setup [[cd5313825]](https://github.com/aiidateam/aiida-core/commit/cd5313825afdb1771ca19d899567e4ed4774a2bc) +- Customize the color scheme through custom style sheet [[a6cf7fc7e]](https://github.com/aiidateam/aiida-core/commit/a6cf7fc7e02a48a7e3b9c4ba6ce5e2cd413e6b23) +- Docs: Clarify `Transport.copy` requires `recursive=True` if source is a directory [[310ff1db7]](https://github.com/aiidateam/aiida-core/commit/310ff1db77bc75b6cadedf77394b96af05456f43) +- Fix example of the `entry_points` fixture [[081fc5547]](https://github.com/aiidateam/aiida-core/commit/081fc5547370e1b5a19b1fb507681091c632bb7a) +- Fixing several small issues [[6a3a59b29]](https://github.com/aiidateam/aiida-core/commit/6a3a59b29ba64401828d9ab51dc123060868278b) +- Minor cheatsheet update for v2.6 release [[c3cc169c4]](https://github.com/aiidateam/aiida-core/commit/c3cc169c487a88e2357b7377e897f0521c23f05a) +- Reorganize the tutorial content [[5bd960efa]](https://github.com/aiidateam/aiida-core/commit/5bd960efae5a7f916b978a420f5f43501c9bc529) +- Rework the installation section [[0ee0a0c6a]](https://github.com/aiidateam/aiida-core/commit/0ee0a0c6ae13588e82edf1cf9e8cb9857c94c31b) +- Standardize usage of `versionadded` directive [[bf5dac848]](https://github.com/aiidateam/aiida-core/commit/bf5dac8484638d7ba5c492e91975b5fcc0cc9770) +- Update twitter logo [[5e4f60d83]](https://github.com/aiidateam/aiida-core/commit/5e4f60d83160774ca83defe4bf1f6c6381aaa1a0) +- Use uv installer in readthedocs build [[be0db3cc4]](https://github.com/aiidateam/aiida-core/commit/be0db3cc49506294ae1845b6e746e40cd76f39a9) + +#### Devops +- Add `check-jsonschema` pre-commit hook for GHA workflows [[14c5bb0f7]](https://github.com/aiidateam/aiida-core/commit/14c5bb0f764f0fd7933df205aa22d61c85ec0cf2) +- Add Dependabot config for maintaining GH actions [[0812f4b9e]](https://github.com/aiidateam/aiida-core/commit/0812f4b9eeffdff5a8c3d0802aea94c8919d9922) +- Add docker image `aiida-core-dev` for development [[6d0984109]](https://github.com/aiidateam/aiida-core/commit/6d0984109478ec1c0fd96dfd1d3f2b54e0b75dd2) +- Add Python 3.12 tox environment [[6b0d43960]](https://github.com/aiidateam/aiida-core/commit/6b0d4396068a43b6823eca8c78b9048044b0b4b8) +- Add the `slurm` service to nightly workflow [[5460a0414]](https://github.com/aiidateam/aiida-core/commit/5460a0414d55e3531eb86e6906ee963a6b712aae) +- Add typing to `aiida.common.hashing` [[ba21ba1d4]](https://github.com/aiidateam/aiida-core/commit/ba21ba1d40a76df73a2e27ce6f1a4f68aba7fb9a) +- Add workflow to build Docker images on PRs from forks [[23d2aa5ee]](https://github.com/aiidateam/aiida-core/commit/23d2aa5ee3c08438cfc4b4734e9670e19c090150) +- Address internal deprecation warnings [[ceed7d55d]](https://github.com/aiidateam/aiida-core/commit/ceed7d55dfb7df8dbe52c4557d145593d83f788a) +- Allow unit test suite to be ran against SQLite [[0dc8bbcb2]](https://github.com/aiidateam/aiida-core/commit/0dc8bbcb261b745683bc542c1aced2412ebd66a0) +- Bump the gha-dependencies group with 4 updates [[ccb56286c]](https://github.com/aiidateam/aiida-core/commit/ccb56286c40f6be0d61a0c62442993e43faf1ba6) +- Dependencies: Update the requirements files [[61ae1a55b]](https://github.com/aiidateam/aiida-core/commit/61ae1a55b94c50979b4e47bb7572e1d4c9b2391f) +- Disable code coverage in `test-install.yml` [[4cecda517]](https://github.com/aiidateam/aiida-core/commit/4cecda5177c456cee252c16295416c3842bb5d2d) +- Do not pin the mamba version 
[[82bba1307]](https://github.com/aiidateam/aiida-core/commit/82bba130792f6c965f0ede8b221eee70fd01d9f1) +- Fix Docker build not defining `REGISTRY` [[e7953fd4d]](https://github.com/aiidateam/aiida-core/commit/e7953fd4dd14875e380b125b99f86c12ce15359b) +- Fix publishing to DockerHub using incorrect secret name [[9c9ff7986]](https://github.com/aiidateam/aiida-core/commit/9c9ff79865225b125ba5f9fe23969d4c2c8fb9b2) +- Fix Slack notification for nightly tests [[082589f45]](https://github.com/aiidateam/aiida-core/commit/082589f456201fbd79d3df809e2cfc5fb5f27922) +- Fix the `test-install.yml` workflow [[22ea06362]](https://github.com/aiidateam/aiida-core/commit/22ea06362e9de5d314f103332da2e25ae6080f61) +- Fix the Docker builds [[3404c0192]](https://github.com/aiidateam/aiida-core/commit/3404c01925da941c08f246e231b6587f53ce445b) +- Increase timeout for the `test-install` workflow [[e36a3f11f]](https://github.com/aiidateam/aiida-core/commit/e36a3f11fdd165eea3af9f3337382e1bbd181390) +- Move RabbitMQ CI to nightly and update versions [[b47a56698]](https://github.com/aiidateam/aiida-core/commit/b47a56698e8fdf350a10c7abfd8ba00443fabd8d) +- Refactor the GHA Docker build [[e47932ee9]](https://github.com/aiidateam/aiida-core/commit/e47932ee9e0833dca546c7c7b5b584f2687d9073) +- Remove `verdi tui` from CLI reference documentation [[1b4a19a44]](https://github.com/aiidateam/aiida-core/commit/1b4a19a44461271aea58e54acd93e896220b413d) +- Run Docker workflow only for pushes to origin [[b1a714155]](https://github.com/aiidateam/aiida-core/commit/b1a714155ec9e51263e453a4354934fa91d04f33) +- Tests: Convert hierarchy functions into fixtures [[a02abc470]](https://github.com/aiidateam/aiida-core/commit/a02abc4701e81b75284164510be966ff0fd04dab) +- Tests: extend `node_and_calc_info` fixture to `core.ssh` [[9cf28f208]](https://github.com/aiidateam/aiida-core/commit/9cf28f20875fe6c3b0f2844bff16415b1dfc7b6f) +- Tests: Remove test classes for transport plugins [[b77e51f8c]](https://github.com/aiidateam/aiida-core/commit/b77e51f8c15c7ddea80d7d6328737abb705c6ce8) +- Tests: Unskip test in `tests/cmdline/commands/test_archive_import.py` [[7b7958c7a]](https://github.com/aiidateam/aiida-core/commit/7b7958c7aee150162cb4db0562a7352764a94c04) +- Update codecov action [[fc2a84d9b]](https://github.com/aiidateam/aiida-core/commit/fc2a84d9bd045d48d46511153dfde389070bf552) +- Update deprecated `whitelist_externals` option in tox config [[8feef5189]](https://github.com/aiidateam/aiida-core/commit/8feef5189ab9a2ba4b358bb6937d5d7c3f555ad8) +- Update pre-commit hooks [[3dda84ff3]](https://github.com/aiidateam/aiida-core/commit/3dda84ff3057a97e422b809a6adc778cbf60c125) +- Update pre-commit requirement `ruff==0.3.5` [[acd54543d]](https://github.com/aiidateam/aiida-core/commit/acd54543dffca05df7189f36c71afd2bb0065f34) +- Update requirements `mypy` and `pre-commit`[[04b3260a0]](https://github.com/aiidateam/aiida-core/commit/04b3260a098f061301edb0f56f1675fe9283b41b) +- Update requirements to address deprecation warnings [[566f681f7]](https://github.com/aiidateam/aiida-core/commit/566f681f72426a9a08200ff1d86c604f4c37bbcf) +- Use `uv` to install package in CI and CD [[73a734ae3]](https://github.com/aiidateam/aiida-core/commit/73a734ae3cd0977a97c631f97ddb781fa293864a) +- Use recursive dependencies for `pre-commit` extra [[6564e78dd]](https://github.com/aiidateam/aiida-core/commit/6564e78ddb349b89f6a3e9bfa81ce357ce865961) + ## v2.5.1 - 2024-01-31 diff --git a/docs/source/conf.py b/docs/source/conf.py index 745ed79c30..d017051cb3 100644 --- 
a/docs/source/conf.py +++ b/docs/source/conf.py @@ -97,7 +97,7 @@ ipython_mplbackend = '' myst_enable_extensions = ['colon_fence', 'deflist'] -myst_heading_anchors = 3 +myst_heading_anchors = 4 nb_execution_show_tb = 'READTHEDOCS' in os.environ nb_merge_streams = True nb_mime_priority_overrides = [ diff --git a/src/aiida/__init__.py b/src/aiida/__init__.py index 2edab0cd02..4d2bcab3a9 100644 --- a/src/aiida/__init__.py +++ b/src/aiida/__init__.py @@ -27,7 +27,7 @@ 'For further information please visit http://www.aiida.net/. All rights reserved.' ) __license__ = 'MIT license, see LICENSE.txt file.' -__version__ = '2.5.1.post0' +__version__ = '2.6.0' __authors__ = 'The AiiDA team.' __paper__ = ( 'S. P. Huber et al., "AiiDA 1.0, a scalable computational infrastructure for automated reproducible workflows and ' From 9fe8fd2e0b88e746ee2156eccb71b7adbab6b2c5 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 1 Jul 2024 13:33:46 +0200 Subject: [PATCH 38/82] Fixtures: Make `pgtest` truly an optional dependency (#6502) The pytest fixtures were improved to allow running with a `core.sqlite_dos` storage for the test profile, making PostgreSQL completely optional. However, the current fixture still imports the `pgtest` package at module level making it a requirement, despite it only being relevant when running the tests with a `core.psql_dos` storage plugin. Here the import is moved inside the `PostgresCluster._create` method which is only called when the test suite actually uses a PSQL based storage plugin. --- src/aiida/tools/pytest_fixtures/storage.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiida/tools/pytest_fixtures/storage.py b/src/aiida/tools/pytest_fixtures/storage.py index 2f13cf25a1..4565e621b4 100644 --- a/src/aiida/tools/pytest_fixtures/storage.py +++ b/src/aiida/tools/pytest_fixtures/storage.py @@ -7,7 +7,6 @@ from uuid import uuid4 import pytest -from pgtest.pgtest import PGTest if t.TYPE_CHECKING: from pgtest.pgtest import PGTest @@ -19,6 +18,8 @@ def __init__(self): self.cluster = None def _create(self): + from pgtest.pgtest import PGTest + try: self.cluster = PGTest() except OSError as e: @@ -59,7 +60,6 @@ def create_database( return postgres_config -# TODO: Update docstring accordingly @pytest.fixture(scope='session') def postgres_cluster(): """Create a temporary and isolated PostgreSQL cluster using ``pgtest`` and cleanup after the yield. From 9c26ce7c1fc024265a8ffb986ae50783823aa1a7 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 1 Jul 2024 13:44:25 +0200 Subject: [PATCH 39/82] Release `v2.6.1` --- CHANGELOG.md | 6 ++++++ src/aiida/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b40c27e1dd..fbc5ddf965 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## v2.6.1 - 2024-07-01 + +### Fixes: +- Fixtures: Make `pgtest` truly an optional dependency [[9fe8fd2e0]](https://github.com/aiidateam/aiida-core/commit/9fe8fd2e0b88e746ee2156eccb71b7adbab6b2c5) + + ## v2.6.0 - 2024-07-01 This minor release comes with a number of features that are focused on user friendliness and ease-of-use of the CLI and the API. diff --git a/src/aiida/__init__.py b/src/aiida/__init__.py index 4d2bcab3a9..5067f789e2 100644 --- a/src/aiida/__init__.py +++ b/src/aiida/__init__.py @@ -27,7 +27,7 @@ 'For further information please visit http://www.aiida.net/. All rights reserved.' ) __license__ = 'MIT license, see LICENSE.txt file.' 
-__version__ = '2.6.0' +__version__ = '2.6.1' __authors__ = 'The AiiDA team.' __paper__ = ( 'S. P. Huber et al., "AiiDA 1.0, a scalable computational infrastructure for automated reproducible workflows and ' From 14bb05f4b4e7fbda86682ea2cf4e3881b3a3e8dc Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 11:18:22 +0200 Subject: [PATCH 40/82] Devops: Update pre-commit dependencies (#6504) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - [github.com/python-jsonschema/check-jsonschema: 0.28.2 → 0.28.6](https://github.com/python-jsonschema/check-jsonschema/compare/0.28.2...0.28.6) - [github.com/astral-sh/ruff-pre-commit: v0.4.1 → v0.5.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.1...v0.5.0) --- .pre-commit-config.yaml | 4 ++-- src/aiida/calculations/transfer.py | 2 +- src/aiida/engine/processes/functions.py | 2 +- src/aiida/orm/nodes/data/remote/base.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cad92cb781..185d5698fb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: exclude: *exclude_pre_commit_hooks - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.28.2 + rev: 0.28.6 hooks: - id: check-github-workflows @@ -37,7 +37,7 @@ repos: args: [--line-length=120, --fail-on-change] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.1 + rev: v0.5.0 hooks: - id: ruff-format exclude: &exclude_ruff > diff --git a/src/aiida/calculations/transfer.py b/src/aiida/calculations/transfer.py index 04290e0606..fae76aa5ed 100644 --- a/src/aiida/calculations/transfer.py +++ b/src/aiida/calculations/transfer.py @@ -55,7 +55,7 @@ def validate_instructions(instructions, _): return errmsg -def validate_transfer_inputs(inputs, _): +def validate_transfer_inputs(inputs, _ctx): """Check that the instructions dict and the source nodes are consistent""" source_nodes = inputs['source_nodes'] instructions = inputs['instructions'] diff --git a/src/aiida/engine/processes/functions.py b/src/aiida/engine/processes/functions.py index 11bd43946c..509e2d0258 100644 --- a/src/aiida/engine/processes/functions.py +++ b/src/aiida/engine/processes/functions.py @@ -85,7 +85,7 @@ def get_stack_size(size: int = 2) -> int: # type: ignore[return] """ frame = sys._getframe(size) try: - for size in itertools.count(size, 8): + for size in itertools.count(size, 8): # noqa: PLR1704 frame = frame.f_back.f_back.f_back.f_back.f_back.f_back.f_back.f_back # type: ignore[assignment,union-attr] except AttributeError: while frame: # type: ignore[truthy-bool] diff --git a/src/aiida/orm/nodes/data/remote/base.py b/src/aiida/orm/nodes/data/remote/base.py index 760924a725..9147a58d10 100644 --- a/src/aiida/orm/nodes/data/remote/base.py +++ b/src/aiida/orm/nodes/data/remote/base.py @@ -175,8 +175,8 @@ def _clean(self, transport=None): remote_dir = self.get_remote_path() if transport is None: - with self.get_authinfo().get_transport() as transport: - clean_remote(transport, remote_dir) + with self.get_authinfo().get_transport() as _transport: + clean_remote(_transport, remote_dir) else: if transport.hostname != self.computer.hostname: raise ValueError( From d86017f42cb5359d0272694247756d547057a663 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 14 Oct 2022 09:37:44 +0200 Subject: [PATCH 41/82] Dependencies: Update requirements for `kiwipy` and `plumpy` The new 
version `kiwipy==0.8` and `plumpy==0.22` provide compatiblity with newer versions of `aio-pika==8.0` which comes with various connection stability improvements. The only known problem is that `Communicator.close()` times out if at least one process has been run. A test is added to capture this behavior in `tests/manage/test_manager.py` which currently fails as a `TimeoutError` is thrown. A lot of debugging has not yet led to finding the cause nor a solution. Since this behavior only seems to be appearing in the tests and does not seem to affect regular usage, the upgrade is accepted regardless. --- environment.yml | 5 ++-- pyproject.toml | 5 ++-- requirements/requirements-py-3.10.txt | 10 ++++---- requirements/requirements-py-3.11.txt | 10 ++++---- requirements/requirements-py-3.12.txt | 10 ++++---- requirements/requirements-py-3.9.txt | 10 ++++---- src/aiida/brokers/rabbitmq/broker.py | 2 +- src/aiida/engine/processes/process.py | 5 ++-- tests/manage/test_manager.py | 33 +++++++++++++++++++++++++++ 9 files changed, 61 insertions(+), 29 deletions(-) create mode 100644 tests/manage/test_manager.py diff --git a/environment.yml b/environment.yml index 98dd997ba1..99d7748c64 100644 --- a/environment.yml +++ b/environment.yml @@ -8,7 +8,6 @@ dependencies: - python~=3.9 - alembic~=1.2 - archive-path~=0.4.2 -- aio-pika~=6.6 - circus~=0.18.0 - click-spinner~=0.1.8 - click~=8.1 @@ -19,11 +18,11 @@ dependencies: - ipython>=7 - jedi<0.19 - jinja2~=3.0 -- kiwipy[rmq]~=0.7.7 +- kiwipy[rmq]~=0.8.4 - importlib-metadata~=6.0 - numpy~=1.21 - paramiko>=2.7.2,~=2.7 -- plumpy~=0.21.6 +- plumpy~=0.22.3 - pgsu~=0.2.1 - psutil~=5.6 - psycopg2-binary~=2.8 diff --git a/pyproject.toml b/pyproject.toml index c70c7a96de..5f31cef2a0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,6 @@ classifiers = [ dependencies = [ 'alembic~=1.2', 'archive-path~=0.4.2', - 'aio-pika~=6.6', 'circus~=0.18.0', 'click-spinner~=0.1.8', 'click~=8.1', @@ -31,11 +30,11 @@ dependencies = [ 'ipython>=7', 'jedi<0.19', 'jinja2~=3.0', - 'kiwipy[rmq]~=0.7.7', + 'kiwipy[rmq]~=0.8.4', 'importlib-metadata~=6.0', 'numpy~=1.21', 'paramiko~=2.7,>=2.7.2', - 'plumpy~=0.21.6', + 'plumpy~=0.22.3', 'pgsu~=0.2.1', 'psutil~=5.6', 'psycopg2-binary~=2.8', diff --git a/requirements/requirements-py-3.10.txt b/requirements/requirements-py-3.10.txt index 7bd1c23ce4..d6ca92f6c3 100644 --- a/requirements/requirements-py-3.10.txt +++ b/requirements/requirements-py-3.10.txt @@ -6,8 +6,8 @@ # accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 -aio-pika==6.8.1 -aiormq==3.3.1 +aio-pika==9.4.0 +aiormq==6.8.0 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 @@ -78,7 +78,7 @@ jupyter-server==2.6.0 jupyter-server-terminals==0.4.4 jupyterlab-pygments==0.2.2 jupyterlab-widgets==3.0.7 -kiwipy[rmq]==0.7.7 +kiwipy[rmq]==0.8.4 kiwisolver==1.4.4 latexcodec==2.0.1 mako==1.2.4 @@ -106,7 +106,7 @@ numpy==1.25.0 overrides==7.3.1 packaging==23.1 palettable==3.3.3 -pamqp==2.3.0 +pamqp==3.3.0 pandas==2.0.2 pandocfilters==1.5.0 paramiko==2.12.0 @@ -120,7 +120,7 @@ pillow==9.5.0 platformdirs==3.6.0 plotly==5.15.0 pluggy==1.0.0 -plumpy==0.21.8 +plumpy==0.22.3 prometheus-client==0.17.0 prompt-toolkit==3.0.38 psutil==5.9.5 diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt index db6593c6ba..95347a0980 100644 --- a/requirements/requirements-py-3.11.txt +++ b/requirements/requirements-py-3.11.txt @@ -6,8 +6,8 @@ # accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 -aio-pika==6.8.1 -aiormq==3.3.1 +aio-pika==9.4.0 
+aiormq==6.8.0 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 @@ -77,7 +77,7 @@ jupyter-server==2.6.0 jupyter-server-terminals==0.4.4 jupyterlab-pygments==0.2.2 jupyterlab-widgets==3.0.7 -kiwipy[rmq]==0.7.7 +kiwipy[rmq]==0.8.4 kiwisolver==1.4.4 latexcodec==2.0.1 mako==1.2.4 @@ -105,7 +105,7 @@ numpy==1.25.0 overrides==7.3.1 packaging==23.1 palettable==3.3.3 -pamqp==2.3.0 +pamqp==3.3.0 pandas==2.0.2 pandocfilters==1.5.0 paramiko==2.12.0 @@ -119,7 +119,7 @@ pillow==9.5.0 platformdirs==3.6.0 plotly==5.15.0 pluggy==1.0.0 -plumpy==0.21.8 +plumpy==0.22.3 prometheus-client==0.17.0 prompt-toolkit==3.0.38 psutil==5.9.5 diff --git a/requirements/requirements-py-3.12.txt b/requirements/requirements-py-3.12.txt index 78f4e3a8f5..15d59944df 100644 --- a/requirements/requirements-py-3.12.txt +++ b/requirements/requirements-py-3.12.txt @@ -6,8 +6,8 @@ # accessible-pygments==0.0.4 aiida-export-migration-tests==0.9.0 -aio-pika==6.8.1 -aiormq==3.3.1 +aio-pika==9.4.0 +aiormq==6.8.0 alabaster==0.7.13 alembic==1.12.0 aniso8601==9.0.1 @@ -77,7 +77,7 @@ jupyter-server==2.8.0 jupyter-server-terminals==0.4.4 jupyterlab-pygments==0.2.2 jupyterlab-widgets==3.0.9 -kiwipy[rmq]==0.7.8 +kiwipy[rmq]==0.8.4 kiwisolver==1.4.5 latexcodec==2.0.1 mako==1.2.4 @@ -105,7 +105,7 @@ numpy==1.26.1 overrides==7.4.0 packaging==23.2 palettable==3.3.3 -pamqp==2.3.0 +pamqp==3.3.0 pandas==2.1.1 pandocfilters==1.5.0 paramiko==2.12.0 @@ -119,7 +119,7 @@ pillow==10.1.0 platformdirs==3.11.0 plotly==5.17.0 pluggy==1.3.0 -plumpy==0.21.10 +plumpy==0.22.3 prometheus-client==0.17.1 prompt-toolkit==3.0.39 psutil==5.9.6 diff --git a/requirements/requirements-py-3.9.txt b/requirements/requirements-py-3.9.txt index a576ca238d..1a7d1b2704 100644 --- a/requirements/requirements-py-3.9.txt +++ b/requirements/requirements-py-3.9.txt @@ -6,8 +6,8 @@ # accessible-pygments==0.0.5 aiida-export-migration-tests==0.9.0 -aio-pika==6.8.1 -aiormq==3.3.1 +aio-pika==9.4.0 +aiormq==6.8.0 alabaster==0.7.13 alembic==1.11.1 aniso8601==9.0.1 @@ -80,7 +80,7 @@ jupyter-server==2.6.0 jupyter-server-terminals==0.4.4 jupyterlab-pygments==0.2.2 jupyterlab-widgets==3.0.7 -kiwipy[rmq]==0.7.7 +kiwipy[rmq]==0.8.4 kiwisolver==1.4.4 latexcodec==2.0.1 mako==1.2.4 @@ -108,7 +108,7 @@ numpy==1.25.0 overrides==7.3.1 packaging==23.1 palettable==3.3.3 -pamqp==2.3.0 +pamqp==3.3.0 pandas==2.0.2 pandocfilters==1.5.0 paramiko==2.12.0 @@ -122,7 +122,7 @@ pillow==9.5.0 platformdirs==3.6.0 plotly==5.15.0 pluggy==1.0.0 -plumpy==0.21.8 +plumpy==0.22.3 prometheus-client==0.17.0 prompt-toolkit==3.0.38 psutil==5.9.5 diff --git a/src/aiida/brokers/rabbitmq/broker.py b/src/aiida/brokers/rabbitmq/broker.py index 5321f6d400..c4ecfa2400 100644 --- a/src/aiida/brokers/rabbitmq/broker.py +++ b/src/aiida/brokers/rabbitmq/broker.py @@ -122,4 +122,4 @@ def get_rabbitmq_version(self): """ from packaging.version import parse - return parse(self.get_communicator().server_properties['version'].decode('utf-8')) + return parse(self.get_communicator().server_properties['version']) diff --git a/src/aiida/engine/processes/process.py b/src/aiida/engine/processes/process.py index f4dc9f9d69..5eabfd56f7 100644 --- a/src/aiida/engine/processes/process.py +++ b/src/aiida/engine/processes/process.py @@ -39,9 +39,10 @@ import plumpy.futures import plumpy.persistence import plumpy.processes -from aio_pika.exceptions import ConnectionClosed from kiwipy.communications import UnroutableError from plumpy.process_states import Finished, ProcessState +from plumpy.processes import ConnectionClosed # type: ignore[attr-defined] 
+from plumpy.processes import Process as PlumpyProcess from plumpy.utils import AttributesFrozendict from aiida import orm @@ -66,7 +67,7 @@ @plumpy.persistence.auto_persist('_parent_pid', '_enable_persistence') -class Process(plumpy.processes.Process): +class Process(PlumpyProcess): """This class represents an AiiDA process which can be executed and will have full provenance saved in the database. """ diff --git a/tests/manage/test_manager.py b/tests/manage/test_manager.py new file mode 100644 index 0000000000..4359a2ab48 --- /dev/null +++ b/tests/manage/test_manager.py @@ -0,0 +1,33 @@ +"""Tests for :mod:`aiida.manage.manager`.""" + +import pytest +from aiida import engine, orm + + +@engine.calcfunction +def add_calcfunction(data): + return orm.Int(data.value + 1) + + +@pytest.mark.requires_rmq +def test_disconnect(): + """Test the communicator disconnect. + + When the dependency ``kiwipy`` was updated to v0.8, it introduced a problem with shutting down the communicator. + After at least one process would have been run, trying to disconnect the communcitor would time out. The problem + is related to the update of the lower lying libraries ``aio-pika`` and ``aiormq`` to v9.4 and v6.8, respectively. + After much painstaking debugging the cause could not be determined, nor a solution. This test is added to + demonstrate the problematic behavior. Getting the communicator and then disconnecting it (through calling + :meth:`aiida.manage.manager.Manager.reset_profile`) works fine. However, if a process is a run before closing it, + for example running a calcfunction, the closing of the communicator will raise a ``TimeoutError``. + """ + from aiida.manage import get_manager + + manager = get_manager() + manager.get_communicator() + manager.reset_profile() # This returns just fine + + result, node = add_calcfunction.run_get_node(1) + assert node.is_finished_ok + assert result == 2 + manager.reset_profile() # This hangs before timing out From e91371573a84d4a68d6107f33c392b8718f2f26f Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 13 Jul 2023 15:28:23 +0200 Subject: [PATCH 42/82] `Manager`: Catch `TimeoutError` when closing communicator The exception is caught and logged as a warning. 
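To make the intent concrete, the pattern can be summarised with a short, self-contained sketch (the helper function and dummy broker below are illustrative only; the actual change lives in `Manager.reset_broker`, shown in the diff that follows):

    import logging
    from concurrent import futures

    def close_broker_tolerating_timeout(broker, logger: logging.Logger) -> None:
        """Close the broker, demoting a ``TimeoutError`` to a logged warning (sketch only)."""
        if broker is None:
            return
        try:
            broker.close()
        except futures.TimeoutError as exception:
            # Closing can hang and time out once at least one process has been run (see the
            # previous commit); log a warning instead of letting the exception propagate.
            logger.warning(f'Call to close the broker timed out: {exception}')

    class DummyBroker:
        """Stand-in broker whose ``close`` simulates the hang observed with ``kiwipy==0.8``."""

        def close(self):
            raise futures.TimeoutError('simulated hang')

    close_broker_tolerating_timeout(DummyBroker(), logging.getLogger(__name__))  # only logs a warning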
--- src/aiida/manage/manager.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/aiida/manage/manager.py b/src/aiida/manage/manager.py index c84b39f903..8621b324f4 100644 --- a/src/aiida/manage/manager.py +++ b/src/aiida/manage/manager.py @@ -68,6 +68,8 @@ class Manager: def __init__(self) -> None: """Construct a new instance.""" + from aiida.common.log import AIIDA_LOGGER + # note: the config currently references the global variables self._broker: Optional['Broker'] = None self._profile: Optional['Profile'] = None @@ -76,6 +78,7 @@ def __init__(self) -> None: self._process_controller: Optional['RemoteProcessThreadController'] = None self._persister: Optional['AiiDAPersister'] = None self._runner: Optional['Runner'] = None + self.logger = AIIDA_LOGGER.getChild(__name__) @staticmethod def get_config(create=False) -> 'Config': @@ -165,8 +168,15 @@ def reset_profile_storage(self) -> None: def reset_broker(self) -> None: """Reset the communicator.""" + from concurrent import futures + if self._broker is not None: + try: + self._broker.close() + except futures.TimeoutError as exception: + self.logger.warning(f'Call to close the broker timed out: {exception}') self._broker.close() + self._broker = None self._process_controller = None From 89cd03c0d05881bb5bb7b36cf91a150d79922896 Mon Sep 17 00:00:00 2001 From: Kevin Lefrancois-Gagnon <138684774+kmlefran@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:25:34 -0400 Subject: [PATCH 43/82] Docs: Fix typo in pytest plugins codeblock (#6513) --- docs/source/topics/plugins.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/topics/plugins.rst b/docs/source/topics/plugins.rst index bb371d6d63..6e7c44d1bc 100644 --- a/docs/source/topics/plugins.rst +++ b/docs/source/topics/plugins.rst @@ -344,7 +344,7 @@ To make use of these fixtures, create a ``conftest.py`` file in your ``tests`` f .. code-block:: python - pytest_plugins = 'aiida.tools.pytest_fixtures + pytest_plugins = 'aiida.tools.pytest_fixtures' Just by adding this line, the fixtures that are provided by the :mod:`~aiida.tools.pytest_fixtures` module are automatically imported. The module provides the following fixtures: From 11eefc9c5a8fdfd232d6845b462966bcdba969d8 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 4 Jul 2024 11:13:21 +0200 Subject: [PATCH 44/82] Docs: Add `PluggableSchemaValidator` to nitpick exceptions (#6515) This class comes from `pydantic` and as of `pydantic==2.8.2` this is causing a warning because Sphinx cannot find the cross-reference. --- docs/source/nitpick-exceptions | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/nitpick-exceptions b/docs/source/nitpick-exceptions index 3e6bd2b5bb..cdfa6151e3 100644 --- a/docs/source/nitpick-exceptions +++ b/docs/source/nitpick-exceptions @@ -142,6 +142,7 @@ py:meth fail py:class ComputedFieldInfo py:class pydantic.fields.Field py:class pydantic.main.BaseModel +py:class PluggableSchemaValidator py:class requests.models.Response py:class requests.Response From 6d2edc919e3340b67d8097c425a5e5f6971707f8 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 4 Jul 2024 15:25:13 +0200 Subject: [PATCH 45/82] CLI: Accept mulitple node identifiers in `verdi node graph generate` (#6443) The `--identifier` option allows the user to specify which identifier to label nodes in the graph with: pk, uuid or label. 
Here, the interface is updated to allow specifying multiple identifiers, e.g.: verdi node graph generate --identifier pk uuid -- If more than one identifier type is specified, the resulting identifiers for each node are joined using a `|` character. --- src/aiida/cmdline/commands/cmd_node.py | 9 +++-- src/aiida/tools/visualization/graph.py | 30 ++++++++------ tests/cmdline/commands/test_node.py | 2 +- tests/tools/visualization/test_graph.py | 39 +++++++++++++++++-- .../test_graph_node_identifiers_label_.txt | 9 +++++ ..._graph_node_identifiers_node_id_type3_.txt | 9 +++++ ..._graph_node_identifiers_node_id_type4_.txt | 9 +++++ .../test_graph_node_identifiers_pk_.txt | 9 +++++ .../test_graph_node_identifiers_uuid_.txt | 9 +++++ 9 files changed, 106 insertions(+), 19 deletions(-) create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_label_.txt create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type3_.txt create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type4_.txt create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_pk_.txt create mode 100644 tests/tools/visualization/test_graph/test_graph_node_identifiers_uuid_.txt diff --git a/src/aiida/cmdline/commands/cmd_node.py b/src/aiida/cmdline/commands/cmd_node.py index 8e6ae8fba0..79efcebcef 100644 --- a/src/aiida/cmdline/commands/cmd_node.py +++ b/src/aiida/cmdline/commands/cmd_node.py @@ -15,6 +15,7 @@ from aiida.cmdline.commands.cmd_verdi import verdi from aiida.cmdline.params import arguments, options +from aiida.cmdline.params.options.multivalue import MultipleValueOption from aiida.cmdline.params.types.plugin import PluginParamType from aiida.cmdline.utils import decorators, echo, echo_tabulate, multi_line_input from aiida.cmdline.utils.decorators import with_dbenv @@ -439,8 +440,10 @@ def verdi_graph(): ) @click.option( '--identifier', + 'identifiers', help='the type of identifier to use within the node text', - default='uuid', + default=('uuid',), + cls=MultipleValueOption, type=click.Choice(['pk', 'uuid', 'label']), ) @click.option( @@ -483,7 +486,7 @@ def verdi_graph(): def graph_generate( root_nodes, link_types, - identifier, + identifiers, ancestor_depth, descendant_depth, process_out, @@ -506,7 +509,7 @@ def graph_generate( output_file = pathlib.Path(f'{pks}.{engine}.{output_format}') echo.echo_info(f'Initiating graphviz engine: {engine}') - graph = Graph(engine=engine, node_id_type=identifier) + graph = Graph(engine=engine, node_id_type=identifiers) link_types = {'all': (), 'logic': ('input_work', 'return'), 'data': ('input_calc', 'create')}[link_types] for root_node in root_nodes: diff --git a/src/aiida/tools/visualization/graph.py b/src/aiida/tools/visualization/graph.py index 2fe7f4250e..91411796ea 100644 --- a/src/aiida/tools/visualization/graph.py +++ b/src/aiida/tools/visualization/graph.py @@ -29,6 +29,7 @@ __all__ = ('Graph', 'default_link_styles', 'default_node_styles', 'pstate_node_styles', 'default_node_sublabels') LinkAnnotateType = Literal[None, 'label', 'type', 'both'] +IdentifierType = Literal['pk', 'uuid', 'label'] class LinkStyleFunc(Protocol): @@ -254,18 +255,25 @@ def default_node_sublabels(node: orm.Node) -> str: return sublabel -def get_node_id_label(node: orm.Node, id_type: Literal['pk', 'uuid', 'label']) -> str: +NODE_IDENTIFIER_TO_LABEL = { + 'pk': lambda node: str(node.pk), + 'uuid': lambda node: node.uuid.split('-')[0], + 'label': lambda node: node.label, +} + + +def 
get_node_id_label(node: orm.Node, id_type: IdentifierType | list[IdentifierType]) -> str: """Return an identifier str for the node""" - if id_type == 'pk': - return str(node.pk) - if id_type == 'uuid': - return node.uuid.split('-')[0] - if id_type == 'label': - return node.label - raise ValueError(f'node_id_type not recognised: {id_type}') + + id_types = id_type if isinstance(id_type, (list, tuple)) else [id_type] + + try: + return '|'.join(NODE_IDENTIFIER_TO_LABEL[key](node) for key in id_types) + except KeyError as exception: + raise ValueError(f'`{id_type}` is not a valid `node_id_type`, choose from: pk, uuid, label') from exception -def _get_node_label(node: orm.Node, id_type: Literal['pk', 'uuid', 'label'] = 'pk') -> str: +def _get_node_label(node: orm.Node, id_type: IdentifierType | list[IdentifierType] = 'pk') -> str: """Return a label text of node and the return format is ' ()'.""" if isinstance(node, orm.Data): label = f'{node.__class__.__name__} ({get_node_id_label(node, id_type)})' @@ -287,7 +295,7 @@ def _add_graphviz_node( node_sublabel_func, style_override: None | dict = None, include_sublabels: bool = True, - id_type: Literal['pk', 'uuid', 'label'] = 'pk', + id_type: IdentifierType | list[IdentifierType] = 'pk', ): """Create a node in the graph @@ -360,7 +368,7 @@ def __init__( link_style_fn: LinkStyleFunc | None = None, node_style_fn: Callable[[orm.Node], dict] | None = None, node_sublabel_fn: Callable[[orm.Node], str] | None = None, - node_id_type: Literal['pk', 'uuid', 'label'] = 'pk', + node_id_type: IdentifierType | list[IdentifierType] = 'pk', backend: StorageBackend | None = None, ): """A class to create graphviz graphs of the AiiDA node provenance diff --git a/tests/cmdline/commands/test_node.py b/tests/cmdline/commands/test_node.py index d9c7774556..66ca83b686 100644 --- a/tests/cmdline/commands/test_node.py +++ b/tests/cmdline/commands/test_node.py @@ -392,7 +392,7 @@ def test_node_id_label_format(self, run_cli_command): filename = f'{root_node}.dot.pdf' for id_label_type in ['uuid', 'pk', 'label']: - options = ['--identifier', id_label_type, root_node] + options = ['--identifier', id_label_type, '--', root_node] try: run_cli_command(cmd_node.graph_generate, options) assert os.path.isfile(filename) diff --git a/tests/tools/visualization/test_graph.py b/tests/tools/visualization/test_graph.py index 17bda9993f..ba47b335b2 100644 --- a/tests/tools/visualization/test_graph.py +++ b/tests/tools/visualization/test_graph.py @@ -8,6 +8,8 @@ ########################################################################### """Tests for creating graphs (using graphviz)""" +import re + import pytest from aiida import orm from aiida.common import AttributeDict @@ -290,10 +292,6 @@ def test_graph_graphviz_source_pstate(self): graph = graph_mod.Graph(node_style_fn=graph_mod.pstate_node_styles) graph.recurse_descendants(nodes.pd0) - # print() - # print(graph.graphviz.source) - # graph.graphviz.render("test_graphviz_pstate", cleanup=True) - expected = """\ digraph {{ N{pd0} [label="Dict ({pd0})" color=red pencolor=black penwidth=6 shape=rectangle] @@ -325,3 +323,36 @@ def test_graph_graphviz_source_pstate(self): assert sorted([line.strip() for line in graph.graphviz.source.splitlines()]) == sorted( [line.strip() for line in expected.splitlines()] ) + + @pytest.mark.parametrize( + 'node_id_type', + ( + 'pk', + 'uuid', + 'label', + ('pk', 'uuid'), + ('pk', 'label'), + ), + ) + def test_graph_node_identifiers(self, node_id_type, monkeypatch, file_regression): + """.""" + nodes = 
self.create_provenance() + + # Monkeypatch the mapping of lambdas that convert return a node's identifier in string form. This is because + # the pks and uuids of the test nodes will change between each test run and this would fail the file regression. + node_identifier_to_label = { + 'pk': lambda node: '10', + 'uuid': lambda node: '16739459', + 'label': lambda node: 'some-label', + } + monkeypatch.setattr(graph_mod, 'NODE_IDENTIFIER_TO_LABEL', node_identifier_to_label) + + graph = graph_mod.Graph(node_id_type=node_id_type) + graph.recurse_descendants(nodes.calcf1) + + # The order of certain output lines can be randomly ordered so we split the file in lines, sort, and then join + # them into a single string again. The node identifiers generated by the engine are of the form ``N{pk}`` and + # they also clearly vary, so they are replaced with the ``NODE`` placeholder. + string = '\n'.join(sorted(graph.graphviz.source.strip().split('\n'))) + string = re.sub(r'N\d+', 'NODE', string) + file_regression.check(string) diff --git a/tests/tools/visualization/test_graph/test_graph_node_identifiers_label_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_label_.txt new file mode 100644 index 0000000000..2c23527560 --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_label_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (some-label) + NODE [label="Dict (some-label)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (some-label)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file diff --git a/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type3_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type3_.txt new file mode 100644 index 0000000000..7644ab1562 --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type3_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (10|16739459) + NODE [label="Dict (10|16739459)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (10|16739459)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file diff --git a/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type4_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type4_.txt new file mode 100644 index 0000000000..87b15dcf5c --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_node_id_type4_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (10|some-label) + NODE [label="Dict (10|some-label)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (10|some-label)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file diff --git 
a/tests/tools/visualization/test_graph/test_graph_node_identifiers_pk_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_pk_.txt new file mode 100644 index 0000000000..ec6b88d6c2 --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_pk_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (10) + NODE [label="Dict (10)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (10)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file diff --git a/tests/tools/visualization/test_graph/test_graph_node_identifiers_uuid_.txt b/tests/tools/visualization/test_graph/test_graph_node_identifiers_uuid_.txt new file mode 100644 index 0000000000..b4ca47ff60 --- /dev/null +++ b/tests/tools/visualization/test_graph/test_graph_node_identifiers_uuid_.txt @@ -0,0 +1,9 @@ + NODE -> NODE [color="#000000" style=solid] + NODE -> NODE [color="#000000" style=solid] + NODE [label="CalcFunctionNode (16739459) + NODE [label="Dict (16739459)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] + NODE [label="FolderData (16739459)" fillcolor="#8cd499ff" penwidth=0 shape=ellipse style=filled] +Exit Code: 200" color=red fillcolor="#de707f77" penwidth=6 shape=rectangle style=filled] +State: finished +digraph { +} \ No newline at end of file From f9924437070c67a9505f2f3b70a2d6d303acd38a Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 5 Jul 2024 09:00:54 +0200 Subject: [PATCH 46/82] Engine: Fix bug in upload calculation for `PortableCode` with SSH (#6519) When a `CalcJob` would be run with a `PortableCode` using a computer configured with the `core.ssh` transport plugin, the upload task would except. The `aiida.engine.daemon.execmanager.upload_calculation` method is passing `pathlib.Path` objects to the transport interface which is not supported. By chance this does not raise an exception when using the `LocalTransport`, but the `SshTransport` passes these values to the paramiko library which does choke on anything but strings. The use of a `PortableCode` was tested for in the unit test `tests/engine/processes/calcjobs/test_calc_job.py:test_portable_code` but this would only use a local transport and thus the bug would not appear. Parametrizing it to also use the `SshTransport` wouldn't help since the test uses `metadata.dry_run = True`, whose implementation will always swap the transport to a local one, still avoiding the bugged code pathway. Instead a test is added that directly calls `upload_calculation` which parametrizes over all installed transport plugins and uses a `PortableCode`. This confirmed the bug. The `upload_calculation` method is updated to ensure casting all `pathlib.Path` objects to `str` before passing it to the transport. 
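In short, every path-like value is now cast with `str()` immediately before it is handed to the transport. A minimal, self-contained sketch of the idea (the variable names are illustrative; the calls actually touched are `makedirs`, `put` and `chmod`, see the diff below):

    import pathlib

    # Hypothetical repository-relative paths as yielded by ``code.base.repository.walk()``.
    root = pathlib.Path('sub')
    filename = 'some-file'

    # The transport should only ever receive plain strings, never ``pathlib.Path`` objects,
    # because e.g. ``SshTransport`` forwards its arguments directly to paramiko.
    target_dir = str(root)              # value passed to ``transport.makedirs(...)``
    target_file = str(root / filename)  # value passed to ``transport.put(...)``

    assert isinstance(target_dir, str) and isinstance(target_file, str)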
--- src/aiida/engine/daemon/execmanager.py | 8 +++---- tests/engine/daemon/test_execmanager.py | 31 ++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/src/aiida/engine/daemon/execmanager.py b/src/aiida/engine/daemon/execmanager.py index f7517a0580..6f2a42fa15 100644 --- a/src/aiida/engine/daemon/execmanager.py +++ b/src/aiida/engine/daemon/execmanager.py @@ -178,11 +178,11 @@ def upload_calculation( # Note: this will possibly overwrite files for root, dirnames, filenames in code.base.repository.walk(): # mkdir of root - transport.makedirs(root, ignore_existing=True) + transport.makedirs(str(root), ignore_existing=True) # remotely mkdir first for dirname in dirnames: - transport.makedirs((root / dirname), ignore_existing=True) + transport.makedirs(str(root / dirname), ignore_existing=True) # Note, once #2579 is implemented, use the `node.open` method instead of the named temporary file in # combination with the new `Transport.put_object_from_filelike` @@ -192,8 +192,8 @@ def upload_calculation( content = code.base.repository.get_object_content((pathlib.Path(root) / filename), mode='rb') handle.write(content) handle.flush() - transport.put(handle.name, (root / filename)) - transport.chmod(code.filepath_executable, 0o755) # rwxr-xr-x + transport.put(handle.name, str(root / filename)) + transport.chmod(str(code.filepath_executable), 0o755) # rwxr-xr-x # local_copy_list is a list of tuples, each with (uuid, dest_path, rel_path) # NOTE: validation of these lists are done inside calculation.presubmit() diff --git a/tests/engine/daemon/test_execmanager.py b/tests/engine/daemon/test_execmanager.py index d5fc8fdbcc..bb4209659d 100644 --- a/tests/engine/daemon/test_execmanager.py +++ b/tests/engine/daemon/test_execmanager.py @@ -15,7 +15,7 @@ from aiida.common.datastructures import CalcInfo, CodeInfo, FileCopyOperation from aiida.common.folders import SandboxFolder from aiida.engine.daemon import execmanager -from aiida.orm import CalcJobNode, FolderData, RemoteData, SinglefileData +from aiida.orm import CalcJobNode, FolderData, PortableCode, RemoteData, SinglefileData from aiida.plugins import entry_point from aiida.transports.plugins.local import LocalTransport @@ -585,3 +585,32 @@ def test_upload_combinations( filepath_workdir = pathlib.Path(node.get_remote_workdir()) assert serialize_file_hierarchy(filepath_workdir, read_bytes=False) == expected_hierarchy + + +def test_upload_calculation_portable_code(fixture_sandbox, node_and_calc_info, tmp_path): + """Test ``upload_calculation`` with a ``PortableCode`` for different transports. + + Regression test for https://github.com/aiidateam/aiida-core/issues/6518 + """ + subdir = tmp_path / 'sub' + subdir.mkdir() + (subdir / 'some-file').write_bytes(b'sub dummy') + (tmp_path / 'bash').write_bytes(b'bash implementation') + + code = PortableCode( + filepath_executable='bash', + filepath_files=tmp_path, + ).store() + + node, calc_info = node_and_calc_info + code_info = CodeInfo() + code_info.code_uuid = code.uuid + calc_info.codes_info = [code_info] + + with node.computer.get_transport() as transport: + execmanager.upload_calculation( + node, + transport, + calc_info, + fixture_sandbox, + ) From 6196dcd3b321758ae8dfb84b22a59e1c77d8e933 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Fri, 5 Jul 2024 09:49:20 +0200 Subject: [PATCH 47/82] `SqliteDosStorage`: Make the migrator compatible with SQLite (#6429) The majority of the `SqliteDosStorage` piggy-backs off of the `PsqlDosStorage` plugin. 
It also uses the `PsqlDosMigrator` as-is to perform the database migrations. This is not safe however, as PostgreSQL and SQLite do not have exactly the same syntax. An example is the `main_0002` revision which was added to drop the hashes of certain nodes. This uses the `#-` operator which is JSONB specific syntax of PostgreSQL and is not supported by SQLite. Since this migration was added before the `SqliteDosStorage` plugin was added, this has never caused a problems as all profiles would be new, would not have any nodes and therefore the SQL code of the migration would not actually be executed. In preparation for any future migrations that may need to be added, the `SqliteDosStorage` now uses the `SqliteDosMigrator`. This subclasses the `PsqlDosMigrator` as it can still use most of the functionality, but it changes a few critical things. Most notably the location of the schema versions which now are kept individually and are no longer lent from the `core.psql_dos` plugin. The initial version `main_0001_initial.py` is taken from the migration `main_0000_initial.py` of the `core.sqlite_zip` storage plugin. The only difference is that UUID fields are declared as `String(32)` instead of `CHAR(32)`. The SQLAlchemy models that are automatically generated for SQLite from the PostgreSQL-based models actually use the latter type. See `aiida.storage.sqlite_zip.models:pg_to_sqlite`. --- src/aiida/storage/migrations.py | 8 + src/aiida/storage/psql_dos/migrator.py | 8 +- src/aiida/storage/sqlite_dos/backend.py | 123 +++++++- .../storage/sqlite_dos/migrations/env.py | 54 ++++ .../migrations/versions/main_0001_initial.py | 198 +++++++++++++ .../main_0002_recompute_hash_calc_job_node.py | 84 ++++++ .../storage/sqlite_zip/migrations/env.py | 2 +- tests/cmdline/commands/test_status.py | 2 + .../storage/sqlite_dos/migrations/conftest.py | 76 +++++ .../sqlite_dos/migrations/test_all_schema.py | 49 ++++ .../test_head_vs_orm_main_0002_.yml | 269 ++++++++++++++++++ .../test_all_schema/test_main_main_0001_.yml | 255 +++++++++++++++++ .../test_all_schema/test_main_main_0002_.yml | 255 +++++++++++++++++ 13 files changed, 1360 insertions(+), 23 deletions(-) create mode 100644 src/aiida/storage/migrations.py create mode 100644 src/aiida/storage/sqlite_dos/migrations/env.py create mode 100644 src/aiida/storage/sqlite_dos/migrations/versions/main_0001_initial.py create mode 100644 src/aiida/storage/sqlite_dos/migrations/versions/main_0002_recompute_hash_calc_job_node.py create mode 100644 tests/storage/sqlite_dos/migrations/conftest.py create mode 100644 tests/storage/sqlite_dos/migrations/test_all_schema.py create mode 100644 tests/storage/sqlite_dos/migrations/test_all_schema/test_head_vs_orm_main_0002_.yml create mode 100644 tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0001_.yml create mode 100644 tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0002_.yml diff --git a/src/aiida/storage/migrations.py b/src/aiida/storage/migrations.py new file mode 100644 index 0000000000..c37cbab641 --- /dev/null +++ b/src/aiida/storage/migrations.py @@ -0,0 +1,8 @@ +"""Module with common resources related to storage migrations.""" + +TEMPLATE_INVALID_SCHEMA_VERSION = """ +Database schema version `{schema_version_database}` is incompatible with the required schema version `{schema_version_code}`. 
+To migrate the database schema version to the current one, run the following command: + + verdi -p {profile_name} storage migrate +""" # noqa: E501 diff --git a/src/aiida/storage/psql_dos/migrator.py b/src/aiida/storage/psql_dos/migrator.py index 3ea36b9307..5251fd49de 100644 --- a/src/aiida/storage/psql_dos/migrator.py +++ b/src/aiida/storage/psql_dos/migrator.py @@ -33,6 +33,7 @@ from aiida.common import exceptions from aiida.manage.configuration.profile import Profile from aiida.storage.log import MIGRATE_LOGGER +from aiida.storage.migrations import TEMPLATE_INVALID_SCHEMA_VERSION from aiida.storage.psql_dos.models.settings import DbSetting from aiida.storage.psql_dos.utils import create_sqlalchemy_engine @@ -46,13 +47,6 @@ verdi -p {profile_name} storage migrate """ -TEMPLATE_INVALID_SCHEMA_VERSION = """ -Database schema version `{schema_version_database}` is incompatible with the required schema version `{schema_version_code}`. -To migrate the database schema version to the current one, run the following command: - - verdi -p {profile_name} storage migrate -""" # noqa: E501 - ALEMBIC_REL_PATH = 'migrations' REPOSITORY_UUID_KEY = 'repository|uuid' diff --git a/src/aiida/storage/sqlite_dos/backend.py b/src/aiida/storage/sqlite_dos/backend.py index 3b13764b3d..7be70f4a1c 100644 --- a/src/aiida/storage/sqlite_dos/backend.py +++ b/src/aiida/storage/sqlite_dos/backend.py @@ -10,31 +10,36 @@ from __future__ import annotations +import pathlib from functools import cached_property, lru_cache from pathlib import Path from shutil import rmtree from typing import TYPE_CHECKING, Optional from uuid import uuid4 +from alembic.config import Config from disk_objectstore import Container, backup_utils from pydantic import BaseModel, Field, field_validator -from sqlalchemy import insert +from sqlalchemy import insert, inspect, select from sqlalchemy.orm import scoped_session, sessionmaker from aiida.common import exceptions from aiida.common.log import AIIDA_LOGGER -from aiida.manage import Profile +from aiida.manage.configuration.profile import Profile from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER from aiida.orm.implementation import BackendEntity +from aiida.storage.log import MIGRATE_LOGGER from aiida.storage.psql_dos.models.settings import DbSetting from aiida.storage.sqlite_zip import models, orm -from aiida.storage.sqlite_zip.migrator import get_schema_version_head from aiida.storage.sqlite_zip.utils import create_sqla_engine +from ..migrations import TEMPLATE_INVALID_SCHEMA_VERSION from ..psql_dos import PsqlDosBackend -from ..psql_dos.migrator import REPOSITORY_UUID_KEY, PsqlDosMigrator +from ..psql_dos.migrator import PsqlDosMigrator if TYPE_CHECKING: + from disk_objectstore import Container + from aiida.orm.entities import EntityTypes from aiida.repository.backend import DiskObjectStoreRepositoryBackend @@ -45,15 +50,26 @@ FILENAME_CONTAINER = 'container' +ALEMBIC_REL_PATH = 'migrations' + +REPOSITORY_UUID_KEY = 'repository|uuid' + + class SqliteDosMigrator(PsqlDosMigrator): - """Storage implementation using Sqlite database and disk-objectstore container. + """Class for validating and migrating `sqlite_dos` storage instances. - This storage backend is not recommended for use in production. The sqlite database is not the most performant and it - does not support all the ``QueryBuilder`` functionality that is supported by the ``core.psql_dos`` storage backend. 
- This storage is ideally suited for use cases that want to test or demo AiiDA as it requires no server but just a - folder on the local filesystem. + .. important:: This class should only be accessed via the storage backend class (apart from for test purposes) + + The class subclasses the ``PsqlDosMigrator``. It essentially changes two things in the implementation: + + * Changes the path to the migration version files. This allows custom migrations to be written for SQLite-based + storage plugins, which is necessary since the PSQL-based migrations may use syntax that is not compatible. + * The logic for validating the storage is significantly simplified since the SQLite-based storage plugins do not + have to take legacy Django-based implementations into account. """ + alembic_version_tbl_name = 'alembic_version' + def __init__(self, profile: Profile) -> None: filepath_database = Path(profile.storage_config['filepath']) / FILENAME_DATABASE filepath_database.touch() @@ -91,6 +107,86 @@ def initialise_database(self) -> None: context.stamp(context.script, 'main@head') # type: ignore[arg-type] self.connection.commit() + def get_schema_version_profile(self) -> Optional[str]: # type: ignore[override] + """Return the schema version of the backend instance for this profile. + + Note, the version will be None if the database is empty or is a legacy django database. + """ + with self._migration_context() as context: + return context.get_current_revision() + + @staticmethod + def _alembic_config(): + """Return an instance of an Alembic `Config`.""" + dirpath = pathlib.Path(__file__).resolve().parent + config = Config() + config.set_main_option('script_location', str(dirpath / ALEMBIC_REL_PATH)) + return config + + def validate_storage(self) -> None: + """Validate that the storage for this profile + + 1. That the database schema is at the head version, i.e. is compatible with the code API. + 2. That the repository ID is equal to the UUID set in the database + + :raises: :class:`aiida.common.exceptions.UnreachableStorage` if the storage cannot be connected to + :raises: :class:`aiida.common.exceptions.IncompatibleStorageSchema` + if the storage is not compatible with the code API. + :raises: :class:`aiida.common.exceptions.CorruptStorage` + if the repository ID is not equal to the UUID set in thedatabase. + """ + # check there is an alembic_version table from which to get the schema version + if not inspect(self.connection).has_table(self.alembic_version_tbl_name): + raise exceptions.IncompatibleStorageSchema('The database has no known version.') + + # now we can check that the alembic version is the latest + schema_version_code = self.get_schema_version_head() + schema_version_database = self.get_schema_version_profile() + if schema_version_database != schema_version_code: + raise exceptions.IncompatibleStorageSchema( + TEMPLATE_INVALID_SCHEMA_VERSION.format( + schema_version_database=schema_version_database, + schema_version_code=schema_version_code, + profile_name=self.profile.name, + ) + ) + + # finally, we check that the ID set within the disk-objectstore is equal to the one saved in the database, + # i.e. 
this container is indeed the one associated with the db + repository_uuid = self.get_repository_uuid() + stmt = select(DbSetting.val).where(DbSetting.key == REPOSITORY_UUID_KEY) + database_repository_uuid = self.connection.execute(stmt).scalar_one_or_none() + if database_repository_uuid is None: + raise exceptions.CorruptStorage('The database has no repository UUID set.') + if database_repository_uuid != repository_uuid: + raise exceptions.CorruptStorage( + f'The database has a repository UUID configured to {database_repository_uuid} ' + f"but the disk-objectstore's is {repository_uuid}." + ) + + @property + def is_database_initialised(self) -> bool: + """Return whether the database is initialised. + + This is the case if it contains the table that holds the schema version for alembic. + + :returns: ``True`` if the database is initialised, ``False`` otherwise. + """ + return inspect(self.connection).has_table(self.alembic_version_tbl_name) + + def migrate(self) -> None: + """Migrate the storage for this profile to the head version. + + :raises: :class:`~aiida.common.exceptions.UnreachableStorage` if the storage cannot be accessed. + :raises: :class:`~aiida.common.exceptions.StorageMigrationError` if the storage is not initialised. + """ + if not inspect(self.connection).has_table(self.alembic_version_tbl_name): + raise exceptions.StorageMigrationError('storage is uninitialised, cannot migrate.') + + MIGRATE_LOGGER.report('Migrating to the head of the main branch') + self.migrate_up('main@head') + self.connection.commit() + class SqliteDosStorage(PsqlDosBackend): """A lightweight storage that is easy to install. @@ -178,12 +274,9 @@ def get_repository(self) -> 'DiskObjectStoreRepositoryBackend': return DiskObjectStoreRepositoryBackend(container=self.get_container()) @classmethod - def version_head(cls) -> str: - return get_schema_version_head() - - @classmethod - def version_profile(cls, profile: Profile) -> str | None: - return get_schema_version_head() + def version_profile(cls, profile: Profile) -> Optional[str]: + with cls.migrator_context(profile) as migrator: + return migrator.get_schema_version_profile() def query(self) -> orm.SqliteQueryBuilder: return orm.SqliteQueryBuilder(self) diff --git a/src/aiida/storage/sqlite_dos/migrations/env.py b/src/aiida/storage/sqlite_dos/migrations/env.py new file mode 100644 index 0000000000..e2beb1ad9f --- /dev/null +++ b/src/aiida/storage/sqlite_dos/migrations/env.py @@ -0,0 +1,54 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Environment configuration to be used by alembic to perform database migrations.""" + +from alembic import context + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + The connection should have been passed to the config, which we use to configure the migration context. 
+ """ + from aiida.storage.sqlite_zip.models import SqliteBase + + config = context.config + + connection = config.attributes.get('connection', None) + aiida_profile = config.attributes.get('aiida_profile', None) + on_version_apply = config.attributes.get('on_version_apply', None) + + if connection is None: + from aiida.common.exceptions import ConfigurationError + + raise ConfigurationError('An initialized connection is expected for the AiiDA online migrations.') + if aiida_profile is None: + from aiida.common.exceptions import ConfigurationError + + raise ConfigurationError('An aiida_profile is expected for the AiiDA online migrations.') + + context.configure( + connection=connection, + target_metadata=SqliteBase.metadata, + transaction_per_migration=True, + aiida_profile=aiida_profile, + on_version_apply=on_version_apply, + ) + + context.run_migrations() + + +try: + if context.is_offline_mode(): + raise NotImplementedError('This feature is not currently supported.') + + run_migrations_online() +except NameError: + # This will occur in an environment that is just compiling the documentation + pass diff --git a/src/aiida/storage/sqlite_dos/migrations/versions/main_0001_initial.py b/src/aiida/storage/sqlite_dos/migrations/versions/main_0001_initial.py new file mode 100644 index 0000000000..6af0887766 --- /dev/null +++ b/src/aiida/storage/sqlite_dos/migrations/versions/main_0001_initial.py @@ -0,0 +1,198 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Initial main branch schema + +This schema is mainly equivalent to the `main_0000_initial.py` schema of the `sqlite_zip` backend. Except that UUID +columns use ``String(32)`` instead of ``CHAR(32)``. 
+ +Revision ID: main_0001 +Revises: +Create Date: 2024-05-29 +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects.sqlite import JSON + +revision = 'main_0001' +down_revision = None +branch_labels = ('main',) +depends_on = None + + +def upgrade(): + """Migrations for the upgrade.""" + op.create_table( + 'db_dbcomputer', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('label', sa.String(length=255), nullable=False, unique=True), + sa.Column('hostname', sa.String(length=255), nullable=False), + sa.Column('description', sa.Text(), nullable=False), + sa.Column('scheduler_type', sa.String(length=255), nullable=False), + sa.Column('transport_type', sa.String(length=255), nullable=False), + sa.Column('metadata', JSON(), nullable=False), + ) + op.create_table( + 'db_dbuser', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('email', sa.String(length=254), nullable=False, unique=True), + sa.Column('first_name', sa.String(length=254), nullable=False), + sa.Column('last_name', sa.String(length=254), nullable=False), + sa.Column('institution', sa.String(length=254), nullable=False), + ) + op.create_table( + 'db_dbauthinfo', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('aiidauser_id', sa.Integer(), nullable=False, index=True), + sa.Column('dbcomputer_id', sa.Integer(), nullable=False, index=True), + sa.Column('metadata', JSON(), nullable=False), + sa.Column('auth_params', JSON(), nullable=False), + sa.Column('enabled', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint( + ['aiidauser_id'], + ['db_dbuser.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + sa.ForeignKeyConstraint( + ['dbcomputer_id'], + ['db_dbcomputer.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + sa.UniqueConstraint('aiidauser_id', 'dbcomputer_id'), + ) + op.create_table( + 'db_dbgroup', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('label', sa.String(length=255), nullable=False, index=True), + sa.Column('type_string', sa.String(length=255), nullable=False, index=True), + sa.Column('time', sa.DateTime(timezone=True), nullable=False), + sa.Column('description', sa.Text(), nullable=False), + sa.Column('extras', JSON(), nullable=False), + sa.Column('user_id', sa.Integer(), nullable=False, index=True), + sa.ForeignKeyConstraint( + ['user_id'], + ['db_dbuser.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + sa.UniqueConstraint('label', 'type_string'), + ) + + op.create_table( + 'db_dbnode', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('node_type', sa.String(length=255), nullable=False, index=True), + sa.Column('process_type', sa.String(length=255), nullable=True, index=True), + sa.Column('label', sa.String(length=255), nullable=False, index=True), + sa.Column('description', sa.Text(), nullable=False), + sa.Column('ctime', sa.DateTime(timezone=True), nullable=False, index=True), + sa.Column('mtime', sa.DateTime(timezone=True), nullable=False, index=True), + sa.Column('attributes', JSON(), nullable=True), + sa.Column('extras', JSON(), nullable=True), + sa.Column('repository_metadata', JSON(), nullable=False), + sa.Column('dbcomputer_id', sa.Integer(), nullable=True, 
index=True), + sa.Column('user_id', sa.Integer(), nullable=False, index=True), + sa.ForeignKeyConstraint( + ['dbcomputer_id'], + ['db_dbcomputer.id'], + ondelete='RESTRICT', + initially='DEFERRED', + deferrable=True, + ), + sa.ForeignKeyConstraint( + ['user_id'], + ['db_dbuser.id'], + ondelete='restrict', + initially='DEFERRED', + deferrable=True, + ), + ) + + op.create_table( + 'db_dbcomment', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('dbnode_id', sa.Integer(), nullable=False, index=True), + sa.Column('ctime', sa.DateTime(timezone=True), nullable=False), + sa.Column('mtime', sa.DateTime(timezone=True), nullable=False), + sa.Column('user_id', sa.Integer(), nullable=False, index=True), + sa.Column('content', sa.Text(), nullable=False), + sa.ForeignKeyConstraint( + ['dbnode_id'], + ['db_dbnode.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + sa.ForeignKeyConstraint( + ['user_id'], + ['db_dbuser.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + ) + + op.create_table( + 'db_dbgroup_dbnodes', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('dbnode_id', sa.Integer(), nullable=False, index=True), + sa.Column('dbgroup_id', sa.Integer(), nullable=False, index=True), + sa.ForeignKeyConstraint(['dbgroup_id'], ['db_dbgroup.id'], initially='DEFERRED', deferrable=True), + sa.ForeignKeyConstraint(['dbnode_id'], ['db_dbnode.id'], initially='DEFERRED', deferrable=True), + sa.UniqueConstraint('dbgroup_id', 'dbnode_id'), + ) + op.create_table( + 'db_dblink', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('input_id', sa.Integer(), nullable=False, index=True), + sa.Column('output_id', sa.Integer(), nullable=False, index=True), + sa.Column('label', sa.String(length=255), nullable=False, index=True), + sa.Column('type', sa.String(length=255), nullable=False, index=True), + sa.ForeignKeyConstraint(['input_id'], ['db_dbnode.id'], initially='DEFERRED', deferrable=True), + sa.ForeignKeyConstraint( + ['output_id'], + ['db_dbnode.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + ) + + op.create_table( + 'db_dblog', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True), + sa.Column('uuid', sa.String(32), nullable=False, unique=True), + sa.Column('time', sa.DateTime(timezone=True), nullable=False), + sa.Column('loggername', sa.String(length=255), nullable=False, index=True), + sa.Column('levelname', sa.String(length=50), nullable=False, index=True), + sa.Column('dbnode_id', sa.Integer(), nullable=False, index=True), + sa.Column('message', sa.Text(), nullable=False), + sa.Column('metadata', JSON(), nullable=False), + sa.ForeignKeyConstraint( + ['dbnode_id'], + ['db_dbnode.id'], + ondelete='CASCADE', + initially='DEFERRED', + deferrable=True, + ), + ) + + +def downgrade(): + """Migrations for the downgrade.""" + raise NotImplementedError('Downgrade of main_0000.') diff --git a/src/aiida/storage/sqlite_dos/migrations/versions/main_0002_recompute_hash_calc_job_node.py b/src/aiida/storage/sqlite_dos/migrations/versions/main_0002_recompute_hash_calc_job_node.py new file mode 100644 index 0000000000..ae70c45c4c --- /dev/null +++ b/src/aiida/storage/sqlite_dos/migrations/versions/main_0002_recompute_hash_calc_job_node.py @@ -0,0 +1,84 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. 
# +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Drop the hashes for all ``CalcJobNode`` instances. + +The computed hash erroneously included the hash of the file repository. This was present as of v2.0 and so all nodes +created with versions since then will have incorrect hashes. + +Revision ID: main_0002 +Revises: main_0001 +Create Date: 2024-05-29 +""" + +from __future__ import annotations + +from aiida.common.log import AIIDA_LOGGER +from alembic import op + +LOGGER = AIIDA_LOGGER.getChild(__file__) + +revision = 'main_0002' +down_revision = 'main_0001' +branch_labels = None +depends_on = None + + +def drop_hashes(conn, hash_extra_key: str, entry_point_string: str | None = None) -> None: + """Drop hashes of nodes. + + Print warning only if the DB actually contains nodes. + + :param hash_extra_key: The key in the extras used to store the hash at the time of this migration. + :param entry_point_string: Optional entry point string of a node type to narrow the subset of nodes to reset. The + value should be a complete entry point string, e.g., ``aiida.node:process.calculation.calcjob`` to drop the hash + of all ``CalcJobNode`` rows. + """ + from aiida.orm.utils.node import get_type_string_from_class + from aiida.plugins import load_entry_point_from_string + from sqlalchemy.sql import text + + if entry_point_string is not None: + entry_point = load_entry_point_from_string(entry_point_string) + node_type = get_type_string_from_class(entry_point.__module__, entry_point.__name__) + else: + node_type = None + + if node_type: + statement_count = text(f"SELECT count(*) FROM db_dbnode WHERE node_type = '{node_type}';") + statement_update = text( + f"UPDATE db_dbnode SET extras = json_remove(db_dbnode.extras, '$.{hash_extra_key}') WHERE node_type = '{node_type}';" # noqa: E501 + ) + else: + statement_count = text('SELECT count(*) FROM db_dbnode;') + statement_update = text(f"UPDATE db_dbnode SET extras = json_remove(db_dbnode.extras, '$.{hash_extra_key}');") + + node_count = conn.execute(statement_count).fetchall()[0][0] + + if node_count > 0: + if entry_point_string: + msg = f'Invalidating the hashes of certain nodes. Please run `verdi node rehash -e {entry_point_string}`.' + else: + msg = 'Invalidating the hashes of all nodes. Please run `verdi node rehash`.' 
+ LOGGER.warning(msg) + + conn.execute(statement_update) + + +def upgrade(): + """Migrations for the upgrade.""" + drop_hashes( + op.get_bind(), hash_extra_key='_aiida_hash', entry_point_string='aiida.node:process.calculation.calcjob' + ) + + +def downgrade(): + """Migrations for the downgrade.""" + drop_hashes( + op.get_bind(), hash_extra_key='_aiida_hash', entry_point_string='aiida.node:process.calculation.calcjob' + ) diff --git a/src/aiida/storage/sqlite_zip/migrations/env.py b/src/aiida/storage/sqlite_zip/migrations/env.py index 73abbd917b..5691a95568 100644 --- a/src/aiida/storage/sqlite_zip/migrations/env.py +++ b/src/aiida/storage/sqlite_zip/migrations/env.py @@ -6,7 +6,7 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### -"""Upper level SQLAlchemy migration funcitons.""" +"""Upper level SQLAlchemy migration functions.""" from alembic import context diff --git a/tests/cmdline/commands/test_status.py b/tests/cmdline/commands/test_status.py index d02aff07d2..a4b81dbfc6 100644 --- a/tests/cmdline/commands/test_status.py +++ b/tests/cmdline/commands/test_status.py @@ -68,6 +68,7 @@ def test_storage_unable_to_connect(run_cli_command): profile._attributes['storage']['config']['database_port'] = old_port +@pytest.mark.requires_psql def test_storage_incompatible(run_cli_command, monkeypatch): """Test `verdi status` when storage schema version is incompatible with that of the code.""" @@ -83,6 +84,7 @@ def storage_cls(*args, **kwargs): assert result.exit_code is ExitCode.CRITICAL +@pytest.mark.requires_psql def test_storage_corrupted(run_cli_command, monkeypatch): """Test `verdi status` when the storage is found to be corrupt (e.g. non-matching repository UUIDs).""" diff --git a/tests/storage/sqlite_dos/migrations/conftest.py b/tests/storage/sqlite_dos/migrations/conftest.py new file mode 100644 index 0000000000..bba974705f --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/conftest.py @@ -0,0 +1,76 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Tests for the migration engine (Alembic) as well as for the AiiDA migrations for SQLAlchemy.""" + +import collections +import pathlib + +import pytest +from aiida.manage.configuration import Profile +from aiida.storage.sqlite_zip.utils import create_sqla_engine +from sqlalchemy import text + + +@pytest.fixture +def uninitialised_profile(tmp_path): + """Create a profile attached to an empty database and repository folder.""" + + yield Profile( + 'test_migrate', + { + 'test_profile': True, + 'storage': { + 'backend': 'core.sqlite_dos', + 'config': { + 'filepath': str(tmp_path), + }, + }, + 'process_control': {'backend': 'null', 'config': {}}, + }, + ) + + +def _generate_schema(profile: Profile) -> dict: + """Create a dict containing indexes of AiiDA tables.""" + with create_sqla_engine(pathlib.Path(profile.storage_config['filepath']) / 'database.sqlite').connect() as conn: + data = collections.defaultdict(list) + for type_, name, tbl_name, rootpage, sql in conn.execute(text('SELECT * FROM sqlite_master;')): + lines_sql = sql.strip().split('\n') if sql else [] + + # For an unknown reason, the ``sql`` is not deterministic as the order of the ``CONSTRAINTS`` rules seem to + # be in random order. To make sure they are always in the same order, they have to be ordered manually. + if type_ == 'table': + lines_constraints = [] + lines_other = [] + for line in lines_sql: + stripped = line.strip().strip(',') + + if 'CONSTRAINT' in stripped: + lines_constraints.append(stripped) + else: + lines_other.append(stripped) + + lines_sql = lines_other + sorted(lines_constraints) + data[type_].append((name, tbl_name, lines_sql)) + + for key in data.keys(): + data[key] = sorted(data[key], key=lambda v: v[0]) + + return dict(data) + + +@pytest.fixture +def reflect_schema(): + """A fixture to generate the schema of AiiDA tables for a given profile.""" + + def factory(profile: Profile) -> dict: + """Create a dict containing all tables and fields of AiiDA tables.""" + return _generate_schema(profile) + + return factory diff --git a/tests/storage/sqlite_dos/migrations/test_all_schema.py b/tests/storage/sqlite_dos/migrations/test_all_schema.py new file mode 100644 index 0000000000..51351f918e --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/test_all_schema.py @@ -0,0 +1,49 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Basic tests for all migrations""" + +import pytest +from aiida.storage.sqlite_dos.backend import SqliteDosMigrator + + +@pytest.mark.parametrize('version', list(v for v in SqliteDosMigrator.get_schema_versions() if v.startswith('main'))) +def test_main(version, uninitialised_profile, reflect_schema, data_regression): + """Test that the migrations produce the expected database schema.""" + migrator = SqliteDosMigrator(uninitialised_profile) + migrator.migrate_up(f'main@{version}') + data_regression.check(reflect_schema(uninitialised_profile)) + + +def test_main_initialized(uninitialised_profile): + """Test that ``migrate`` properly stamps the new schema version when updating database with existing schema.""" + migrator = SqliteDosMigrator(uninitialised_profile) + + # Initialize database at first version of main branch + migrator.migrate_up('main@main_0001') + assert migrator.get_schema_version_profile() == 'main_0001' + migrator.close() + + # Reinitialize the migrator to make sure we are fetching actual state of database and not in-memory state and then + # migrate to head schema version. + migrator = SqliteDosMigrator(uninitialised_profile) + migrator.migrate() + migrator.close() + + # Reinitialize the migrator to make sure we are fetching actual state of database and not in-memory state and then + # assert that the database version is properly set to the head schema version + migrator = SqliteDosMigrator(uninitialised_profile) + assert migrator.get_schema_version_profile() == migrator.get_schema_version_head() + + +def test_head_vs_orm(uninitialised_profile, reflect_schema, data_regression): + """Test that the migrations produce the same database schema as the models.""" + migrator = SqliteDosMigrator(uninitialised_profile) + head_version = migrator.get_schema_version_head() + migrator.initialise() + data_regression.check(reflect_schema(uninitialised_profile), basename=f'test_head_vs_orm_{head_version}_') diff --git a/tests/storage/sqlite_dos/migrations/test_all_schema/test_head_vs_orm_main_0002_.yml b/tests/storage/sqlite_dos/migrations/test_all_schema/test_head_vs_orm_main_0002_.yml new file mode 100644 index 0000000000..b70a576550 --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/test_all_schema/test_head_vs_orm_main_0002_.yml @@ -0,0 +1,269 @@ +index: +- - ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id ON db_dbauthinfo (aiidauser_id) +- - ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id ON db_dbauthinfo (dbcomputer_id) +- - ix_db_dbcomment_db_dbcomment_dbnode_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_dbnode_id ON db_dbcomment (dbnode_id) +- - ix_db_dbcomment_db_dbcomment_user_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_user_id ON db_dbcomment (user_id) +- - ix_db_dbgroup_db_dbgroup_label + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_label ON db_dbgroup (label) +- - ix_db_dbgroup_db_dbgroup_type_string + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_type_string ON db_dbgroup (type_string) +- - ix_db_dbgroup_db_dbgroup_user_id + - db_dbgroup + - - CREATE INDEX 
ix_db_dbgroup_db_dbgroup_user_id ON db_dbgroup (user_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id ON db_dbgroup_dbnodes + (dbgroup_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id ON db_dbgroup_dbnodes + (dbnode_id) +- - ix_db_dblink_db_dblink_input_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_input_id ON db_dblink (input_id) +- - ix_db_dblink_db_dblink_label + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_label ON db_dblink (label) +- - ix_db_dblink_db_dblink_output_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_output_id ON db_dblink (output_id) +- - ix_db_dblink_db_dblink_type + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_type ON db_dblink (type) +- - ix_db_dblog_db_dblog_dbnode_id + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_dbnode_id ON db_dblog (dbnode_id) +- - ix_db_dblog_db_dblog_levelname + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_levelname ON db_dblog (levelname) +- - ix_db_dblog_db_dblog_loggername + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_loggername ON db_dblog (loggername) +- - ix_db_dbnode_db_dbnode_ctime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_ctime ON db_dbnode (ctime) +- - ix_db_dbnode_db_dbnode_dbcomputer_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_dbcomputer_id ON db_dbnode (dbcomputer_id) +- - ix_db_dbnode_db_dbnode_label + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_label ON db_dbnode (label) +- - ix_db_dbnode_db_dbnode_mtime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_mtime ON db_dbnode (mtime) +- - ix_db_dbnode_db_dbnode_node_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_node_type ON db_dbnode (node_type) +- - ix_db_dbnode_db_dbnode_process_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_process_type ON db_dbnode (process_type) +- - ix_db_dbnode_db_dbnode_user_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_user_id ON db_dbnode (user_id) +- - sqlite_autoindex_alembic_version_1 + - alembic_version + - [] +- - sqlite_autoindex_db_dbauthinfo_1 + - db_dbauthinfo + - [] +- - sqlite_autoindex_db_dbcomment_1 + - db_dbcomment + - [] +- - sqlite_autoindex_db_dbcomputer_1 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbcomputer_2 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbgroup_1 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_2 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_dbnodes_1 + - db_dbgroup_dbnodes + - [] +- - sqlite_autoindex_db_dblog_1 + - db_dblog + - [] +- - sqlite_autoindex_db_dbnode_1 + - db_dbnode + - [] +- - sqlite_autoindex_db_dbsetting_1 + - db_dbsetting + - [] +- - sqlite_autoindex_db_dbuser_1 + - db_dbuser + - [] +table: +- - alembic_version + - alembic_version + - - CREATE TABLE alembic_version ( + - version_num VARCHAR(32) NOT NULL + - ) + - CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num) +- - db_dbauthinfo + - db_dbauthinfo + - - CREATE TABLE db_dbauthinfo ( + - id INTEGER NOT NULL + - aiidauser_id INTEGER NOT NULL + - dbcomputer_id INTEGER NOT NULL + - metadata JSON NOT NULL + - auth_params JSON NOT NULL + - enabled BOOLEAN NOT NULL + - ) + - CONSTRAINT db_dbauthinfo_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbauthinfo_aiidauser_id_db_dbuser FOREIGN KEY(aiidauser_id) + REFERENCES db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + 
- CONSTRAINT fk_db_dbauthinfo_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbauthinfo_aiidauser_id_dbcomputer_id UNIQUE (aiidauser_id, + dbcomputer_id) +- - db_dbcomment + - db_dbcomment + - - CREATE TABLE db_dbcomment ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - dbnode_id INTEGER NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - user_id INTEGER NOT NULL + - content TEXT NOT NULL + - ) + - CONSTRAINT db_dbcomment_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbcomment_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbcomment_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES + db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbcomment_uuid UNIQUE (uuid) +- - db_dbcomputer + - db_dbcomputer + - - CREATE TABLE db_dbcomputer ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - hostname VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - scheduler_type VARCHAR(255) NOT NULL + - transport_type VARCHAR(255) NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dbcomputer_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbcomputer_label UNIQUE (label) + - CONSTRAINT uq_db_dbcomputer_uuid UNIQUE (uuid) +- - db_dbgroup + - db_dbgroup + - - CREATE TABLE db_dbgroup ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - type_string VARCHAR(255) NOT NULL + - time DATETIME NOT NULL + - description TEXT NOT NULL + - extras JSON NOT NULL + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_label_type_string UNIQUE (label, type_string) + - CONSTRAINT uq_db_dbgroup_uuid UNIQUE (uuid) +- - db_dbgroup_dbnodes + - db_dbgroup_dbnodes + - - CREATE TABLE db_dbgroup_dbnodes ( + - id INTEGER NOT NULL + - dbnode_id INTEGER NOT NULL + - dbgroup_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_dbnodes_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_dbnodes_dbgroup_id_db_dbgroup FOREIGN KEY(dbgroup_id) + REFERENCES db_dbgroup (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbgroup_dbnodes_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) + REFERENCES db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_dbnodes_dbgroup_id_dbnode_id UNIQUE (dbgroup_id, dbnode_id) +- - db_dblink + - db_dblink + - - CREATE TABLE db_dblink ( + - id INTEGER NOT NULL + - input_id INTEGER NOT NULL + - output_id INTEGER NOT NULL + - label VARCHAR(255) NOT NULL + - type VARCHAR(255) NOT NULL + - ) + - CONSTRAINT db_dblink_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblink_input_id_db_dbnode FOREIGN KEY(input_id) REFERENCES + db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dblink_output_id_db_dbnode FOREIGN KEY(output_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED +- - db_dblog + - db_dblog + - - CREATE TABLE db_dblog ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - time DATETIME NOT NULL + - loggername VARCHAR(255) NOT NULL + - levelname VARCHAR(50) NOT NULL + - dbnode_id INTEGER NOT NULL + - message TEXT NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dblog_pkey PRIMARY KEY (id) + - CONSTRAINT 
fk_db_dblog_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dblog_uuid UNIQUE (uuid) +- - db_dbnode + - db_dbnode + - - CREATE TABLE db_dbnode ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - node_type VARCHAR(255) NOT NULL + - process_type VARCHAR(255) + - label VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - attributes JSON + - extras JSON + - repository_metadata JSON NOT NULL + - dbcomputer_id INTEGER + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbnode_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbnode_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbnode_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbnode_uuid UNIQUE (uuid) +- - db_dbsetting + - db_dbsetting + - - CREATE TABLE db_dbsetting ( + - id INTEGER NOT NULL + - '"key" VARCHAR(1024) NOT NULL' + - val JSON + - description TEXT NOT NULL + - time DATETIME NOT NULL + - ) + - CONSTRAINT db_dbsetting_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbsetting_key UNIQUE ("key") +- - db_dbuser + - db_dbuser + - - CREATE TABLE db_dbuser ( + - id INTEGER NOT NULL + - email VARCHAR(254) NOT NULL + - first_name VARCHAR(254) NOT NULL + - last_name VARCHAR(254) NOT NULL + - institution VARCHAR(254) NOT NULL + - ) + - CONSTRAINT db_dbuser_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbuser_email UNIQUE (email) diff --git a/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0001_.yml b/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0001_.yml new file mode 100644 index 0000000000..3b49696512 --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0001_.yml @@ -0,0 +1,255 @@ +index: +- - ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id ON db_dbauthinfo (aiidauser_id) +- - ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id ON db_dbauthinfo (dbcomputer_id) +- - ix_db_dbcomment_db_dbcomment_dbnode_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_dbnode_id ON db_dbcomment (dbnode_id) +- - ix_db_dbcomment_db_dbcomment_user_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_user_id ON db_dbcomment (user_id) +- - ix_db_dbgroup_db_dbgroup_label + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_label ON db_dbgroup (label) +- - ix_db_dbgroup_db_dbgroup_type_string + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_type_string ON db_dbgroup (type_string) +- - ix_db_dbgroup_db_dbgroup_user_id + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_user_id ON db_dbgroup (user_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id ON db_dbgroup_dbnodes + (dbgroup_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id ON db_dbgroup_dbnodes + (dbnode_id) +- - ix_db_dblink_db_dblink_input_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_input_id ON db_dblink (input_id) +- - ix_db_dblink_db_dblink_label + - db_dblink + - - CREATE 
INDEX ix_db_dblink_db_dblink_label ON db_dblink (label) +- - ix_db_dblink_db_dblink_output_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_output_id ON db_dblink (output_id) +- - ix_db_dblink_db_dblink_type + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_type ON db_dblink (type) +- - ix_db_dblog_db_dblog_dbnode_id + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_dbnode_id ON db_dblog (dbnode_id) +- - ix_db_dblog_db_dblog_levelname + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_levelname ON db_dblog (levelname) +- - ix_db_dblog_db_dblog_loggername + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_loggername ON db_dblog (loggername) +- - ix_db_dbnode_db_dbnode_ctime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_ctime ON db_dbnode (ctime) +- - ix_db_dbnode_db_dbnode_dbcomputer_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_dbcomputer_id ON db_dbnode (dbcomputer_id) +- - ix_db_dbnode_db_dbnode_label + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_label ON db_dbnode (label) +- - ix_db_dbnode_db_dbnode_mtime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_mtime ON db_dbnode (mtime) +- - ix_db_dbnode_db_dbnode_node_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_node_type ON db_dbnode (node_type) +- - ix_db_dbnode_db_dbnode_process_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_process_type ON db_dbnode (process_type) +- - ix_db_dbnode_db_dbnode_user_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_user_id ON db_dbnode (user_id) +- - sqlite_autoindex_alembic_version_1 + - alembic_version + - [] +- - sqlite_autoindex_db_dbauthinfo_1 + - db_dbauthinfo + - [] +- - sqlite_autoindex_db_dbcomment_1 + - db_dbcomment + - [] +- - sqlite_autoindex_db_dbcomputer_1 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbcomputer_2 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbgroup_1 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_2 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_dbnodes_1 + - db_dbgroup_dbnodes + - [] +- - sqlite_autoindex_db_dblog_1 + - db_dblog + - [] +- - sqlite_autoindex_db_dbnode_1 + - db_dbnode + - [] +- - sqlite_autoindex_db_dbuser_1 + - db_dbuser + - [] +table: +- - alembic_version + - alembic_version + - - CREATE TABLE alembic_version ( + - version_num VARCHAR(32) NOT NULL + - ) + - CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num) +- - db_dbauthinfo + - db_dbauthinfo + - - CREATE TABLE db_dbauthinfo ( + - id INTEGER NOT NULL + - aiidauser_id INTEGER NOT NULL + - dbcomputer_id INTEGER NOT NULL + - metadata JSON NOT NULL + - auth_params JSON NOT NULL + - enabled BOOLEAN NOT NULL + - ) + - CONSTRAINT db_dbauthinfo_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbauthinfo_aiidauser_id_db_dbuser FOREIGN KEY(aiidauser_id) + REFERENCES db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbauthinfo_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbauthinfo_aiidauser_id_dbcomputer_id UNIQUE (aiidauser_id, + dbcomputer_id) +- - db_dbcomment + - db_dbcomment + - - CREATE TABLE db_dbcomment ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - dbnode_id INTEGER NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - user_id INTEGER NOT NULL + - content TEXT NOT NULL + - ) + - CONSTRAINT db_dbcomment_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbcomment_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + 
db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbcomment_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES + db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbcomment_uuid UNIQUE (uuid) +- - db_dbcomputer + - db_dbcomputer + - - CREATE TABLE db_dbcomputer ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - hostname VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - scheduler_type VARCHAR(255) NOT NULL + - transport_type VARCHAR(255) NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dbcomputer_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbcomputer_label UNIQUE (label) + - CONSTRAINT uq_db_dbcomputer_uuid UNIQUE (uuid) +- - db_dbgroup + - db_dbgroup + - - CREATE TABLE db_dbgroup ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - type_string VARCHAR(255) NOT NULL + - time DATETIME NOT NULL + - description TEXT NOT NULL + - extras JSON NOT NULL + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_label_type_string UNIQUE (label, type_string) + - CONSTRAINT uq_db_dbgroup_uuid UNIQUE (uuid) +- - db_dbgroup_dbnodes + - db_dbgroup_dbnodes + - - CREATE TABLE db_dbgroup_dbnodes ( + - id INTEGER NOT NULL + - dbnode_id INTEGER NOT NULL + - dbgroup_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_dbnodes_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_dbnodes_dbgroup_id_db_dbgroup FOREIGN KEY(dbgroup_id) + REFERENCES db_dbgroup (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbgroup_dbnodes_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) + REFERENCES db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_dbnodes_dbgroup_id_dbnode_id UNIQUE (dbgroup_id, dbnode_id) +- - db_dblink + - db_dblink + - - CREATE TABLE db_dblink ( + - id INTEGER NOT NULL + - input_id INTEGER NOT NULL + - output_id INTEGER NOT NULL + - label VARCHAR(255) NOT NULL + - type VARCHAR(255) NOT NULL + - ) + - CONSTRAINT db_dblink_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblink_input_id_db_dbnode FOREIGN KEY(input_id) REFERENCES + db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dblink_output_id_db_dbnode FOREIGN KEY(output_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED +- - db_dblog + - db_dblog + - - CREATE TABLE db_dblog ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - time DATETIME NOT NULL + - loggername VARCHAR(255) NOT NULL + - levelname VARCHAR(50) NOT NULL + - dbnode_id INTEGER NOT NULL + - message TEXT NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dblog_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblog_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dblog_uuid UNIQUE (uuid) +- - db_dbnode + - db_dbnode + - - CREATE TABLE db_dbnode ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - node_type VARCHAR(255) NOT NULL + - process_type VARCHAR(255) + - label VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - attributes JSON + - extras JSON + - repository_metadata JSON NOT NULL + - dbcomputer_id INTEGER + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbnode_pkey PRIMARY KEY (id) + - CONSTRAINT 
fk_db_dbnode_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbnode_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE restrict DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbnode_uuid UNIQUE (uuid) +- - db_dbuser + - db_dbuser + - - CREATE TABLE db_dbuser ( + - id INTEGER NOT NULL + - email VARCHAR(254) NOT NULL + - first_name VARCHAR(254) NOT NULL + - last_name VARCHAR(254) NOT NULL + - institution VARCHAR(254) NOT NULL + - ) + - CONSTRAINT db_dbuser_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbuser_email UNIQUE (email) diff --git a/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0002_.yml b/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0002_.yml new file mode 100644 index 0000000000..3b49696512 --- /dev/null +++ b/tests/storage/sqlite_dos/migrations/test_all_schema/test_main_main_0002_.yml @@ -0,0 +1,255 @@ +index: +- - ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_aiidauser_id ON db_dbauthinfo (aiidauser_id) +- - ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id + - db_dbauthinfo + - - CREATE INDEX ix_db_dbauthinfo_db_dbauthinfo_dbcomputer_id ON db_dbauthinfo (dbcomputer_id) +- - ix_db_dbcomment_db_dbcomment_dbnode_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_dbnode_id ON db_dbcomment (dbnode_id) +- - ix_db_dbcomment_db_dbcomment_user_id + - db_dbcomment + - - CREATE INDEX ix_db_dbcomment_db_dbcomment_user_id ON db_dbcomment (user_id) +- - ix_db_dbgroup_db_dbgroup_label + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_label ON db_dbgroup (label) +- - ix_db_dbgroup_db_dbgroup_type_string + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_type_string ON db_dbgroup (type_string) +- - ix_db_dbgroup_db_dbgroup_user_id + - db_dbgroup + - - CREATE INDEX ix_db_dbgroup_db_dbgroup_user_id ON db_dbgroup (user_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbgroup_id ON db_dbgroup_dbnodes + (dbgroup_id) +- - ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id + - db_dbgroup_dbnodes + - - CREATE INDEX ix_db_dbgroup_dbnodes_db_dbgroup_dbnodes_dbnode_id ON db_dbgroup_dbnodes + (dbnode_id) +- - ix_db_dblink_db_dblink_input_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_input_id ON db_dblink (input_id) +- - ix_db_dblink_db_dblink_label + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_label ON db_dblink (label) +- - ix_db_dblink_db_dblink_output_id + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_output_id ON db_dblink (output_id) +- - ix_db_dblink_db_dblink_type + - db_dblink + - - CREATE INDEX ix_db_dblink_db_dblink_type ON db_dblink (type) +- - ix_db_dblog_db_dblog_dbnode_id + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_dbnode_id ON db_dblog (dbnode_id) +- - ix_db_dblog_db_dblog_levelname + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_levelname ON db_dblog (levelname) +- - ix_db_dblog_db_dblog_loggername + - db_dblog + - - CREATE INDEX ix_db_dblog_db_dblog_loggername ON db_dblog (loggername) +- - ix_db_dbnode_db_dbnode_ctime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_ctime ON db_dbnode (ctime) +- - ix_db_dbnode_db_dbnode_dbcomputer_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_dbcomputer_id ON db_dbnode (dbcomputer_id) +- - ix_db_dbnode_db_dbnode_label + - 
db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_label ON db_dbnode (label) +- - ix_db_dbnode_db_dbnode_mtime + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_mtime ON db_dbnode (mtime) +- - ix_db_dbnode_db_dbnode_node_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_node_type ON db_dbnode (node_type) +- - ix_db_dbnode_db_dbnode_process_type + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_process_type ON db_dbnode (process_type) +- - ix_db_dbnode_db_dbnode_user_id + - db_dbnode + - - CREATE INDEX ix_db_dbnode_db_dbnode_user_id ON db_dbnode (user_id) +- - sqlite_autoindex_alembic_version_1 + - alembic_version + - [] +- - sqlite_autoindex_db_dbauthinfo_1 + - db_dbauthinfo + - [] +- - sqlite_autoindex_db_dbcomment_1 + - db_dbcomment + - [] +- - sqlite_autoindex_db_dbcomputer_1 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbcomputer_2 + - db_dbcomputer + - [] +- - sqlite_autoindex_db_dbgroup_1 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_2 + - db_dbgroup + - [] +- - sqlite_autoindex_db_dbgroup_dbnodes_1 + - db_dbgroup_dbnodes + - [] +- - sqlite_autoindex_db_dblog_1 + - db_dblog + - [] +- - sqlite_autoindex_db_dbnode_1 + - db_dbnode + - [] +- - sqlite_autoindex_db_dbuser_1 + - db_dbuser + - [] +table: +- - alembic_version + - alembic_version + - - CREATE TABLE alembic_version ( + - version_num VARCHAR(32) NOT NULL + - ) + - CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num) +- - db_dbauthinfo + - db_dbauthinfo + - - CREATE TABLE db_dbauthinfo ( + - id INTEGER NOT NULL + - aiidauser_id INTEGER NOT NULL + - dbcomputer_id INTEGER NOT NULL + - metadata JSON NOT NULL + - auth_params JSON NOT NULL + - enabled BOOLEAN NOT NULL + - ) + - CONSTRAINT db_dbauthinfo_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbauthinfo_aiidauser_id_db_dbuser FOREIGN KEY(aiidauser_id) + REFERENCES db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbauthinfo_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbauthinfo_aiidauser_id_dbcomputer_id UNIQUE (aiidauser_id, + dbcomputer_id) +- - db_dbcomment + - db_dbcomment + - - CREATE TABLE db_dbcomment ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - dbnode_id INTEGER NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - user_id INTEGER NOT NULL + - content TEXT NOT NULL + - ) + - CONSTRAINT db_dbcomment_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbcomment_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbcomment_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES + db_dbuser (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbcomment_uuid UNIQUE (uuid) +- - db_dbcomputer + - db_dbcomputer + - - CREATE TABLE db_dbcomputer ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - hostname VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - scheduler_type VARCHAR(255) NOT NULL + - transport_type VARCHAR(255) NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dbcomputer_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbcomputer_label UNIQUE (label) + - CONSTRAINT uq_db_dbcomputer_uuid UNIQUE (uuid) +- - db_dbgroup + - db_dbgroup + - - CREATE TABLE db_dbgroup ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - label VARCHAR(255) NOT NULL + - type_string VARCHAR(255) NOT NULL + - time DATETIME NOT NULL + - 
description TEXT NOT NULL + - extras JSON NOT NULL + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_label_type_string UNIQUE (label, type_string) + - CONSTRAINT uq_db_dbgroup_uuid UNIQUE (uuid) +- - db_dbgroup_dbnodes + - db_dbgroup_dbnodes + - - CREATE TABLE db_dbgroup_dbnodes ( + - id INTEGER NOT NULL + - dbnode_id INTEGER NOT NULL + - dbgroup_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbgroup_dbnodes_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbgroup_dbnodes_dbgroup_id_db_dbgroup FOREIGN KEY(dbgroup_id) + REFERENCES db_dbgroup (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbgroup_dbnodes_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) + REFERENCES db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbgroup_dbnodes_dbgroup_id_dbnode_id UNIQUE (dbgroup_id, dbnode_id) +- - db_dblink + - db_dblink + - - CREATE TABLE db_dblink ( + - id INTEGER NOT NULL + - input_id INTEGER NOT NULL + - output_id INTEGER NOT NULL + - label VARCHAR(255) NOT NULL + - type VARCHAR(255) NOT NULL + - ) + - CONSTRAINT db_dblink_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblink_input_id_db_dbnode FOREIGN KEY(input_id) REFERENCES + db_dbnode (id) DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dblink_output_id_db_dbnode FOREIGN KEY(output_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED +- - db_dblog + - db_dblog + - - CREATE TABLE db_dblog ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - time DATETIME NOT NULL + - loggername VARCHAR(255) NOT NULL + - levelname VARCHAR(50) NOT NULL + - dbnode_id INTEGER NOT NULL + - message TEXT NOT NULL + - metadata JSON NOT NULL + - ) + - CONSTRAINT db_dblog_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dblog_dbnode_id_db_dbnode FOREIGN KEY(dbnode_id) REFERENCES + db_dbnode (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dblog_uuid UNIQUE (uuid) +- - db_dbnode + - db_dbnode + - - CREATE TABLE db_dbnode ( + - id INTEGER NOT NULL + - uuid VARCHAR(32) NOT NULL + - node_type VARCHAR(255) NOT NULL + - process_type VARCHAR(255) + - label VARCHAR(255) NOT NULL + - description TEXT NOT NULL + - ctime DATETIME NOT NULL + - mtime DATETIME NOT NULL + - attributes JSON + - extras JSON + - repository_metadata JSON NOT NULL + - dbcomputer_id INTEGER + - user_id INTEGER NOT NULL + - ) + - CONSTRAINT db_dbnode_pkey PRIMARY KEY (id) + - CONSTRAINT fk_db_dbnode_dbcomputer_id_db_dbcomputer FOREIGN KEY(dbcomputer_id) + REFERENCES db_dbcomputer (id) ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT fk_db_dbnode_user_id_db_dbuser FOREIGN KEY(user_id) REFERENCES db_dbuser + (id) ON DELETE restrict DEFERRABLE INITIALLY DEFERRED + - CONSTRAINT uq_db_dbnode_uuid UNIQUE (uuid) +- - db_dbuser + - db_dbuser + - - CREATE TABLE db_dbuser ( + - id INTEGER NOT NULL + - email VARCHAR(254) NOT NULL + - first_name VARCHAR(254) NOT NULL + - last_name VARCHAR(254) NOT NULL + - institution VARCHAR(254) NOT NULL + - ) + - CONSTRAINT db_dbuser_pkey PRIMARY KEY (id) + - CONSTRAINT uq_db_dbuser_email UNIQUE (email) From 5c1f5d6fcb3dcbe51fe7c8eda88daf123f191635 Mon Sep 17 00:00:00 2001 From: Ali Khosravi Date: Fri, 5 Jul 2024 10:15:23 +0200 Subject: [PATCH 48/82] Docs: Add `robots.txt` to only allow indexing of `latest` and `stable` (#6517) Currently, all versions of the documentation are indexed with the result that 
google searches come up with very outdated versions and the latest version is almost impossible to find. The `robots.txt` now disallows any path from being indexed except for the `latest` and `stable` versions of the documentation. --- docs/source/conf.py | 3 +++ docs/source/robots.txt | 4 ++++ 2 files changed, 7 insertions(+) create mode 100644 docs/source/robots.txt diff --git a/docs/source/conf.py b/docs/source/conf.py index d017051cb3..03922a0efa 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -171,6 +171,9 @@ # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = False +# This is to tell search engines to index only stable and latest version +html_extra_path = ['robots.txt'] + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] diff --git a/docs/source/robots.txt b/docs/source/robots.txt new file mode 100644 index 0000000000..f94eda030d --- /dev/null +++ b/docs/source/robots.txt @@ -0,0 +1,4 @@ +User-agent: * +Allow: /projects/aiida-core/en/latest/ +Allow: /projects/aiida-core/en/stable/ +Disallow: / From a3f734d8a89715ea27f273df64e130cac89957a6 Mon Sep 17 00:00:00 2001 From: Julian Geiger Date: Fri, 5 Jul 2024 13:14:35 +0200 Subject: [PATCH 49/82] Docs: Update `redirects.txt` for installation pages (#6509) --- docs/source/redirects.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/source/redirects.txt b/docs/source/redirects.txt index 50f8ed2029..594ba48d66 100644 --- a/docs/source/redirects.txt +++ b/docs/source/redirects.txt @@ -13,6 +13,13 @@ install/installation.rst installation/index.rst install/configuration.rst howto/installation.rst install/updating_installation.rst howto/installation.rst install/troubleshooting.rst installation/troubleshooting.rst +intro/get_started.rst installation/index.rst +intro/install_system.rst installation/index.rst +intro/install_conda.rst installation/index.rst +intro/installation.rst installation/index.rst +intro/run_docker.rst installation/docker.rst +intro/tutorial.md tutorials/index.rst +intro/about.rst intro/index.rst restapi/index.rst reference/rest_api.rst verdi/verdi_user_guide.rst topics/cli.rst working_with_aiida/index.rst howto/index.rst From c740b99f2bfe366a733f140164a21048cd51198e Mon Sep 17 00:00:00 2001 From: Daniel Hollas Date: Fri, 5 Jul 2024 15:53:57 +0100 Subject: [PATCH 50/82] Docker: Fix release tag in publish workflow (#6520) The `AIIDA_VERSION` variable was no longer present in the env. It is now retrieved separately from the tag version. 
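For example, the `${tag#v}` parameter expansion in the new `get-version` step strips the
leading `v` from the tag name, so a hypothetical release tag `v2.6.2` would yield `2.6.2`:

    $ tag="v2.6.2"
    $ echo "${tag#v}"
    2.6.2
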
--- .github/workflows/docker-publish.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index cd290a5a7c..53969ae2d8 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -59,6 +59,12 @@ jobs: vars=$(cat build.json | jq -c '[.variable | to_entries[] | {"key": .key, "value": .value.default}] | from_entries') echo "vars=$vars" | tee -a "${GITHUB_OUTPUT}" + - id: get-version + if: ${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} + run: | + tag="${{ github.ref_name }}" + echo "AIIDA_VERSION=${tag#v}" >> $GITHUB_OUTPUT + - name: Docker meta id: meta uses: docker/metadata-action@v5 @@ -69,7 +75,7 @@ jobs: type=ref,event=pr type=ref,event=branch,enable=${{ github.ref_name != 'main' }} type=edge,enable={{is_default_branch}} - type=raw,value=aiida-${{ env.AIIDA_VERSION }},enable=${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} + type=raw,value=aiida-${{ steps.get-version.outputs.AIIDA_VERSION }},enable=${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} type=raw,value=python-${{ env.PYTHON_VERSION }},enable=${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} type=raw,value=postgresql-${{ env.PGSQL_VERSION }},enable=${{ github.ref_type == 'tag' && startsWith(github.ref_name, 'v') }} type=match,pattern=v(\d{4}\.\d{4}(-.+)?),group=1 From a5da4eda131f844c3639bdb01a256b9e9a7873a2 Mon Sep 17 00:00:00 2001 From: Daniel Hollas Date: Wed, 10 Jul 2024 11:25:36 +0200 Subject: [PATCH 51/82] Devops: Mark `test_leak_ssh_calcjob` as nightly (#6521) It is a very slow test and is unlikely to be affected by typical changes to the codebase. --- tests/engine/test_memory_leaks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/engine/test_memory_leaks.py b/tests/engine/test_memory_leaks.py index 9cfbe7653a..4eece939f3 100644 --- a/tests/engine/test_memory_leaks.py +++ b/tests/engine/test_memory_leaks.py @@ -65,6 +65,7 @@ def test_leak_local_calcjob(aiida_code_installed): @pytest.mark.skipif(sys.version_info >= (3, 12), reason='Garbage collecting hangs on Python 3.12') +@pytest.mark.nightly @pytest.mark.usefixtures('aiida_profile', 'check_memory_leaks') def test_leak_ssh_calcjob(aiida_computer_ssh): """Test whether running a CalcJob over SSH leaks memory. 
From 9355a9878134b7c8e3e75bb029c251f0bf0a7357 Mon Sep 17 00:00:00 2001 From: Julian Geiger Date: Wed, 10 Jul 2024 13:53:47 +0200 Subject: [PATCH 52/82] CLI: Add default for `output_file` in computer and code export commands (#6486) --- src/aiida/cmdline/commands/cmd_code.py | 33 ++-- src/aiida/cmdline/commands/cmd_computer.py | 52 ++++--- src/aiida/cmdline/params/options/__init__.py | 1 + src/aiida/cmdline/params/options/main.py | 10 ++ src/aiida/cmdline/utils/common.py | 22 +++ tests/cmdline/commands/test_code.py | 74 +++++++-- .../test_code/test_code_export_True_.yml | 8 - ...e_.yml => test_code_export___no_sort_.yml} | 0 ...xport.yml => test_code_export___sort_.yml} | 0 tests/cmdline/commands/test_computer.py | 147 +++++++++++++++--- .../test_computer_export_setup___no_sort_.yml | 13 ++ .../test_computer_export_setup___sort_.yml | 13 ++ tests/cmdline/utils/test_common.py | 39 +++++ tests/tools/dumping/test_processes.py | 5 +- 14 files changed, 336 insertions(+), 81 deletions(-) delete mode 100644 tests/cmdline/commands/test_code/test_code_export_True_.yml rename tests/cmdline/commands/test_code/{test_code_export_False_.yml => test_code_export___no_sort_.yml} (100%) rename tests/cmdline/commands/test_code/{test_code_export.yml => test_code_export___sort_.yml} (100%) create mode 100644 tests/cmdline/commands/test_computer/test_computer_export_setup___no_sort_.yml create mode 100644 tests/cmdline/commands/test_computer/test_computer_export_setup___sort_.yml diff --git a/src/aiida/cmdline/commands/cmd_code.py b/src/aiida/cmdline/commands/cmd_code.py index 477d2f61ab..9740ed8e02 100644 --- a/src/aiida/cmdline/commands/cmd_code.py +++ b/src/aiida/cmdline/commands/cmd_code.py @@ -8,6 +8,7 @@ ########################################################################### """`verdi code` command.""" +import pathlib from collections import defaultdict from functools import partial @@ -18,6 +19,7 @@ from aiida.cmdline.params import arguments, options, types from aiida.cmdline.params.options.commands import code as options_code from aiida.cmdline.utils import echo, echo_tabulate +from aiida.cmdline.utils.common import generate_validate_output_file from aiida.cmdline.utils.decorators import with_dbenv from aiida.common import exceptions @@ -234,34 +236,35 @@ def show(code): @verdi_code.command() @arguments.CODE() -@arguments.OUTPUT_FILE(type=click.Path(exists=False)) -@click.option( - '--sort/--no-sort', - is_flag=True, - default=True, - help='Sort the keys of the output YAML.', - show_default=True, -) +@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path), required=False) +@options.OVERWRITE() +@options.SORT() @with_dbenv() -def export(code, output_file, sort): +def export(code, output_file, overwrite, sort): """Export code to a yaml file.""" + import yaml code_data = {} for key in code.Model.model_fields.keys(): - if key == 'computer': - value = getattr(code, key).label - else: - value = getattr(code, key) + value = getattr(code, key).label if key == 'computer' else getattr(code, key) # If the attribute is not set, for example ``with_mpi`` do not export it, because the YAML won't be valid for # use in ``verdi code create`` since ``None`` is not a valid value on the CLI. 
if value is not None: code_data[key] = str(value) - with open(output_file, 'w', encoding='utf-8') as yfhandle: - yaml.dump(code_data, yfhandle, sort_keys=sort) + try: + output_file = generate_validate_output_file( + output_file=output_file, entity_label=code.label, overwrite=overwrite, appendix=f'@{code_data["computer"]}' + ) + except (FileExistsError, IsADirectoryError) as exception: + raise click.BadParameter(str(exception), param_hint='OUTPUT_FILE') from exception + + output_file.write_text(yaml.dump(code_data, sort_keys=sort)) + + echo.echo_success(f'Code<{code.pk}> {code.label} exported to file `{output_file}`.') @verdi_code.command() diff --git a/src/aiida/cmdline/commands/cmd_computer.py b/src/aiida/cmdline/commands/cmd_computer.py index 7f8508b77a..acb9c2da81 100644 --- a/src/aiida/cmdline/commands/cmd_computer.py +++ b/src/aiida/cmdline/commands/cmd_computer.py @@ -20,6 +20,7 @@ from aiida.cmdline.params import arguments, options from aiida.cmdline.params.options.commands import computer as options_computer from aiida.cmdline.utils import echo, echo_tabulate +from aiida.cmdline.utils.common import generate_validate_output_file from aiida.cmdline.utils.decorators import with_dbenv from aiida.common.exceptions import EntryPointError, ValidationError from aiida.plugins.entry_point import get_entry_point_names @@ -741,16 +742,11 @@ def computer_export(): @computer_export.command('setup') @arguments.COMPUTER() -@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path)) -@click.option( - '--sort/--no-sort', - is_flag=True, - default=True, - help='Sort the keys of the output YAML.', - show_default=True, -) +@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path), required=False) +@options.OVERWRITE() +@options.SORT() @with_dbenv() -def computer_export_setup(computer, output_file, sort): +def computer_export_setup(computer, output_file, overwrite, sort): """Export computer setup to a YAML file.""" import yaml @@ -769,6 +765,14 @@ def computer_export_setup(computer, output_file, sort): 'prepend_text': computer.get_prepend_text(), 'append_text': computer.get_append_text(), } + + try: + output_file = generate_validate_output_file( + output_file=output_file, entity_label=computer.label, overwrite=overwrite, appendix='-setup' + ) + except (FileExistsError, IsADirectoryError) as exception: + raise click.BadParameter(str(exception), param_hint='OUTPUT_FILE') from exception + try: output_file.write_text(yaml.dump(computer_setup, sort_keys=sort), 'utf-8') except Exception as e: @@ -783,19 +787,14 @@ def computer_export_setup(computer, output_file, sort): @computer_export.command('config') @arguments.COMPUTER() -@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path)) +@arguments.OUTPUT_FILE(type=click.Path(exists=False, path_type=pathlib.Path), required=False) @options.USER( help='Email address of the AiiDA user from whom to export this computer (if different from default user).' 
) -@click.option( - '--sort/--no-sort', - is_flag=True, - default=True, - help='Sort the keys of the output YAML.', - show_default=True, -) +@options.OVERWRITE() +@options.SORT() @with_dbenv() -def computer_export_config(computer, output_file, user, sort): +def computer_export_config(computer, output_file, user, overwrite, sort): """Export computer transport configuration for a user to a YAML file.""" import yaml @@ -804,20 +803,29 @@ def computer_export_config(computer, output_file, user, sort): f'Computer<{computer.pk}> {computer.label} configuration cannot be exported,' ' because computer has not been configured yet.' ) + else: + try: + output_file = generate_validate_output_file( + output_file=output_file, entity_label=computer.label, overwrite=overwrite, appendix='-config' + ) + except (FileExistsError, IsADirectoryError) as exception: + raise click.BadParameter(str(exception), param_hint='OUTPUT_FILE') from exception + try: computer_configuration = computer.get_configuration(user) output_file.write_text(yaml.dump(computer_configuration, sort_keys=sort), 'utf-8') - except Exception as e: + + except Exception as exception: error_traceback = traceback.format_exc() echo.CMDLINE_LOGGER.debug(error_traceback) if user is None: echo.echo_critical( - f'Unexpected error while exporting configuration for Computer<{computer.pk}> {computer.label}: {e!s}.' + f'Unexpected error while exporting configuration for Computer<{computer.pk}> {computer.label}: {exception!s}.' # noqa: E501 ) else: echo.echo_critical( f'Unexpected error while exporting configuration for Computer<{computer.pk}> {computer.label}' - f' and User<{user.pk}> {user.email}: {e!s}.' + f' and User<{user.pk}> {user.email}: {exception!s}.' ) else: - echo.echo_success(f"Computer<{computer.pk}> {computer.label} configuration exported to file '{output_file}'.") + echo.echo_success(f'Computer<{computer.pk}> {computer.label} configuration exported to file `{output_file}`.') diff --git a/src/aiida/cmdline/params/options/__init__.py b/src/aiida/cmdline/params/options/__init__.py index 065efe4223..ea4be61461 100644 --- a/src/aiida/cmdline/params/options/__init__.py +++ b/src/aiida/cmdline/params/options/__init__.py @@ -92,6 +92,7 @@ 'REPOSITORY_PATH', 'SCHEDULER', 'SILENT', + 'SORT', 'TIMEOUT', 'TRAJECTORY_INDEX', 'TRANSPORT', diff --git a/src/aiida/cmdline/params/options/main.py b/src/aiida/cmdline/params/options/main.py index f5eb2d551f..d521828450 100644 --- a/src/aiida/cmdline/params/options/main.py +++ b/src/aiida/cmdline/params/options/main.py @@ -96,6 +96,7 @@ 'REPOSITORY_PATH', 'SCHEDULER', 'SILENT', + 'SORT', 'TIMEOUT', 'TRAJECTORY_INDEX', 'TRANSPORT', @@ -771,3 +772,12 @@ def set_log_level(ctx, _param, value): show_default=True, help='Overwrite file/directory if writing to disk.', ) + +SORT = OverridableOption( + '--sort/--no-sort', + 'sort', + is_flag=True, + default=True, + help='Sort the keys of the output YAML.', + show_default=True, +) diff --git a/src/aiida/cmdline/utils/common.py b/src/aiida/cmdline/utils/common.py index 53420fd33b..d410b33d91 100644 --- a/src/aiida/cmdline/utils/common.py +++ b/src/aiida/cmdline/utils/common.py @@ -8,10 +8,13 @@ ########################################################################### """Common utility functions for command line commands.""" +from __future__ import annotations + import logging import os import sys import textwrap +from pathlib import Path from typing import TYPE_CHECKING from click import style @@ -481,3 +484,22 @@ def build_entries(ports): echo.echo(tabulate(table, 
tablefmt='plain')) echo.echo(style('\nExit codes that invalidate the cache are marked in bold red.\n', italic=True)) + + +def generate_validate_output_file( + output_file: Path | None, entity_label: str, appendix: str = '', overwrite: bool = False +): + """Generate default output filename for `Code`/`Computer` export and validate.""" + + if output_file is None: + output_file = Path(f'{entity_label}{appendix}.yml') + + if output_file.is_dir(): + raise IsADirectoryError( + f'A directory with the name `{output_file.resolve()}` already exists. Remove manually and try again.' + ) + + if output_file.is_file() and not overwrite: + raise FileExistsError(f'File `{output_file}` already exists, use `--overwrite` to overwrite.') + + return output_file diff --git a/tests/cmdline/commands/test_code.py b/tests/cmdline/commands/test_code.py index 8aeeb5cec8..b7d1c5cf5f 100644 --- a/tests/cmdline/commands/test_code.py +++ b/tests/cmdline/commands/test_code.py @@ -259,8 +259,8 @@ def test_code_duplicate_ignore(run_cli_command, aiida_code_installed, non_intera @pytest.mark.usefixtures('aiida_profile_clean') -@pytest.mark.parametrize('sort', (True, False)) -def test_code_export(run_cli_command, aiida_code_installed, tmp_path, file_regression, sort): +@pytest.mark.parametrize('sort_option', ('--sort', '--no-sort')) +def test_code_export(run_cli_command, aiida_code_installed, tmp_path, file_regression, sort_option): """Test export the code setup to str.""" prepend_text = 'module load something\n some command' code = aiida_code_installed( @@ -271,14 +271,11 @@ def test_code_export(run_cli_command, aiida_code_installed, tmp_path, file_regre ) filepath = tmp_path / 'code.yml' - options = [str(code.pk), str(filepath)] - options.append('--sort' if sort else '--no-sort') - - run_cli_command(cmd_code.export, options) - + options = [str(code.pk), str(filepath), sort_option] + result = run_cli_command(cmd_code.export, options) + assert str(filepath) in result.output, 'Filename should be in terminal output but was not found.' 
# file regression check - with open(filepath, 'r', encoding='utf-8') as fhandle: - content = fhandle.read() + content = filepath.read_text() file_regression.check(content, extension='.yml') # round trip test by create code from the config file @@ -292,6 +289,65 @@ def test_code_export(run_cli_command, aiida_code_installed, tmp_path, file_regre assert isinstance(new_code, InstalledCode) +@pytest.mark.usefixtures('aiida_profile_clean') +def test_code_export_overwrite(run_cli_command, aiida_code_installed, tmp_path): + prepend_text = 'module load something\n some command' + code = aiida_code_installed( + default_calc_job_plugin='core.arithmetic.add', + filepath_executable='/bin/cat', + label='code', + prepend_text=prepend_text, + ) + filepath = tmp_path / 'code.yml' + + options = [str(code.pk), str(filepath)] + + # Create directory with the same name and check that command fails + filepath.mkdir() + result = run_cli_command(cmd_code.export, options, raises=True) + assert f'A directory with the name `{filepath}` already exists' in result.output + filepath.rmdir() + + # Export fails if file already exists and overwrite set to False + filepath.touch() + result = run_cli_command(cmd_code.export, options, raises=True) + assert f'File `{filepath}` already exists' in result.output + + # Check that overwrite actually overwrites the exported Code config with the new data + code_echo = aiida_code_installed( + default_calc_job_plugin='core.arithmetic.add', + filepath_executable='/bin/echo', + # Need to set different label, therefore manually specify the same output filename + label='code_echo', + prepend_text=prepend_text, + ) + + options = [str(code_echo.pk), str(filepath), '--overwrite'] + run_cli_command(cmd_code.export, options) + + content = filepath.read_text() + assert '/bin/echo' in content + + +@pytest.mark.usefixtures('aiida_profile_clean') +@pytest.mark.usefixtures('chdir_tmp_path') +def test_code_export_default_filename(run_cli_command, aiida_code_installed): + """Test default filename being created if no argument passed.""" + + prepend_text = 'module load something\n some command' + code = aiida_code_installed( + default_calc_job_plugin='core.arithmetic.add', + filepath_executable='/bin/cat', + label='code', + prepend_text=prepend_text, + ) + + options = [str(code.pk)] + run_cli_command(cmd_code.export, options) + + assert pathlib.Path('code@localhost.yml').is_file() + + @pytest.mark.parametrize('non_interactive_editor', ('vim -cwq',), indirect=True) def test_from_config_local_file(non_interactive_editor, run_cli_command, aiida_localhost): """Test setting up a code from a config file on disk.""" diff --git a/tests/cmdline/commands/test_code/test_code_export_True_.yml b/tests/cmdline/commands/test_code/test_code_export_True_.yml deleted file mode 100644 index 640717a1d2..0000000000 --- a/tests/cmdline/commands/test_code/test_code_export_True_.yml +++ /dev/null @@ -1,8 +0,0 @@ -append_text: '' -computer: localhost -default_calc_job_plugin: core.arithmetic.add -description: '' -filepath_executable: /bin/cat -label: code -prepend_text: "module load something\n some command" -use_double_quotes: 'False' diff --git a/tests/cmdline/commands/test_code/test_code_export_False_.yml b/tests/cmdline/commands/test_code/test_code_export___no_sort_.yml similarity index 100% rename from tests/cmdline/commands/test_code/test_code_export_False_.yml rename to tests/cmdline/commands/test_code/test_code_export___no_sort_.yml diff --git a/tests/cmdline/commands/test_code/test_code_export.yml 
b/tests/cmdline/commands/test_code/test_code_export___sort_.yml similarity index 100% rename from tests/cmdline/commands/test_code/test_code_export.yml rename to tests/cmdline/commands/test_code/test_code_export___sort_.yml diff --git a/tests/cmdline/commands/test_computer.py b/tests/cmdline/commands/test_computer.py index 128a3bd61f..dac1170770 100644 --- a/tests/cmdline/commands/test_computer.py +++ b/tests/cmdline/commands/test_computer.py @@ -9,6 +9,7 @@ """Tests for the 'verdi computer' command.""" import os +import pathlib import tempfile import textwrap from collections import OrderedDict @@ -515,69 +516,167 @@ def test_show(self): assert '--username=' in result.output assert result_cur.output == result.output - @pytest.mark.parametrize('sort', ['--sort', '--no-sort']) - def test_computer_export_setup(self, tmp_path, sort): - """Test if 'verdi computer export setup' command works""" - self.comp_builder.label = 'test_computer_export_setup' + sort + @pytest.mark.parametrize('sort_option', ('--sort', '--no-sort')) + def test_computer_export_setup(self, tmp_path, file_regression, sort_option): + """Test if `verdi computer export setup` command works""" + self.comp_builder.label = f'test_computer_export_setup{sort_option}' + # Label needs to be unique during parametrization self.comp_builder.transport = 'core.ssh' comp = self.comp_builder.new() comp.store() exported_setup_filename = tmp_path / 'computer-setup.yml' - result = self.cli_runner(computer_export_setup, [sort, comp.label, exported_setup_filename]) - assert result.exit_code == 0, 'Command should have run successfull.' + + # Successfull write behavior + result = self.cli_runner(computer_export_setup, [comp.label, exported_setup_filename, sort_option]) assert str(exported_setup_filename) in result.output, 'Filename should be in terminal output but was not found.' assert exported_setup_filename.exists(), f"'{exported_setup_filename}' was not created during export." + + # file regresssion check + content = exported_setup_filename.read_text() + file_regression.check(content, extension='.yml') + # verifying correctness by comparing internal and loaded yml object configure_setup_data = yaml.safe_load(exported_setup_filename.read_text()) assert configure_setup_data == self.comp_builder.get_computer_spec( comp ), 'Internal computer configuration does not agree with exported one.' 
+ def test_computer_export_setup_overwrite(self, tmp_path): + """Test if overwriting behavior of `verdi computer export setup` command works as expected""" + + self.comp_builder.label = 'test_computer_export_setup' + self.comp_builder.transport = 'core.ssh' + comp = self.comp_builder.new() + comp.store() + + exported_setup_filename = tmp_path / 'computer-setup.yml' + # Check that export fails if the file already exists + exported_setup_filename.touch() + result = self.cli_runner(computer_export_setup, [comp.label, exported_setup_filename], raises=True) + # assert 'already exists, use `--overwrite`' in result.output + + # Create new instance and check that change is reflected in new YAML file output + self.comp_builder.label = 'test_computer_export_setup_local' + self.comp_builder.transport = 'core.local' + comp_local = self.comp_builder.new() + comp_local.store() + result = self.cli_runner(computer_export_setup, [comp_local.label, exported_setup_filename, '--overwrite']) + content = exported_setup_filename.read_text() + assert 'core.local' in content + # we create a directory so we raise an error when exporting with the same name - # to test the except part of the function - already_existing_filename = tmp_path / 'tmp_dir' - already_existing_filename.mkdir() - result = self.cli_runner(computer_export_setup, [sort, comp.label, already_existing_filename], raises=True) - assert result.exit_code == ExitCode.CRITICAL + already_existing_directory = tmp_path / 'tmp_dir' + already_existing_directory.mkdir() + result = self.cli_runner(computer_export_setup, [comp.label, already_existing_directory], raises=True) + assert f'A directory with the name `{already_existing_directory}` already exists.' in result.output + + @pytest.mark.usefixtures('chdir_tmp_path') + def test_computer_export_setup_default_filename(self): + """Test that default filename is as expected when not specified for `verdi computer export setup`.""" + comp_label = 'test_computer_export_setup_default' + self.comp_builder.label = comp_label + # Label needs to be unique during parametrization + self.comp_builder.transport = 'core.ssh' + comp = self.comp_builder.new() + comp.store() - @pytest.mark.parametrize('sort', ['--sort', '--no-sort']) - def test_computer_export_config(self, tmp_path, sort): + exported_setup_filename = f'{comp_label}-setup.yml' + + self.cli_runner(computer_export_setup, [comp.label]) + assert pathlib.Path(exported_setup_filename).is_file() + + def test_computer_export_config(self, tmp_path): """Test if 'verdi computer export config' command works""" - self.comp_builder.label = 'test_computer_export_config' + sort + self.comp_builder.label = 'test_computer_export_config' self.comp_builder.transport = 'core.ssh' comp = self.comp_builder.new() comp.store() exported_config_filename = tmp_path / 'computer-configure.yml' + # We have not configured the computer yet so it should exit with an critical error result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename], raises=True) assert result.exit_code == ExitCode.CRITICAL comp.configure(safe_interval=0.0) + comp.configure(username='aiida') + + # Write sorted output file result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename]) assert 'Success' in result.output, 'Command should have run successfull.' assert ( str(exported_config_filename) in result.output ), 'Filename should be in terminal output but was not found.' 
assert exported_config_filename.exists(), f"'{exported_config_filename}' was not created during export." + + content = exported_config_filename.read_text() + assert content.startswith('safe_interval: 0.0') + # verifying correctness by comparing internal and loaded yml object configure_config_data = yaml.safe_load(exported_config_filename.read_text()) assert ( configure_config_data == comp.get_configuration() ), 'Internal computer configuration does not agree with exported one.' - # we create a directory so we raise an error when exporting with the same name - # to test the except part of the function - already_existing_filename = tmp_path / 'tmp_dir' - already_existing_filename.mkdir() - result = self.cli_runner(computer_export_config, [comp.label, already_existing_filename], raises=True) - assert result.exit_code == ExitCode.CRITICAL + # Check that unsorted output file creation works as expected + exported_config_filename.unlink() + result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename, '--no-sort']) + assert 'Success' in result.output, 'Command should have run successfull.' + assert ( + str(exported_config_filename) in result.output + ), 'Filename should be in terminal output but was not found.' + assert exported_config_filename.exists(), f"'{exported_config_filename}' was not created during export." - result = self.cli_runner( - computer_export_config, ['--user', self.user.email, comp.label, already_existing_filename], raises=True - ) - assert result.exit_code == ExitCode.CRITICAL + # Check contents + content = exported_config_filename.read_text() + assert 'username: aiida' in content, 'username not in output YAML' + assert 'safe_interval: 0.0' in content, 'safe_interval not in output YAML' + + def test_computer_export_config_overwrite(self, tmp_path): + """Test if overwrite behavior of `verdi computer export config` command works""" + self.comp_builder.label = 'test_computer_export_config_overwrite' + self.comp_builder.transport = 'core.ssh' + comp = self.comp_builder.new() + comp.store() + comp.configure(safe_interval=0.0) + + exported_config_filename = tmp_path / 'computer-configure.yml' + + # Create directory with the same name and check that command fails + exported_config_filename.mkdir() + result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename], raises=True) + assert f'A directory with the name `{exported_config_filename}` already exists' in result.output + exported_config_filename.rmdir() + + # Check that export fails if the file already exists + exported_config_filename.touch() + result = self.cli_runner(computer_export_config, [comp.label, exported_config_filename], raises=True) + assert 'already exists, use `--overwrite`' in result.output + + # Create new instance and check that change is reflected in overwritten YAML output file + self.comp_builder.label = 'test_computer_export_config_0' + comp_mod = self.comp_builder.new() + comp_mod.store() + comp_mod.configure(safe_interval=1.0) + self.cli_runner(computer_export_config, [comp_mod.label, exported_config_filename, '--overwrite']) + content = exported_config_filename.read_text() + assert 'safe_interval: 1.0' in content + + @pytest.mark.usefixtures('chdir_tmp_path') + def test_computer_export_config_default_filename(self): + """Test that default filename is as expected when not specified for `verdi computer export config`.""" + comp_label = 'test_computer_export_config_default' + self.comp_builder.label = comp_label + self.comp_builder.transport = 'core.ssh' + 
comp = self.comp_builder.new() + comp.store() + comp.configure(safe_interval=0.0) + + exported_config_filename = f'{comp_label}-config.yml' + + self.cli_runner(computer_export_config, [comp.label]) + assert pathlib.Path(exported_config_filename).is_file() class TestVerdiComputerCommands: diff --git a/tests/cmdline/commands/test_computer/test_computer_export_setup___no_sort_.yml b/tests/cmdline/commands/test_computer/test_computer_export_setup___no_sort_.yml new file mode 100644 index 0000000000..7fc3ce33fd --- /dev/null +++ b/tests/cmdline/commands/test_computer/test_computer_export_setup___no_sort_.yml @@ -0,0 +1,13 @@ +label: test_computer_export_setup--no-sort +hostname: localhost +description: Test Computer +transport: core.ssh +scheduler: core.direct +shebang: '#!xonsh' +work_dir: /tmp/aiida +mpirun_command: mpirun +mpiprocs_per_machine: 8 +default_memory_per_machine: 100000 +use_double_quotes: false +prepend_text: '' +append_text: '' diff --git a/tests/cmdline/commands/test_computer/test_computer_export_setup___sort_.yml b/tests/cmdline/commands/test_computer/test_computer_export_setup___sort_.yml new file mode 100644 index 0000000000..a1c7f6d9cc --- /dev/null +++ b/tests/cmdline/commands/test_computer/test_computer_export_setup___sort_.yml @@ -0,0 +1,13 @@ +append_text: '' +default_memory_per_machine: 100000 +description: Test Computer +hostname: localhost +label: test_computer_export_setup--sort +mpiprocs_per_machine: 8 +mpirun_command: mpirun +prepend_text: '' +scheduler: core.direct +shebang: '#!xonsh' +transport: core.ssh +use_double_quotes: false +work_dir: /tmp/aiida diff --git a/tests/cmdline/utils/test_common.py b/tests/cmdline/utils/test_common.py index 863f17d7a4..69a01090df 100644 --- a/tests/cmdline/utils/test_common.py +++ b/tests/cmdline/utils/test_common.py @@ -8,7 +8,11 @@ ########################################################################### """Tests for the :mod:`aiida.cmdline.utils.common` module.""" +from pathlib import Path + +import pytest from aiida.cmdline.utils import common +from aiida.cmdline.utils.common import generate_validate_output_file from aiida.common import LinkType from aiida.engine import Process, calcfunction from aiida.orm import CalcFunctionNode, CalculationNode, WorkflowNode @@ -88,3 +92,38 @@ def test_with_docstring(): common.print_process_info(TestProcessWithDocstring) common.print_process_info(test_without_docstring) common.print_process_info(test_with_docstring) + + +@pytest.mark.usefixtures('chdir_tmp_path') +def test_generate_validate_output(): + test_entity_label = 'test_code' + test_appendix = '@test_computer' + + expected_output_file = Path(f'{test_entity_label}{test_appendix}.yml') + + # Test default label creation + obtained_output_file = generate_validate_output_file( + output_file=None, entity_label=test_entity_label, appendix=test_appendix + ) + assert expected_output_file == obtained_output_file, 'Filenames differ' + + # Test failure if file exists, but overwrite False + expected_output_file.touch() + with pytest.raises(FileExistsError, match='.*use `--overwrite` to overwrite.'): + generate_validate_output_file( + output_file=None, entity_label=test_entity_label, appendix=test_appendix, overwrite=False + ) + + # Test that overwrite does the job + obtained_output_file = generate_validate_output_file( + output_file=None, entity_label=test_entity_label, appendix=test_appendix, overwrite=True + ) + assert expected_output_file == obtained_output_file, 'Overwrite unsuccessful' + expected_output_file.unlink() + + # Test failure 
if directory exists + expected_output_file.mkdir() + with pytest.raises(IsADirectoryError, match='A directory with the name.*'): + generate_validate_output_file( + output_file=None, entity_label=test_entity_label, appendix=test_appendix, overwrite=False + ) diff --git a/tests/tools/dumping/test_processes.py b/tests/tools/dumping/test_processes.py index aab1a48abb..82e704f4e2 100644 --- a/tests/tools/dumping/test_processes.py +++ b/tests/tools/dumping/test_processes.py @@ -302,9 +302,8 @@ def test_dump_calculation_add(tmp_path, generate_calculation_node_add): # Tests for helper methods -def test_validate_make_dump_path(chdir_tmp_path, tmp_path): - chdir_tmp_path - +@pytest.mark.usefixtures('chdir_tmp_path') +def test_validate_make_dump_path(tmp_path): safeguard_file = node_metadata_file # Path must be provided From 120c8ac6dcd15cec1ff3260ab65276c60027dd5f Mon Sep 17 00:00:00 2001 From: Ali Khosravi Date: Wed, 10 Jul 2024 14:55:51 +0200 Subject: [PATCH 53/82] CLI: Catch `NotImplementedError` in `verdi calcjob gotocomputer` (#6525) Not all transport plugins implement the `gotocomputer` method. Instead of excepting, the remote working directory is now displayed. --- src/aiida/cmdline/commands/cmd_calcjob.py | 10 ++++-- tests/cmdline/commands/test_calcjob.py | 41 +++++++++++++++++++++++ 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/src/aiida/cmdline/commands/cmd_calcjob.py b/src/aiida/cmdline/commands/cmd_calcjob.py index 9b34f50100..376301247e 100644 --- a/src/aiida/cmdline/commands/cmd_calcjob.py +++ b/src/aiida/cmdline/commands/cmd_calcjob.py @@ -43,9 +43,13 @@ def calcjob_gotocomputer(calcjob): if not remote_workdir: echo.echo_critical('no remote work directory for this calcjob, maybe the daemon did not submit it yet') - command = transport.gotocomputer_command(remote_workdir) - echo.echo_report('going to the remote work directory...') - os.system(command) + try: + command = transport.gotocomputer_command(remote_workdir) + echo.echo_report('going to the remote work directory...') + os.system(command) + except NotImplementedError: + echo.echo_report(f'gotocomputer is not implemented for {transport}') + echo.echo_report(f'remote work directory is {remote_workdir}') @verdi_calcjob.command('res') diff --git a/tests/cmdline/commands/test_calcjob.py b/tests/cmdline/commands/test_calcjob.py index 9fa6467d7f..a9ca84a991 100644 --- a/tests/cmdline/commands/test_calcjob.py +++ b/tests/cmdline/commands/test_calcjob.py @@ -356,3 +356,44 @@ def test_calcjob_remotecat(self): options = [str(self.result_job.uuid), 'fileA.txt'] result = self.cli_runner.invoke(command.calcjob_remotecat, options) assert result.stdout == 'test stringA' + + def test_calcjob_gotocomputer(self): + """Test verdi calcjob gotocomputer""" + + from unittest.mock import patch + + from aiida.common.exceptions import NotExistent + + options = [str(self.result_job.uuid)] + + # Easy peasy no exception + with patch('os.system') as mock_os_system: + result = self.cli_runner.invoke(command.calcjob_gotocomputer, options) + mock_os_system.assert_called_once() + assert mock_os_system.call_args[0][0] is not None + + def raise_(e): + raise e('something') + + # Test when get_transport raises NotExistent + with patch( + 'aiida.orm.nodes.process.calculation.calcjob.CalcJobNode.get_transport', new=lambda _: raise_(NotExistent) + ): + result = self.cli_runner.invoke(command.calcjob_gotocomputer, options) + assert result.exit_code == 1 + assert 'something' in result.output + + # Test when get_remote_workdir returns None + with 
patch('aiida.orm.nodes.process.calculation.calcjob.CalcJobNode.get_remote_workdir', new=lambda _: None): + result = self.cli_runner.invoke(command.calcjob_gotocomputer, options) + assert result.exit_code == 1 + assert 'no remote work directory for this calcjob' in result.output + + # Test when gotocomputer_command raises NotImplementedError + with patch( + 'aiida.transports.plugins.local.LocalTransport.gotocomputer_command', + new=lambda _, __: raise_(NotImplementedError), + ): + result = self.cli_runner.invoke(command.calcjob_gotocomputer, options) + assert result.exit_code == 0 + assert self.result_job.get_remote_workdir() in result.output From 9579378ba063237baa5b73380eb8e9f0a28529ee Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Thu, 11 Jul 2024 15:22:44 +0200 Subject: [PATCH 54/82] Docker: Replace sleep with `s6-notifyoncheck` (#6475) For the `aiida-core-with-services` image where the services are part of the image, we cannot rely on the health check for the services provided by docker-build as is used for the `aiida-core-base` case. Instead, a simple sleep was added to the `aiida-prepare.sh` script that sets up the profile, to make sure the services are up before the profile is created. This solution is neither elegant nor robust. Here the sleep approach is replaced by `s6-notifyoncheck`. This hook allows blocking the startup from continuing until a script returns a 0 exit code. The script in question first calls `rabbitmq-diagnostics ping` to make sure the RabbitMQ server is even up, followed by a call to `rabbitmq-diagnostics check_running`. If the latter returns 0, it means RabbitMQ is up and running and the script returns 0 as well, which will trigger `s6-notifyoncheck` to continue with the rest of the startup. Note that `rabbitmq-diagnostics is_running` could not be used as that command sometimes returns 0 even if the service is not ready at all. Co-authored-by: Sebastiaan Huber --- .../s6-assets/init/aiida-prepare.sh | 5 ----- .../s6-assets/s6-rc.d/rabbitmq/data/check | 15 +++++++++++++++ .../s6-assets/s6-rc.d/rabbitmq/notification-fd | 1 + .../s6-assets/s6-rc.d/rabbitmq/run | 13 +++++++++++-- 4 files changed, 27 insertions(+), 7 deletions(-) create mode 100755 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/data/check create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/notification-fd diff --git a/.docker/aiida-core-base/s6-assets/init/aiida-prepare.sh b/.docker/aiida-core-base/s6-assets/init/aiida-prepare.sh index 30eefc3999..a9e54142fa 100755 --- a/.docker/aiida-core-base/s6-assets/init/aiida-prepare.sh +++ b/.docker/aiida-core-base/s6-assets/init/aiida-prepare.sh @@ -18,11 +18,6 @@ verdi config set warnings.development_version False # If the environment variable `SETUP_DEFAULT_AIIDA_PROFILE` is not set, set it to `true`. if [[ ${SETUP_DEFAULT_AIIDA_PROFILE:-true} == true ]] && ! verdi profile show ${AIIDA_PROFILE_NAME} &> /dev/null; then - # For the container that includes the services, this script is called as soon as the RabbitMQ startup script has - # been launched, but it can take a while for the service to come up. If ``verdi presto`` is called straight away - # it is possible it tries to connect to the service before that and it will configure the profile without a broker. - sleep 5 - # Create AiiDA profile. 
     verdi presto \
         --verbosity info \
diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/data/check b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/data/check
new file mode 100755
index 0000000000..46eb70ea89
--- /dev/null
+++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/data/check
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+rabbitmq-diagnostics ping
+
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+rabbitmq-diagnostics check_running
+
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+exit 0
diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/notification-fd b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/notification-fd
new file mode 100644
index 0000000000..00750edc07
--- /dev/null
+++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/notification-fd
@@ -0,0 +1 @@
+3
diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/run b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/run
index e5752294ff..8a35acd20f 100644
--- a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/run
+++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/run
@@ -2,5 +2,14 @@
 with-contenv

-foreground { s6-echo "Calling /etc/init/rabbitmq.sh" }
-rabbitmq-server
+foreground { s6-echo "Starting RMQ server and notifying back when the service is ready" }
+
+
+# For the container that includes the services, aiida-prepare.sh script is called as soon as the RabbitMQ startup script has
+# been launched, but it can take a while for the RMQ service to come up. If ``verdi presto`` is called straight away
+# it is possible it tries to connect to the service before that and it will configure the profile without a broker.
+# Here we use s6-notifyoncheck to do the polling health check of the readiness of the RMQ service.
+#
+# -w 500: 500 ms between two invocations of ./data/check
+
+s6-notifyoncheck -w 500 rabbitmq-server

From cba6e7c757ec74194afc63809b5dac72bb81a771 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Thu, 11 Jul 2024 15:54:18 +0200
Subject: [PATCH 55/82] Dependencies: Update requirement to `psycopg~=3.0` (#6362)

The `psycopg` library is used by `sqlalchemy` to connect to the PostgreSQL server. So far, the `psycopg2` package
was used, but the next generation v3 has already been out for a while. The release comes with a number of
performance improvements according to the author. Although there is no clear timeline for support of v2, we are not
waiting and switching now to the new version.

When v2 was released, instead of releasing a new major version, it was released as a new library changing the name
from `psycopg` to `psycopg2` but for v3 they are switching back to `psycopg`. This means that users would still be
able to run v2 and v3 alongside one another in a single Python environment. This supports the decision to move to
v3. Note that `aiida-core` will not be supporting both at the same time and from now only supports v3.

Interestingly, from the very early versions of AiiDA, the profile would contain the `database_engine` key. This is
still an option in `verdi setup` and `verdi quicksetup`, except it is not really an option as it is hardcoded to
`postgresql_psycopg2`. So all `core.psql_dos` profiles out there contain: 'main': { 'storage': { 'backend': 'core.psql_dos', 'config': { 'database_engine': 'postgresql_psycopg2', ...
} }, } The value is not actually used however as the connection string for sqlalchemy's engine, where it _could_ be used, simply hardcodes this to `postgres://` which is the default psycopg dialect and maps to `postgres+psycopg2://`. This value is now simply updated to `postgres+psycopg://` to target the new version of `psycopg`. Because it is hardcoded, a migration for the existing configs is not required and it can be left as a vestigial attribute. Since `verdi setup` and `verdi quicksetup` are deprecated now anyway, the options don't have to be removed here. --- .github/config/profile.yaml | 2 +- docs/source/howto/installation.rst | 2 +- docs/source/installation/docker.rst | 2 +- docs/source/installation/troubleshooting.rst | 2 +- docs/source/nitpick-exceptions | 2 -- docs/source/reference/command_line.rst | 4 ++-- environment.yml | 4 ++-- pyproject.toml | 6 +++--- requirements/requirements-py-3.10.txt | 4 ++-- requirements/requirements-py-3.11.txt | 4 ++-- requirements/requirements-py-3.12.txt | 4 ++-- requirements/requirements-py-3.9.txt | 4 ++-- src/aiida/cmdline/commands/cmd_setup.py | 4 ++-- .../cmdline/params/options/commands/setup.py | 4 ++-- src/aiida/cmdline/params/options/main.py | 4 ++-- src/aiida/manage/external/postgres.py | 17 ++++++++--------- src/aiida/manage/tests/pytest_fixtures.py | 6 +++--- src/aiida/restapi/common/utils.py | 7 +++---- src/aiida/storage/psql_dos/backend.py | 2 +- src/aiida/storage/psql_dos/utils.py | 2 +- src/aiida/tools/pytest_fixtures/storage.py | 6 +++--- tests/conftest.py | 2 +- tests/orm/test_querybuilder/test_as_sql.txt | 2 +- tests/storage/psql_dos/migrations/conftest.py | 15 ++++++++------- 24 files changed, 54 insertions(+), 57 deletions(-) diff --git a/.github/config/profile.yaml b/.github/config/profile.yaml index e031d1a6e2..d0e2c9eebf 100644 --- a/.github/config/profile.yaml +++ b/.github/config/profile.yaml @@ -4,7 +4,7 @@ first_name: Giuseppe last_name: Verdi institution: Khedivial db_backend: core.psql_dos -db_engine: postgresql_psycopg2 +db_engine: postgresql_psycopg db_host: localhost db_port: 5432 db_name: test_aiida diff --git a/docs/source/howto/installation.rst b/docs/source/howto/installation.rst index 50547b8f21..d2c28d5b02 100644 --- a/docs/source/howto/installation.rst +++ b/docs/source/howto/installation.rst @@ -55,7 +55,7 @@ To display these parameters, use ``verdi profile show``: storage: backend: core.psql_dos config: - database_engine: postgresql_psycopg2 + database_engine: postgresql_psycopg database_hostname: localhost database_name: name database_password: abc diff --git a/docs/source/installation/docker.rst b/docs/source/installation/docker.rst index d1a2a2f22e..3be265d431 100644 --- a/docs/source/installation/docker.rst +++ b/docs/source/installation/docker.rst @@ -66,7 +66,7 @@ which should show something like:: ✔ version: AiiDA v2.5.1 ✔ config: /home/aiida/.aiida ✔ profile: default - ✔ storage: Storage for 'default' [open] @ postgresql://aiida:***@localhost:5432 + ✔ storage: Storage for 'default' [open] @ postgresql+psycopg://aiida:***@localhost:5432 ✔ rabbitmq: Connected to RabbitMQ v3.10.18 as amqp://guest:guest@127.0.0.1:5672 ✔ daemon: Daemon is running with PID 324 diff --git a/docs/source/installation/troubleshooting.rst b/docs/source/installation/troubleshooting.rst index e70b82a17b..474aad751f 100644 --- a/docs/source/installation/troubleshooting.rst +++ b/docs/source/installation/troubleshooting.rst @@ -13,7 +13,7 @@ If you experience any problems, first check that all services are up and running ✓ 
version: AiiDA v2.0.0 ✓ config: /path/to/.aiida ✓ profile: default - ✓ storage: Storage for 'default' @ postgresql://username:***@localhost:5432/db_name / file:///path/to/repository + ✓ storage: Storage for 'default' @ postgresql+psycopg://username:***@localhost:5432/db_name / file:///path/to/repository ✓ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 ✓ daemon: Daemon is running as PID 2809 since 2019-03-15 16:27:52 diff --git a/docs/source/nitpick-exceptions b/docs/source/nitpick-exceptions index cdfa6151e3..bf0ba64d66 100644 --- a/docs/source/nitpick-exceptions +++ b/docs/source/nitpick-exceptions @@ -236,8 +236,6 @@ py:class yaml.nodes.MappingNode py:class yaml.nodes.ScalarNode py:class uuid.UUID -py:class psycopg2.extensions.cursor - py:class alembic.config.Config py:class alembic.op py:class alembic.runtime.migration.MigrationContext diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index c3f3250c9c..3982f75c53 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -427,7 +427,7 @@ Below is a list with all available subcommands. --first-name NONEMPTYSTRING First name of the user. [required] --last-name NONEMPTYSTRING Last name of the user. [required] --institution NONEMPTYSTRING Institution of the user. [required] - --db-engine [postgresql_psycopg2] + --db-engine [postgresql_psycopg] Engine to use to connect to the database. [required] --db-backend [core.psql_dos] Database backend to use. [required] --db-host HOSTNAME Database server host. Leave empty for "peer" @@ -534,7 +534,7 @@ Below is a list with all available subcommands. --first-name NONEMPTYSTRING First name of the user. [required] --last-name NONEMPTYSTRING Last name of the user. [required] --institution NONEMPTYSTRING Institution of the user. [required] - --db-engine [postgresql_psycopg2] + --db-engine [postgresql_psycopg] Engine to use to connect to the database. [required] --db-backend [core.psql_dos] Database backend to use. [required] --db-host HOSTNAME Database server host. 
Leave empty for "peer" diff --git a/environment.yml b/environment.yml index 99d7748c64..cb86eef22f 100644 --- a/environment.yml +++ b/environment.yml @@ -23,9 +23,9 @@ dependencies: - numpy~=1.21 - paramiko>=2.7.2,~=2.7 - plumpy~=0.22.3 -- pgsu~=0.2.1 +- pgsu~=0.3.0 - psutil~=5.6 -- psycopg2-binary~=2.8 +- psycopg[binary]~=3.0 - pydantic~=2.4 - pytz~=2021.1 - pyyaml~=6.0 diff --git a/pyproject.toml b/pyproject.toml index 5f31cef2a0..36a7af77c3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,9 +35,9 @@ dependencies = [ 'numpy~=1.21', 'paramiko~=2.7,>=2.7.2', 'plumpy~=0.22.3', - 'pgsu~=0.2.1', + 'pgsu~=0.3.0', 'psutil~=5.6', - 'psycopg2-binary~=2.8', + 'psycopg[binary]~=3.0', 'pydantic~=2.4', 'pytz~=2021.1', 'pyyaml~=6.0', @@ -326,7 +326,7 @@ module = [ 'pgtest.*', 'phonopy.*', 'psutil.*', - 'psycopg2.*', + 'psycopg.*', 'pymatgen.*', 'pymysql.*', 'pyparsing.*', diff --git a/requirements/requirements-py-3.10.txt b/requirements/requirements-py-3.10.txt index d6ca92f6c3..2a52146929 100644 --- a/requirements/requirements-py-3.10.txt +++ b/requirements/requirements-py-3.10.txt @@ -113,7 +113,7 @@ paramiko==2.12.0 parso==0.8.3 pexpect==4.8.0 pg8000==1.29.8 -pgsu==0.2.3 +pgsu==0.3.0 pgtest==1.3.2 pickleshare==0.7.5 pillow==9.5.0 @@ -124,7 +124,7 @@ plumpy==0.22.3 prometheus-client==0.17.0 prompt-toolkit==3.0.38 psutil==5.9.5 -psycopg2-binary==2.9.6 +psycopg[binary]==3.1.18 ptyprocess==0.7.0 pure-eval==0.2.2 py-cpuinfo==9.0.0 diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt index 95347a0980..67ffe6add5 100644 --- a/requirements/requirements-py-3.11.txt +++ b/requirements/requirements-py-3.11.txt @@ -112,7 +112,7 @@ paramiko==2.12.0 parso==0.8.3 pexpect==4.8.0 pg8000==1.29.8 -pgsu==0.2.3 +pgsu==0.3.0 pgtest==1.3.2 pickleshare==0.7.5 pillow==9.5.0 @@ -123,7 +123,7 @@ plumpy==0.22.3 prometheus-client==0.17.0 prompt-toolkit==3.0.38 psutil==5.9.5 -psycopg2-binary==2.9.6 +psycopg[binary]==3.1.18 ptyprocess==0.7.0 pure-eval==0.2.2 py-cpuinfo==9.0.0 diff --git a/requirements/requirements-py-3.12.txt b/requirements/requirements-py-3.12.txt index 15d59944df..4a6d8ec05f 100644 --- a/requirements/requirements-py-3.12.txt +++ b/requirements/requirements-py-3.12.txt @@ -112,7 +112,7 @@ paramiko==2.12.0 parso==0.8.3 pexpect==4.8.0 pg8000==1.30.2 -pgsu==0.2.4 +pgsu==0.3.0 pgtest==1.3.2 pickleshare==0.7.5 pillow==10.1.0 @@ -123,7 +123,7 @@ plumpy==0.22.3 prometheus-client==0.17.1 prompt-toolkit==3.0.39 psutil==5.9.6 -psycopg2-binary==2.9.9 +psycopg[binary]==3.1.18 ptyprocess==0.7.0 pure-eval==0.2.2 py-cpuinfo==9.0.0 diff --git a/requirements/requirements-py-3.9.txt b/requirements/requirements-py-3.9.txt index 1a7d1b2704..6707b64057 100644 --- a/requirements/requirements-py-3.9.txt +++ b/requirements/requirements-py-3.9.txt @@ -115,7 +115,7 @@ paramiko==2.12.0 parso==0.8.3 pexpect==4.8.0 pg8000==1.29.8 -pgsu==0.2.3 +pgsu==0.3.0 pgtest==1.3.2 pickleshare==0.7.5 pillow==9.5.0 @@ -126,7 +126,7 @@ plumpy==0.22.3 prometheus-client==0.17.0 prompt-toolkit==3.0.38 psutil==5.9.5 -psycopg2-binary==2.9.6 +psycopg[binary]==3.1.18 ptyprocess==0.7.0 pure-eval==0.2.2 py-cpuinfo==9.0.0 diff --git a/src/aiida/cmdline/commands/cmd_setup.py b/src/aiida/cmdline/commands/cmd_setup.py index 93e6162141..ad86ee21db 100644 --- a/src/aiida/cmdline/commands/cmd_setup.py +++ b/src/aiida/cmdline/commands/cmd_setup.py @@ -241,8 +241,8 @@ def quicksetup( 'db_backend': db_backend, 'db_name': db_name, # from now on we connect as the AiiDA DB user, which may be forbidden when going via sockets - 
'db_host': postgres.host_for_psycopg2, - 'db_port': postgres.port_for_psycopg2, + 'db_host': postgres.host_for_psycopg, + 'db_port': postgres.port_for_psycopg, 'db_username': db_username, 'db_password': db_password, 'broker_protocol': broker_protocol, diff --git a/src/aiida/cmdline/params/options/commands/setup.py b/src/aiida/cmdline/params/options/commands/setup.py index 40df742d4e..930aa97018 100644 --- a/src/aiida/cmdline/params/options/commands/setup.py +++ b/src/aiida/cmdline/params/options/commands/setup.py @@ -258,7 +258,7 @@ def get_quicksetup_password(ctx, param, value): '--su-db-name', help='Name of the template database to connect to as the database superuser.', type=click.STRING, - default=DEFAULT_DBINFO['database'], + default=DEFAULT_DBINFO['dbname'], ) QUICKSETUP_SUPERUSER_DATABASE_PASSWORD = options.OverridableOption( @@ -288,7 +288,7 @@ def get_quicksetup_password(ctx, param, value): SETUP_DATABASE_ENGINE = QUICKSETUP_DATABASE_ENGINE.clone( prompt='Database engine', contextual_default=functools.partial( - get_profile_attribute_default, ('storage.config.database_engine', 'postgresql_psycopg2') + get_profile_attribute_default, ('storage.config.database_engine', 'postgresql_psycopg') ), cls=options.interactive.InteractiveOption, ) diff --git a/src/aiida/cmdline/params/options/main.py b/src/aiida/cmdline/params/options/main.py index d521828450..381199d199 100644 --- a/src/aiida/cmdline/params/options/main.py +++ b/src/aiida/cmdline/params/options/main.py @@ -383,8 +383,8 @@ def set_log_level(ctx, _param, value): '--db-engine', required=True, help='Engine to use to connect to the database.', - default='postgresql_psycopg2', - type=click.Choice(['postgresql_psycopg2']), + default='postgresql_psycopg', + type=click.Choice(['postgresql_psycopg']), ) DB_BACKEND = OverridableOption( diff --git a/src/aiida/manage/external/postgres.py b/src/aiida/manage/external/postgres.py index 530d23b9c5..62092d7835 100644 --- a/src/aiida/manage/external/postgres.py +++ b/src/aiida/manage/external/postgres.py @@ -96,7 +96,7 @@ def create_dbuser(self, dbuser, dbpass, privileges=''): :param str dbuser: Name of the user to be created. :param str dbpass: Password the user should be given. - :raises: psycopg2.errors.DuplicateObject if user already exists and + :raises: psycopg.errors.DuplicateObject if user already exists and self.connection_mode == PostgresConnectionMode.PSYCOPG """ self.execute(_CREATE_USER_COMMAND.format(dbuser, dbpass, privileges)) @@ -130,14 +130,13 @@ def find_new_dbuser(self, start_from='aiida'): def can_user_authenticate(self, dbuser, dbpass): """Check whether the database user credentials are valid. - Checks whether dbuser has access to the `template1` postgres database - via psycopg2. + Checks whether dbuser has access to the `template1` postgres database via psycopg. 
:param dbuser: the database user :param dbpass: the database password :return: True if the credentials are valid, False otherwise """ - import psycopg2 + import psycopg from pgsu import _execute_psyco dsn = self.dsn.copy() @@ -146,7 +145,7 @@ def can_user_authenticate(self, dbuser, dbpass): try: _execute_psyco('SELECT 1', dsn) - except psycopg2.OperationalError: + except psycopg.OperationalError: return False return True @@ -227,8 +226,8 @@ def create_dbuser_db_safe(self, dbname, dbuser, dbpass): return dbuser, dbname @property - def host_for_psycopg2(self): - """Return correct host for psycopg2 connection (as required by regular AiiDA operation).""" + def host_for_psycopg(self): + """Return correct host for psycopg connection (as required by regular AiiDA operation).""" host = self.dsn.get('host') if self.connection_mode == PostgresConnectionMode.PSQL: # If "sudo su postgres" was needed to create the DB, we are likely on Ubuntu, where @@ -238,8 +237,8 @@ def host_for_psycopg2(self): return host @property - def port_for_psycopg2(self): - """Return port for psycopg2 connection (as required by regular AiiDA operation).""" + def port_for_psycopg(self): + """Return port for psycopg connection (as required by regular AiiDA operation).""" return self.dsn.get('port') @property diff --git a/src/aiida/manage/tests/pytest_fixtures.py b/src/aiida/manage/tests/pytest_fixtures.py index 96b7abaaad..92856aff66 100644 --- a/src/aiida/manage/tests/pytest_fixtures.py +++ b/src/aiida/manage/tests/pytest_fixtures.py @@ -96,7 +96,7 @@ def create_database( from aiida.manage.external.postgres import Postgres postgres_config = { - 'database_engine': 'postgresql_psycopg2', + 'database_engine': 'postgresql_psycopg', 'database_name': database_name or str(uuid.uuid4()), 'database_username': database_username or 'guest', 'database_password': database_password or 'guest', @@ -109,8 +109,8 @@ def create_database( ) postgres.create_db(postgres_config['database_username'], postgres_config['database_name']) - postgres_config['database_hostname'] = postgres.host_for_psycopg2 - postgres_config['database_port'] = postgres.port_for_psycopg2 + postgres_config['database_hostname'] = postgres.host_for_psycopg + postgres_config['database_port'] = postgres.port_for_psycopg return postgres_config diff --git a/src/aiida/restapi/common/utils.py b/src/aiida/restapi/common/utils.py index d81a86156b..993699fb75 100644 --- a/src/aiida/restapi/common/utils.py +++ b/src/aiida/restapi/common/utils.py @@ -9,7 +9,7 @@ """Util methods""" import urllib.parse -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from flask import jsonify from flask.json.provider import DefaultJSONProvider @@ -670,7 +670,6 @@ def parse_query_string(self, query_string): :param query_string (as obtained from request.query_string) :return: parsed values for the querykeys """ - from psycopg2.tz import FixedOffsetTimezone from pyparsing import ( Combine, Group, @@ -774,10 +773,10 @@ def validate_time(toks): if dtobj.tzinfo is not None and dtobj.utcoffset() is not None: tzoffset_minutes = int(dtobj.utcoffset().total_seconds() // 60) return DatetimePrecision( - dtobj.replace(tzinfo=FixedOffsetTimezone(offset=tzoffset_minutes, name=None)), precision + dtobj.replace(tzinfo=timezone(offset=timedelta(minutes=tzoffset_minutes), name='UTC')), precision ) - return DatetimePrecision(dtobj.replace(tzinfo=FixedOffsetTimezone(offset=0, name=None)), precision) + return 
DatetimePrecision(dtobj.replace(tzinfo=timezone(offset=timedelta(minutes=0), name='UTC')), precision) ######################################################################## diff --git a/src/aiida/storage/psql_dos/backend.py b/src/aiida/storage/psql_dos/backend.py index 82c92f1bfe..b0d2dc813a 100644 --- a/src/aiida/storage/psql_dos/backend.py +++ b/src/aiida/storage/psql_dos/backend.py @@ -80,7 +80,7 @@ class Model(BaseModel, defer_build=True): database_engine: str = Field( title='PostgreSQL engine', description='The engine to use to connect to the database.', - default='postgresql_psycopg2', + default='postgresql_psycopg', ) database_hostname: str = Field( title='PostgreSQL hostname', description='The hostname of the PostgreSQL server.', default='localhost' diff --git a/src/aiida/storage/psql_dos/utils.py b/src/aiida/storage/psql_dos/utils.py index 116e1ecdba..3aaa8ed9dd 100644 --- a/src/aiida/storage/psql_dos/utils.py +++ b/src/aiida/storage/psql_dos/utils.py @@ -42,7 +42,7 @@ def create_sqlalchemy_engine(config: PsqlConfig): separator = ':' if config['database_port'] else '' password = quote_plus(config['database_password']) - engine_url = 'postgresql://{user}:{password}@{hostname}{separator}{port}/{name}'.format( + engine_url = 'postgresql+psycopg://{user}:{password}@{hostname}{separator}{port}/{name}'.format( separator=separator, user=config['database_username'], password=password, diff --git a/src/aiida/tools/pytest_fixtures/storage.py b/src/aiida/tools/pytest_fixtures/storage.py index 4565e621b4..dd47ad0f21 100644 --- a/src/aiida/tools/pytest_fixtures/storage.py +++ b/src/aiida/tools/pytest_fixtures/storage.py @@ -41,7 +41,7 @@ def create_database( self._create() postgres_config = { - 'database_engine': 'postgresql_psycopg2', + 'database_engine': 'postgresql_psycopg', 'database_name': database_name or str(uuid4()), 'database_username': database_username or 'guest', 'database_password': database_password or 'guest', @@ -54,8 +54,8 @@ def create_database( ) postgres.create_db(postgres_config['database_username'], postgres_config['database_name']) - postgres_config['database_hostname'] = postgres.host_for_psycopg2 - postgres_config['database_port'] = postgres.port_for_psycopg2 + postgres_config['database_hostname'] = postgres.host_for_psycopg + postgres_config['database_port'] = postgres.port_for_psycopg return postgres_config diff --git a/tests/conftest.py b/tests/conftest.py index 19d282548c..3a9cd56336 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -351,7 +351,7 @@ def _create_profile(name='test-profile', **kwargs): 'storage': { 'backend': kwargs.pop('storage_backend', 'core.psql_dos'), 'config': { - 'database_engine': kwargs.pop('database_engine', 'postgresql_psycopg2'), + 'database_engine': kwargs.pop('database_engine', 'postgresql_psycopg'), 'database_hostname': kwargs.pop('database_hostname', 'localhost'), 'database_port': kwargs.pop('database_port', 5432), 'database_name': kwargs.pop('database_name', name), diff --git a/tests/orm/test_querybuilder/test_as_sql.txt b/tests/orm/test_querybuilder/test_as_sql.txt index 930a10e458..9d0b1f3fab 100644 --- a/tests/orm/test_querybuilder/test_as_sql.txt +++ b/tests/orm/test_querybuilder/test_as_sql.txt @@ -1 +1 @@ -'SELECT db_dbnode_1.uuid \nFROM db_dbnode AS db_dbnode_1 \nWHERE CAST(db_dbnode_1.node_type AS VARCHAR) LIKE %(param_1)s AND CASE WHEN (jsonb_typeof((db_dbnode_1.extras #> %(extras_1)s)) = %(jsonb_typeof_1)s) THEN (db_dbnode_1.extras #>> %(extras_1)s) = %(param_2)s ELSE %(param_3)s END' % {'param_1': '%', 
'extras_1': ('tag4',), 'jsonb_typeof_1': 'string', 'param_2': 'appl_pecoal', 'param_3': False} +'SELECT db_dbnode_1.uuid \nFROM db_dbnode AS db_dbnode_1 \nWHERE CAST(db_dbnode_1.node_type AS VARCHAR) LIKE %(param_1)s::VARCHAR AND CASE WHEN (jsonb_typeof((db_dbnode_1.extras #> %(extras_1)s)) = %(jsonb_typeof_1)s::VARCHAR) THEN (db_dbnode_1.extras #>> %(extras_1)s) = %(param_2)s::VARCHAR ELSE %(param_3)s END' % {'param_1': '%', 'extras_1': ('tag4',), 'jsonb_typeof_1': 'string', 'param_2': 'appl_pecoal', 'param_3': False} diff --git a/tests/storage/psql_dos/migrations/conftest.py b/tests/storage/psql_dos/migrations/conftest.py index 088b2b73b1..5ca772dc5f 100644 --- a/tests/storage/psql_dos/migrations/conftest.py +++ b/tests/storage/psql_dos/migrations/conftest.py @@ -29,15 +29,16 @@ def empty_pg_cluster(): @pytest.fixture def uninitialised_profile(empty_pg_cluster: PGTest, tmp_path): """Create a profile attached to an empty database and repository folder.""" - import psycopg2 - from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT + import psycopg database_name = f'test_{uuid4().hex}' + dsn = empty_pg_cluster.dsn + dsn['dbname'] = dsn.pop('database') conn = None try: - conn = psycopg2.connect(**empty_pg_cluster.dsn) - conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + conn = psycopg.connect(**dsn) + conn.autocommit = True with conn.cursor() as cursor: cursor.execute(f"CREATE DATABASE {database_name} ENCODING 'utf8';") finally: @@ -51,7 +52,7 @@ def uninitialised_profile(empty_pg_cluster: PGTest, tmp_path): 'storage': { 'backend': 'core.psql_dos', 'config': { - 'database_engine': 'postgresql_psycopg2', + 'database_engine': 'postgresql_psycopg', 'database_port': empty_pg_cluster.port, 'database_hostname': empty_pg_cluster.dsn['host'], 'database_name': database_name, @@ -66,8 +67,8 @@ def uninitialised_profile(empty_pg_cluster: PGTest, tmp_path): conn = None try: - conn = psycopg2.connect(**empty_pg_cluster.dsn) - conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) + conn = psycopg.connect(**dsn) + conn.autocommit = True with conn.cursor() as cursor: # note after postgresql 13 you can use 'DROP DATABASE name WITH (FORCE)' # but for now, we first close all possible open connections to the database, before dropping it From 954cbdd3ee5127d6618db9d144508505e41cffcc Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 11 Jul 2024 22:52:44 +0200 Subject: [PATCH 56/82] `Scheduler`: Refactor interface to make it more generic (#6043) The original `Scheduler` interface made the assumption that all interfaces would interact with the scheduler through a command line interface that would be invoked through a bash shell. However, this is not always the case. Prime example is the new FirecREST service, being developed by CSCS, that will allow to interact with the scheduler through a REST API. Due to the assumptions of the `Scheduler` interface, it was difficult to implement it for this use case. The `Scheduler` interface is made more generic, by removing the following (abstract) methods: * `_get_joblist_command` * `_parse_joblist_output` * `_get_submit_command` * `_parse_submit_output` * `submit_from_script` * `kill` * `_get_kill_command` * `_parse_kill_output` They are replaced by three abstract methods: * `submit_job` * `get_jobs` * `kill_job` The new interface no longer makes an assumption about how a plugin implements these methods. The first one should simply submit the job, given the location of the submission script on the remote computer. 
The second should return the status of the list of active jobs. And the last one should kill a job and return the
result. Unfortunately, this change is backwards incompatible and will break existing scheduler plugins. To simplify
the migration pathway, a subclass `BashCliScheduler` is added. This implements the new `Scheduler` interface while
maintaining the old interface. This means that this new class is a drop-in replacement of the old `Scheduler` class
for existing plugins. The plugins that ship with `aiida-core` are all updated to subclass from `BashCliScheduler`.
Any existing plugins that subclassed from these plugins will therefore not be affected whatsoever by these changes.
---
 src/aiida/engine/daemon/execmanager.py        |  10 +-
 src/aiida/engine/processes/calcjobs/tasks.py  |   2 +-
 src/aiida/schedulers/__init__.py              |   2 +
 src/aiida/schedulers/plugins/__init__.py      |   3 +
 src/aiida/schedulers/plugins/bash.py          | 123 ++++++++++++++++
 src/aiida/schedulers/plugins/direct.py        |   4 +-
 src/aiida/schedulers/plugins/lsf.py           |   7 +-
 .../schedulers/plugins/pbsbaseclasses.py      |   7 +-
 src/aiida/schedulers/plugins/sge.py           |   4 +-
 src/aiida/schedulers/plugins/slurm.py         |   4 +-
 src/aiida/schedulers/scheduler.py             | 138 +++++-------------
 .../processes/calcjobs/test_calc_job.py       |   4 +-
 tests/transports/test_all_plugins.py          |   2 +-
 13 files changed, 194 insertions(+), 116 deletions(-)
 create mode 100644 src/aiida/schedulers/plugins/bash.py

diff --git a/src/aiida/engine/daemon/execmanager.py b/src/aiida/engine/daemon/execmanager.py
index 6f2a42fa15..045347404c 100644
--- a/src/aiida/engine/daemon/execmanager.py
+++ b/src/aiida/engine/daemon/execmanager.py
@@ -397,7 +397,7 @@ def submit_calculation(calculation: CalcJobNode, transport: Transport) -> str |
     :param calculation: the instance of CalcJobNode to submit.
     :param transport: an already opened transport to use to submit the calculation.
- :return: the job id as returned by the scheduler `submit_from_script` call + :return: the job id as returned by the scheduler `submit_job` call """ job_id = calculation.get_job_id() @@ -414,7 +414,7 @@ def submit_calculation(calculation: CalcJobNode, transport: Transport) -> str | submit_script_filename = calculation.get_option('submit_script_filename') workdir = calculation.get_remote_workdir() - result = scheduler.submit_from_script(workdir, submit_script_filename) + result = scheduler.submit_job(workdir, submit_script_filename) if isinstance(result, str): calculation.set_job_id(result) @@ -572,7 +572,7 @@ def kill_calculation(calculation: CalcJobNode, transport: Transport) -> None: scheduler.set_transport(transport) # Call the proper kill method for the job ID of this calculation - result = scheduler.kill(job_id) + result = scheduler.kill_job(job_id) if result is not True: # Failed to kill because the job might have already been completed @@ -581,10 +581,10 @@ def kill_calculation(calculation: CalcJobNode, transport: Transport) -> None: # If the job is returned it is still running and the kill really failed, so we raise if job is not None and job.job_state != JobState.DONE: - raise exceptions.RemoteOperationError(f'scheduler.kill({job_id}) was unsuccessful') + raise exceptions.RemoteOperationError(f'scheduler.kill_job({job_id}) was unsuccessful') else: EXEC_LOGGER.warning( - 'scheduler.kill() failed but job<{%s}> no longer seems to be running regardless', job_id + 'scheduler.kill_job() failed but job<{%s}> no longer seems to be running regardless', job_id ) diff --git a/src/aiida/engine/processes/calcjobs/tasks.py b/src/aiida/engine/processes/calcjobs/tasks.py index 085e5c1f50..8b8231634f 100644 --- a/src/aiida/engine/processes/calcjobs/tasks.py +++ b/src/aiida/engine/processes/calcjobs/tasks.py @@ -510,7 +510,7 @@ async def execute(self) -> plumpy.process_states.State: # type: ignore[override result = await self._launch_task(task_submit_job, node, transport_queue) if isinstance(result, ExitCode): - # The scheduler plugin returned an exit code from ``Scheduler.submit_from_script`` indicating the + # The scheduler plugin returned an exit code from ``Scheduler.submit_job`` indicating the # job submission failed due to a non-transient problem and the job should be terminated. 
return self.create_state(ProcessState.RUNNING, self.process.terminate, result) diff --git a/src/aiida/schedulers/__init__.py b/src/aiida/schedulers/__init__.py index b81d7f79c4..748e23b5d5 100644 --- a/src/aiida/schedulers/__init__.py +++ b/src/aiida/schedulers/__init__.py @@ -13,9 +13,11 @@ # fmt: off from .datastructures import * +from .plugins import * from .scheduler import * __all__ = ( + 'BashCliScheduler', 'JobInfo', 'JobResource', 'JobState', diff --git a/src/aiida/schedulers/plugins/__init__.py b/src/aiida/schedulers/plugins/__init__.py index c56ff0a1f8..cae7feed47 100644 --- a/src/aiida/schedulers/plugins/__init__.py +++ b/src/aiida/schedulers/plugins/__init__.py @@ -6,3 +6,6 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +from .bash import BashCliScheduler + +__all__ = ('BashCliScheduler',) diff --git a/src/aiida/schedulers/plugins/bash.py b/src/aiida/schedulers/plugins/bash.py new file mode 100644 index 0000000000..0511a4cb99 --- /dev/null +++ b/src/aiida/schedulers/plugins/bash.py @@ -0,0 +1,123 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Job scheduler that is interacted with through a CLI in bash.""" + +from __future__ import annotations + +import abc + +from aiida.common.escaping import escape_for_bash +from aiida.engine.processes.exit_code import ExitCode +from aiida.schedulers.datastructures import JobInfo +from aiida.schedulers.scheduler import Scheduler, SchedulerError + +__all__ = ('BashCliScheduler',) + + +class BashCliScheduler(Scheduler, metaclass=abc.ABCMeta): + """Job scheduler that is interacted with through a CLI in bash.""" + + def submit_job(self, working_directory: str, filename: str) -> str | ExitCode: + """Submit a job. + + :param working_directory: The absolute filepath to the working directory where the job is to be exectued. + :param filename: The filename of the submission script relative to the working directory. + """ + self.transport.chdir(working_directory) + result = self.transport.exec_command_wait(self._get_submit_command(escape_for_bash(filename))) + return self._parse_submit_output(*result) + + def get_jobs( + self, + jobs: list[str] | None = None, + user: str | None = None, + as_dict: bool = False, + ) -> list[JobInfo] | dict[str, JobInfo]: + """Return the list of currently active jobs. + + :param jobs: A list of jobs to check; only these are checked. + :param user: A string with a user: only jobs of this user are checked. + :param as_dict: If ``False`` (default), a list of ``JobInfo`` objects is returned. If ``True``, a dictionary is + returned, where the ``job_id`` is the key and the values are the ``JobInfo`` objects. + :returns: List of active jobs. 
+ """ + with self.transport: + retval, stdout, stderr = self.transport.exec_command_wait(self._get_joblist_command(jobs=jobs, user=user)) + + joblist = self._parse_joblist_output(retval, stdout, stderr) + if as_dict: + jobdict = {job.job_id: job for job in joblist} + if None in jobdict: + raise SchedulerError('Found at least one job without jobid') + return jobdict + + return joblist + + def kill_job(self, jobid: str) -> bool: + """Kill a remote job and parse the return value of the scheduler to check if the command succeeded. + + ..note:: + + On some schedulers, even if the command is accepted, it may take some seconds for the job to actually + disappear from the queue. + + :param jobid: the job ID to be killed + :returns: True if everything seems ok, False otherwise. + """ + retval, stdout, stderr = self.transport.exec_command_wait(self._get_kill_command(jobid)) + return self._parse_kill_output(retval, stdout, stderr) + + @abc.abstractmethod + def _get_submit_command(self, submit_script: str) -> str: + """Return the string to execute to submit a given script. + + .. warning:: the `submit_script` should already have been bash-escaped + + :param submit_script: the path of the submit script relative to the working directory. + :return: the string to execute to submit a given script. + """ + + @abc.abstractmethod + def _parse_submit_output(self, retval: int, stdout: str, stderr: str) -> str | ExitCode: + """Parse the output of the submit command returned by calling the `_get_submit_command` command. + + :return: a string with the job ID or an exit code if the submission failed because the submission script is + invalid and the job should be terminated. + """ + + @abc.abstractmethod + def _get_joblist_command(self, jobs: list[str] | None = None, user: str | None = None) -> str: + """Return the command to get the most complete description possible of currently active jobs. + + .. note:: + + Typically one can pass only either jobs or user, depending on the specific plugin. The choice can be done + according to the value returned by `self.get_feature('can_query_by_user')` + + :param jobs: either None to get a list of all jobs in the machine, or a list of jobs. + :param user: either None, or a string with the username (to show only jobs of the specific user). + """ + + @abc.abstractmethod + def _parse_joblist_output(self, retval: int, stdout: str, stderr: str) -> list[JobInfo]: + """Parse the joblist output as returned by executing the command returned by `_get_joblist_command` method. + + :return: list of `JobInfo` objects, one of each job each with at least its default params implemented. + """ + + @abc.abstractmethod + def _get_kill_command(self, jobid: str) -> str: + """Return the command to kill the job with specified jobid.""" + + @abc.abstractmethod + def _parse_kill_output(self, retval: int, stdout: str, stderr: str) -> bool: + """Parse the output of the kill command. + + :return: True if everything seems ok, False otherwise. + """ diff --git a/src/aiida/schedulers/plugins/direct.py b/src/aiida/schedulers/plugins/direct.py index 21d368a15f..78421acb73 100644 --- a/src/aiida/schedulers/plugins/direct.py +++ b/src/aiida/schedulers/plugins/direct.py @@ -13,6 +13,8 @@ from aiida.schedulers import SchedulerError from aiida.schedulers.datastructures import JobInfo, JobState, NodeNumberJobResource +from .bash import BashCliScheduler + ## From the ps man page on Mac OS X 10.12 # state The state is given by a sequence of characters, for example, # ``RWNA''. 
The first character indicates the run state of the @@ -74,7 +76,7 @@ def accepts_default_memory_per_machine(cls): return False -class DirectScheduler(aiida.schedulers.Scheduler): +class DirectScheduler(BashCliScheduler): """Support for the direct execution bypassing schedulers.""" _logger = aiida.schedulers.Scheduler._logger.getChild('direct') diff --git a/src/aiida/schedulers/plugins/lsf.py b/src/aiida/schedulers/plugins/lsf.py index aafeb2d167..33512ba944 100644 --- a/src/aiida/schedulers/plugins/lsf.py +++ b/src/aiida/schedulers/plugins/lsf.py @@ -16,6 +16,8 @@ from aiida.schedulers import SchedulerError, SchedulerParsingError from aiida.schedulers.datastructures import JobInfo, JobResource, JobState +from .bash import BashCliScheduler + # This maps LSF status codes to our own state list # # List of states from @@ -167,9 +169,10 @@ def accepts_default_mpiprocs_per_machine(cls): return False -class LsfScheduler(aiida.schedulers.Scheduler): +class LsfScheduler(BashCliScheduler): """Support for the IBM LSF scheduler - 'https://www-01.ibm.com/support/knowledgecenter/SSETD4_9.1.2/lsf_welcome.html' + + https://www-01.ibm.com/support/knowledgecenter/SSETD4_9.1.2/lsf_welcome.html """ _logger = aiida.schedulers.Scheduler._logger.getChild('lsf') diff --git a/src/aiida/schedulers/plugins/pbsbaseclasses.py b/src/aiida/schedulers/plugins/pbsbaseclasses.py index 12331010aa..bcceeae19d 100644 --- a/src/aiida/schedulers/plugins/pbsbaseclasses.py +++ b/src/aiida/schedulers/plugins/pbsbaseclasses.py @@ -11,9 +11,11 @@ import logging from aiida.common.escaping import escape_for_bash -from aiida.schedulers import Scheduler, SchedulerError, SchedulerParsingError +from aiida.schedulers import SchedulerError, SchedulerParsingError from aiida.schedulers.datastructures import JobInfo, JobState, MachineInfo, NodeNumberJobResource +from .bash import BashCliScheduler + _LOGGER = logging.getLogger(__name__) # This maps PbsPro status letters to our own status list @@ -95,8 +97,9 @@ def validate_resources(cls, **kwargs): return resources -class PbsBaseClass(Scheduler): +class PbsBaseClass(BashCliScheduler): """Base class with support for the PBSPro scheduler + (http://www.pbsworks.com/) and for PBS and Torque (http://www.adaptivecomputing.com/products/open-source/torque/). 
diff --git a/src/aiida/schedulers/plugins/sge.py b/src/aiida/schedulers/plugins/sge.py index fbc6a74233..c01381ba3d 100644 --- a/src/aiida/schedulers/plugins/sge.py +++ b/src/aiida/schedulers/plugins/sge.py @@ -21,6 +21,8 @@ from aiida.schedulers import SchedulerError, SchedulerParsingError from aiida.schedulers.datastructures import JobInfo, JobState, ParEnvJobResource +from .bash import BashCliScheduler + # 'http://www.loni.ucla.edu/twiki/bin/view/Infrastructure/GridComputing?skin=plain': # Jobs Status: # 'qw' - Queued and waiting, @@ -88,7 +90,7 @@ class SgeJobResource(ParEnvJobResource): pass -class SgeScheduler(aiida.schedulers.Scheduler): +class SgeScheduler(BashCliScheduler): """Support for the Sun Grid Engine scheduler and its variants/forks (Son of Grid Engine, Oracle Grid Engine, ...)""" _logger = aiida.schedulers.Scheduler._logger.getChild('sge') diff --git a/src/aiida/schedulers/plugins/slurm.py b/src/aiida/schedulers/plugins/slurm.py index 77ddc2711e..0ef2568c22 100644 --- a/src/aiida/schedulers/plugins/slurm.py +++ b/src/aiida/schedulers/plugins/slurm.py @@ -16,6 +16,8 @@ from aiida.schedulers import Scheduler, SchedulerError from aiida.schedulers.datastructures import JobInfo, JobState, NodeNumberJobResource +from .bash import BashCliScheduler + # This maps SLURM state codes to our own status list ## List of states from the man page of squeue @@ -141,7 +143,7 @@ def validate_resources(cls, **kwargs): return resources -class SlurmScheduler(Scheduler): +class SlurmScheduler(BashCliScheduler): """Support for the SLURM scheduler (http://slurm.schedmd.com/).""" _logger = Scheduler._logger.getChild('slurm') diff --git a/src/aiida/schedulers/scheduler.py b/src/aiida/schedulers/scheduler.py index 571b132672..5168762f80 100644 --- a/src/aiida/schedulers/scheduler.py +++ b/src/aiida/schedulers/scheduler.py @@ -125,6 +125,44 @@ def create_job_resource(cls, **kwargs): assert cls._job_resource_class is not None and issubclass(cls._job_resource_class, JobResource) return cls._job_resource_class(**kwargs) + @abc.abstractmethod + def submit_job(self, working_directory: str, filename: str) -> str | ExitCode: + """Submit a job. + + :param working_directory: The absolute filepath to the working directory where the job is to be exectued. + :param filename: The filename of the submission script relative to the working directory. + :returns: + """ + + @abc.abstractmethod + def get_jobs( + self, + jobs: list[str] | None = None, + user: str | None = None, + as_dict: bool = False, + ) -> list[JobInfo] | dict[str, JobInfo]: + """Return the list of currently active jobs. + + :param jobs: A list of jobs to check; only these are checked. + :param user: A string with a user: only jobs of this user are checked. + :param as_dict: If ``False`` (default), a list of ``JobInfo`` objects is returned. If ``True``, a dictionary is + returned, where the ``job_id`` is the key and the values are the ``JobInfo`` objects. + :returns: List of active jobs. + """ + + @abc.abstractmethod + def kill_job(self, jobid: str) -> bool: + """Kill a remote job and parse the return value of the scheduler to check if the command succeeded. + + ..note:: + + On some schedulers, even if the command is accepted, it may take some seconds for the job to actually + disappear from the queue. + + :param jobid: the job ID to be killed + :returns: True if everything seems ok, False otherwise. + """ + def get_submit_script(self, job_tmpl: JobTemplate) -> str: """Return the submit script as a string. 
@@ -287,19 +325,6 @@ def _get_run_line(self, codes_info: list[JobTemplateCodeInfo], codes_run_mode: C raise NotImplementedError('Unrecognized code run mode') - @abc.abstractmethod - def _get_joblist_command(self, jobs: list[str] | None = None, user: str | None = None) -> str: - """Return the command to get the most complete description possible of currently active jobs. - - .. note:: - - Typically one can pass only either jobs or user, depending on the specific plugin. The choice can be done - according to the value returned by `self.get_feature('can_query_by_user')` - - :param jobs: either None to get a list of all jobs in the machine, or a list of jobs. - :param user: either None, or a string with the username (to show only jobs of the specific user). - """ - def _get_detailed_job_info_command(self, job_id: str) -> dict[str, t.Any]: """Return the command to run to get detailed information for a given job. @@ -332,41 +357,6 @@ def get_detailed_job_info(self, job_id: str) -> dict[str, str | int]: return detailed_job_info - @abc.abstractmethod - def _parse_joblist_output(self, retval: int, stdout: str, stderr: str) -> list[JobInfo]: - """Parse the joblist output as returned by executing the command returned by `_get_joblist_command` method. - - :return: list of `JobInfo` objects, one of each job each with at least its default params implemented. - """ - - def get_jobs( - self, - jobs: list[str] | None = None, - user: str | None = None, - as_dict: bool = False, - ) -> list[JobInfo] | dict[str, JobInfo]: - """Return the list of currently active jobs. - - .. note:: typically, only either jobs or user can be specified. See also comments in `_get_joblist_command`. - - :param list jobs: a list of jobs to check; only these are checked - :param str user: a string with a user: only jobs of this user are checked - :param list as_dict: if False (default), a list of JobInfo objects is returned. If True, a dictionary is - returned, having as key the job_id and as value the JobInfo object. - :return: list of active jobs - """ - with self.transport: - retval, stdout, stderr = self.transport.exec_command_wait(self._get_joblist_command(jobs=jobs, user=user)) - - joblist = self._parse_joblist_output(retval, stdout, stderr) - if as_dict: - jobdict = {job.job_id: job for job in joblist} - if None in jobdict: - raise SchedulerError('Found at least one job without jobid') - return jobdict - - return joblist - @property def transport(self): """Return the transport set for this scheduler.""" @@ -382,58 +372,6 @@ def set_transport(self, transport: Transport): """ self._transport = transport - @abc.abstractmethod - def _get_submit_command(self, submit_script: str) -> str: - """Return the string to execute to submit a given script. - - .. warning:: the `submit_script` should already have been bash-escaped - - :param submit_script: the path of the submit script relative to the working directory. - :return: the string to execute to submit a given script. - """ - - @abc.abstractmethod - def _parse_submit_output(self, retval: int, stdout: str, stderr: str) -> str | ExitCode: - """Parse the output of the submit command returned by calling the `_get_submit_command` command. - - :return: a string with the job ID or an exit code if the submission failed because the submission script is - invalid and the job should be terminated. - """ - - def submit_from_script(self, working_directory: str, submit_script: str) -> str | ExitCode: - """Submit the submission script to the scheduler. 
- - :return: return a string with the job ID in a valid format to be used for querying. - """ - self.transport.chdir(working_directory) - result = self.transport.exec_command_wait(self._get_submit_command(escape_for_bash(submit_script))) - return self._parse_submit_output(*result) - - def kill(self, jobid: str) -> bool: - """Kill a remote job and parse the return value of the scheduler to check if the command succeeded. - - ..note:: - - On some schedulers, even if the command is accepted, it may take some seconds for the job to actually - disappear from the queue. - - :param jobid: the job ID to be killed - :return: True if everything seems ok, False otherwise. - """ - retval, stdout, stderr = self.transport.exec_command_wait(self._get_kill_command(jobid)) - return self._parse_kill_output(retval, stdout, stderr) - - @abc.abstractmethod - def _get_kill_command(self, jobid: str) -> str: - """Return the command to kill the job with specified jobid.""" - - @abc.abstractmethod - def _parse_kill_output(self, retval: int, stdout: str, stderr: str) -> bool: - """Parse the output of the kill command. - - :return: True if everything seems ok, False otherwise. - """ - def parse_output( self, detailed_job_info: dict[str, str | int] | None = None, diff --git a/tests/engine/processes/calcjobs/test_calc_job.py b/tests/engine/processes/calcjobs/test_calc_job.py index 4583d9ea18..4e679d5cdf 100644 --- a/tests/engine/processes/calcjobs/test_calc_job.py +++ b/tests/engine/processes/calcjobs/test_calc_job.py @@ -1273,10 +1273,10 @@ def test_monitor_result_action_disable_self(get_calcjob_builder, entry_points, c def test_submit_return_exit_code(get_calcjob_builder, monkeypatch): - """Test that a job is terminated if ``Scheduler.submit_from_script`` returns an exit code. + """Test that a job is terminated if ``Scheduler.submit_job`` returns an exit code. To simulate this situation we monkeypatch ``DirectScheduler._parse_submit_output`` because that is the method that - is called internally by ``Scheduler.submit_from_script`` and it returns its result, and the ``DirectScheduler`` is + is called internally by ``Scheduler.submit_job`` and it returns its result, and the ``DirectScheduler`` is the plugin that is used by the localhost computer used in the inputs for this calcjob. """ from aiida.schedulers.plugins.direct import DirectScheduler diff --git a/tests/transports/test_all_plugins.py b/tests/transports/test_all_plugins.py index 517efe106a..986dd465a9 100644 --- a/tests/transports/test_all_plugins.py +++ b/tests/transports/test_all_plugins.py @@ -1301,7 +1301,7 @@ def test_asynchronous_execution(custom_transport): transport.putfile(tmpf.name, script_fname) timestamp_before = time.time() - job_id_string = scheduler.submit_from_script('/tmp', script_fname) + job_id_string = scheduler.submit_job('/tmp', script_fname) elapsed_time = time.time() - timestamp_before # We want to get back control. 
If it takes < 5 seconds, it means that it is not blocking From 2bdcb7f00dac93b3287baef042f873cd5f6ee247 Mon Sep 17 00:00:00 2001 From: Daniel Hollas Date: Mon, 15 Jul 2024 07:46:38 +0100 Subject: [PATCH 57/82] Devops: Add type hints to `aiida.orm.utils.remote` (#6503) --- .pre-commit-config.yaml | 1 - src/aiida/orm/utils/remote.py | 36 ++++++++++++++++++++++------------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 185d5698fb..32305828b4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -145,7 +145,6 @@ repos: src/aiida/orm/utils/builders/computer.py| src/aiida/orm/utils/calcjob.py| src/aiida/orm/utils/node.py| - src/aiida/orm/utils/remote.py| src/aiida/repository/backend/disk_object_store.py| src/aiida/repository/backend/sandbox.py| src/aiida/restapi/common/utils.py| diff --git a/src/aiida/orm/utils/remote.py b/src/aiida/orm/utils/remote.py index 5fdccc9629..2518791fb8 100644 --- a/src/aiida/orm/utils/remote.py +++ b/src/aiida/orm/utils/remote.py @@ -8,12 +8,22 @@ ########################################################################### """Utilities for operations on files on remote computers.""" +from __future__ import annotations + import os +import typing as t from aiida.orm.nodes.data.remote.base import RemoteData +if t.TYPE_CHECKING: + from collections.abc import Sequence + + from aiida import orm + from aiida.orm.implementation import StorageBackend + from aiida.transports import Transport + -def clean_remote(transport, path): +def clean_remote(transport: Transport, path: str) -> None: """Recursively remove a remote folder, with the given absolute path, and all its contents. The path should be made accessible through the transport channel, which should already be open @@ -39,15 +49,15 @@ def clean_remote(transport, path): def get_calcjob_remote_paths( - pks=None, - past_days=None, - older_than=None, - computers=None, - user=None, - backend=None, - exit_status=None, - only_not_cleaned=False, -): + pks: list[int] | None = None, + past_days: int | None = None, + older_than: int | None = None, + computers: Sequence[orm.Computer] | None = None, + user: orm.User | None = None, + backend: StorageBackend | None = None, + exit_status: int | None = None, + only_not_cleaned: bool = False, +) -> dict[str, list[RemoteData]] | None: """Return a mapping of computer uuids to a list of remote paths, for a given set of calcjobs. The set of calcjobs will be determined by a query with filters based on the pks, past_days, older_than, computers and user arguments. 
@@ -67,7 +77,7 @@ def get_calcjob_remote_paths( from aiida.common import timezone from aiida.orm import CalcJobNode - filters_calc = {} + filters_calc: dict[str, t.Any] = {} filters_computer = {} filters_remote = {} @@ -110,12 +120,12 @@ def get_calcjob_remote_paths( RemoteData, tag='remote', project=['*'], edge_filters={'label': 'remote_folder'}, filters=filters_remote ) query.append(orm.Computer, with_node='calc', tag='computer', project=['uuid'], filters=filters_computer) - query.append(orm.User, with_node='calc', filters={'email': user.email}) + query.append(orm.User, with_node='calc', filters={'email': user.email}) # type: ignore[union-attr] if query.count() == 0: return None - path_mapping = {} + path_mapping: dict[str, list[RemoteData]] = {} for remote_data, computer_uuid in query.iterall(): path_mapping.setdefault(computer_uuid, []).append(remote_data) From a610460572f2d85435dfa850fdc247e1c666ef68 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Tue, 16 Jul 2024 12:21:33 +0200 Subject: [PATCH 58/82] Dependencies: Pin requirement to minor version `sphinx~=7.2.0` (#6527) The recently released v7.4.0 causes the build of the documentation to fail with warnings of the type: Failed to get a method signature for ...: unhashable type It also leads to issues with incremental builds where it fails if a build exists that was generated with an older version. Both these bugs were fixed in v7.4.1 and v7.4.2 respectively, but more warnings remain. Instead of waiting for these to be fixed, we are just pinning to a minor version here which should anyway be done for optional dev dependencies. There is no need to automatically update those to latest releases. --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 36a7af77c3..90e70ffb23 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -194,7 +194,7 @@ bpython = [ ] docs = [ 'pydata-sphinx-theme~=0.15.1', - 'sphinx~=7.2', + 'sphinx~=7.2.0', 'sphinx-copybutton~=0.5.0', 'sphinx-design~=0.5.0', 'sphinx-notfound-page~=1.0', @@ -242,7 +242,7 @@ tests = [ 'pytest-regressions~=2.2', 'pympler~=1.0', 'coverage~=7.0', - 'sphinx~=7.2', + 'sphinx~=7.2.0', 'docutils~=0.20' ] tui = [ From de83e2ce43101ddb8b5aeaa5c3e18d1e5c85590a Mon Sep 17 00:00:00 2001 From: Michael Goulding Date: Wed, 17 Jul 2024 00:49:34 -0700 Subject: [PATCH 59/82] `LocalTransport`: Fix typo for `ignore_nonexisting` in `put` (#6471) The `LocalTransport.put` method contained a typo and so would check for the `ignore_noexisting` argument instead of `ignore_nonexisting`. The typo is corrected but for backwards compatibility the method continues to check for the misspelled version emitting a deprecation warning. --- src/aiida/transports/plugins/local.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/aiida/transports/plugins/local.py b/src/aiida/transports/plugins/local.py index 0740837fc4..b8263620d3 100644 --- a/src/aiida/transports/plugins/local.py +++ b/src/aiida/transports/plugins/local.py @@ -228,9 +228,18 @@ def put(self, localpath, remotepath, *args, **kwargs): :raise OSError: if remotepath is not valid :raise ValueError: if localpath is not valid """ + from aiida.common.warnings import warn_deprecation + + if 'ignore_noexisting' in kwargs: + # Backwards compatibility check for old keyword that was misspelled + warn_deprecation( + 'Detected `ignore_noexisting` which is now deprecated. 
Use `ignore_nonexisting` instead.', version=3 + ) + ignore_nonexisting = kwargs.get('ignore_noexisting', args[2] if len(args) > 2 else False) + dereference = kwargs.get('dereference', args[0] if args else True) overwrite = kwargs.get('overwrite', args[1] if len(args) > 1 else True) - ignore_nonexisting = kwargs.get('ignore_noexisting', args[2] if len(args) > 2 else False) + ignore_nonexisting = kwargs.get('ignore_nonexisting', args[2] if len(args) > 2 else False) if not remotepath: raise OSError('Input remotepath to put function must be a non empty string') if not localpath: From a2063f8eecb4f059c2d45201da00993e1559fc44 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Wed, 17 Jul 2024 16:10:30 +0200 Subject: [PATCH 60/82] CLI: Fix bug `verdi presto` when tab-completing without config (#6535) For a clean environment where the config directory had not yet been created, tab-completing `verdi presto` would result in an exception being thrown because the callable for the default of the `--profile-name` option would try to access the configuration. The exception is now caught and the callable simply returns the default profile name `presto` which should be correct since no profiles should exist if there is not even a configuration directory. --- src/aiida/cmdline/commands/cmd_presto.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py index 6fa9518443..83ba287a94 100644 --- a/src/aiida/cmdline/commands/cmd_presto.py +++ b/src/aiida/cmdline/commands/cmd_presto.py @@ -25,9 +25,17 @@ def get_default_presto_profile_name(): + from aiida.common.exceptions import ConfigurationError from aiida.manage import get_config - profile_names = get_config().profile_names + try: + profile_names = get_config().profile_names + except ConfigurationError: + # This can happen when tab-completing in an environment that did not create the configuration folder yet. + # It would have been possible to just call ``get_config(create=True)`` to create the config directory, but this + # should not be done during tab-completion just to generate a default value. + return DEFAULT_PROFILE_NAME_PREFIX + indices = [] for profile_name in profile_names: From 17dc88c01514cee09449dc66f5d134324c666dd7 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Wed, 17 Jul 2024 22:00:47 +0200 Subject: [PATCH 61/82] CLI: `verdi computer test` report correct failed tests (#6536) If the opening of the transport would fail in `verdi computer test` it would always report: Warning: 1 out of 0 tests failed Since opening the connection is the first test performed and its failure is dealt with separately, the message can simply be hardcoded to 1 out of 1 tests having failed. --- src/aiida/cmdline/commands/cmd_computer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiida/cmdline/commands/cmd_computer.py b/src/aiida/cmdline/commands/cmd_computer.py index acb9c2da81..4ac8480258 100644 --- a/src/aiida/cmdline/commands/cmd_computer.py +++ b/src/aiida/cmdline/commands/cmd_computer.py @@ -605,7 +605,7 @@ def computer_test(user, print_traceback, computer): message += '\n Use the `--print-traceback` option to see the full traceback.' 
echo.echo(message) - echo.echo_warning(f'{1} out of {num_tests} tests failed') + echo.echo_warning('1 out of 1 tests failed') @verdi_computer.command('delete') From 6b56d8bebf52552217062bc4fbdb97467355d4d5 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Sun, 21 Jul 2024 22:49:29 +0200 Subject: [PATCH 62/82] Devops: Fix pymatgen import causing mypy to fail (#6540) The `pymatgen==2024.07.18` release removed the exposing of `Molecule` in `pymatgen.core` causing `mypy` to fail. The import is updated to reflect the actual origin of the definition. --- tests/orm/nodes/data/test_jsonable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/orm/nodes/data/test_jsonable.py b/tests/orm/nodes/data/test_jsonable.py index 59fbe9b6a8..bacedac73a 100644 --- a/tests/orm/nodes/data/test_jsonable.py +++ b/tests/orm/nodes/data/test_jsonable.py @@ -6,7 +6,7 @@ import pytest from aiida.orm import load_node from aiida.orm.nodes.data.jsonable import JsonableData -from pymatgen.core import Molecule +from pymatgen.core.structure import Molecule class JsonableClass: From 16b8fe4a0912ac36973fb82f14691723e72599a7 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 22 Jul 2024 10:37:50 +0200 Subject: [PATCH 63/82] Post release: add the `.post0` qualifier to version attribute --- src/aiida/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiida/__init__.py b/src/aiida/__init__.py index 5067f789e2..8e86350e40 100644 --- a/src/aiida/__init__.py +++ b/src/aiida/__init__.py @@ -27,7 +27,7 @@ 'For further information please visit http://www.aiida.net/. All rights reserved.' ) __license__ = 'MIT license, see LICENSE.txt file.' -__version__ = '2.6.1' +__version__ = '2.6.1.post0' __authors__ = 'The AiiDA team.' __paper__ = ( 'S. P. Huber et al., "AiiDA 1.0, a scalable computational infrastructure for automated reproducible workflows and ' From f1be224c4680407984eda8652692ec0ea708a3e1 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 22 Jul 2024 11:34:05 +0200 Subject: [PATCH 64/82] Docker: Make warning test insensitive to deprecation warnings (#6541) The Docker image is supposed to configure the profile such that warnings about using a development version of `aiida-core` and a modern version of RabbitMQ are silenced. The test was checking that `Warning` did not appear in the output of `verdi status`, however, this would result in false positives in case a deprecation warning would be printed due to downstream dependencies that we cannot necessarily control. The test is made more specific to check for a line starting with `Warning:` which should reduce the chance for false positives. --- .docker/tests/test_aiida.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.docker/tests/test_aiida.py b/.docker/tests/test_aiida.py index 05fe007db2..7f952bd855 100644 --- a/.docker/tests/test_aiida.py +++ b/.docker/tests/test_aiida.py @@ -1,4 +1,5 @@ import json +import re import pytest from packaging.version import parse @@ -32,8 +33,10 @@ def test_verdi_status(aiida_exec, container_user): assert '✔ broker:' in output assert 'Daemon is running' in output - # check that we have suppressed the warnings - assert 'Warning' not in output + # Check that we have suppressed the warnings coming from using an install from repo and newer RabbitMQ version. + # Make sure to match only lines that start with ``Warning:`` because otherwise deprecation warnings from other + # packages that we cannot control may fail the test. 
+ assert not re.match('^Warning:.*', output) def test_computer_setup_success(aiida_exec, container_user): From 4038d550452df25cccd13137b9dfb5823345c544 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 18 Jul 2024 14:04:38 +0200 Subject: [PATCH 65/82] Docs: Add succint overview of limitations of no-services profile A table is added to the quick installation guide that gives a more succinct overview of the limitations of a profile created without a broker and with SQLite instead of PostgreSQL. This was already explained in more detail in text, but that was not complete and could be too dense for users, making them likely to skip it. The code is updated to provide a link to this documentation section whenever an error is displayed that a broker is not configured. This way users can try to understand why the functionality they are trying to use is not supported and how, if they really care about it, they can go about still installing and configuring RabbitMQ after the fact. --- docs/source/installation/guide_complete.rst | 2 +- docs/source/installation/guide_quick.rst | 20 +++++++++++++++++++- src/aiida/cmdline/commands/cmd_presto.py | 3 ++- src/aiida/cmdline/commands/cmd_process.py | 3 ++- src/aiida/cmdline/commands/cmd_profile.py | 2 ++ src/aiida/cmdline/commands/cmd_status.py | 5 +++-- src/aiida/cmdline/utils/decorators.py | 8 +++++++- src/aiida/common/docs.py | 4 ++++ src/aiida/engine/daemon/client.py | 5 ++++- src/aiida/engine/launch.py | 5 ++++- 10 files changed, 48 insertions(+), 9 deletions(-) create mode 100644 src/aiida/common/docs.py diff --git a/docs/source/installation/guide_complete.rst b/docs/source/installation/guide_complete.rst index 49136bbba7..764274616d 100644 --- a/docs/source/installation/guide_complete.rst +++ b/docs/source/installation/guide_complete.rst @@ -130,7 +130,7 @@ Although it is possible to run AiiDA without a daemon it does provide significan .. important:: The ``aiida-core.services`` package ensures that RabbitMQ is installed in the conda environment. - However, it is not a _service_, in the sense that it is not automatically started, but has to be started manually. + However, it is not a *service*, in the sense that it is not automatically started, but has to be started manually. .. code-block:: console diff --git a/docs/source/installation/guide_quick.rst b/docs/source/installation/guide_quick.rst index af9aaa8dc0..25ef307d57 100644 --- a/docs/source/installation/guide_quick.rst +++ b/docs/source/installation/guide_quick.rst @@ -51,13 +51,31 @@ If none of the lines show a red cross, indicating a problem, the installation wa Quick install limitations ========================= +By default, ``verdi presto`` creates a profile that uses SQLite instead of PostgreSQL and does not use the RabbitMQ message broker. 
+The table below gives a quick overview of the functionality that is not supported in those cases: + ++-----------------------------------------+------------------------------------------------------------------------+ +| No RabbitMQ | SQLite instead of PostgreSQL | ++=========================================+========================================================================+ +| Cannot run the daemon | Not suitable for high-throughput workloads | ++-----------------------------------------+------------------------------------------------------------------------+ +| Cannot submit processes to the daemon\* | No support for ``has_key`` and ``contains`` operators in query builder | ++-----------------------------------------+------------------------------------------------------------------------+ +| Cannot play, pause, kill processes | No support for ``QueryBuilder.get_creation_statistics`` | ++-----------------------------------------+------------------------------------------------------------------------+ + +\* Calculations can still be run on remote computers + +.. note:: + To enable the RabbitMQ broker for an existing profile, :ref:`install RabbitMQ ` and then run ``verdi profile configure-rabbitmq``. + Functionality ------------- Part of AiiDA's functionality requires a `message broker `_, with the default implementation using `RabbitMQ `_. The message broker is used to allow communication with the :ref:`daemon `. Since RabbitMQ is a separate service and is not always trivial to install, the quick installation guide sets up a profile that does not require it. -As a result, the daemon cannot be started and processes cannot be submitted to it but can only be run locally. +As a result, the daemon cannot be started and processes cannot be submitted to it but can only be run in the current Python interpreter. .. note:: The ``verdi presto`` command automatically checks if RabbitMQ is running on the localhost. diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py index 83ba287a94..09d7070e7c 100644 --- a/src/aiida/cmdline/commands/cmd_presto.py +++ b/src/aiida/cmdline/commands/cmd_presto.py @@ -178,7 +178,7 @@ def verdi_presto( created profile uses the new PostgreSQL database instead of SQLite. 
""" from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config - from aiida.common import exceptions + from aiida.common import docs, exceptions from aiida.manage.configuration import create_profile, load_profile from aiida.orm import Computer @@ -217,6 +217,7 @@ def verdi_presto( broker_config = detect_rabbitmq_config() except ConnectionError as exception: echo.echo_report(f'RabbitMQ server not found ({exception}): configuring the profile without a broker.') + echo.echo_report(f'See {docs.URL_NO_BROKER} for details on the limitations of running without a broker.') else: echo.echo_report('RabbitMQ server detected: configuring the profile with a broker.') broker_backend = 'core.rabbitmq' diff --git a/src/aiida/cmdline/commands/cmd_process.py b/src/aiida/cmdline/commands/cmd_process.py index 77e14a3300..c9c492ae14 100644 --- a/src/aiida/cmdline/commands/cmd_process.py +++ b/src/aiida/cmdline/commands/cmd_process.py @@ -101,6 +101,7 @@ def process_list( from aiida.cmdline.commands.cmd_daemon import execute_client_command from aiida.cmdline.utils.common import print_last_process_state_change + from aiida.common.docs import URL_NO_BROKER from aiida.common.exceptions import ConfigurationError from aiida.engine.daemon.client import get_daemon_client from aiida.orm import ProcessNode, QueryBuilder @@ -137,7 +138,7 @@ def process_list( try: client = get_daemon_client() except ConfigurationError: - echo.echo_warning('This profile does not have a broker and so it has no daemon.') + echo.echo_warning(f'This profile does not have a broker and so it has no daemon. See {URL_NO_BROKER}') return if not client.is_daemon_running: diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py index 057f2de5a9..3dd21b56bf 100644 --- a/src/aiida/cmdline/commands/cmd_profile.py +++ b/src/aiida/cmdline/commands/cmd_profile.py @@ -55,6 +55,7 @@ def command_create_profile( :param kwargs: Arguments to initialise instance of the selected storage implementation. """ from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config + from aiida.common import docs from aiida.plugins.entry_point import get_entry_point_from_class if not storage_cls.read_only and email is None: @@ -79,6 +80,7 @@ def command_create_profile( else: echo.echo_report('Creating profile without RabbitMQ.') echo.echo_report('It can be configured at a later point in time with `verdi profile configure-rabbitmq`.') + echo.echo_report(f'See {docs.URL_NO_BROKER} for details on the limitations of running without a broker.') try: profile = create_profile( diff --git a/src/aiida/cmdline/commands/cmd_status.py b/src/aiida/cmdline/commands/cmd_status.py index dc4521af02..f3c32327dc 100644 --- a/src/aiida/cmdline/commands/cmd_status.py +++ b/src/aiida/cmdline/commands/cmd_status.py @@ -58,6 +58,7 @@ class ServiceStatus(enum.IntEnum): def verdi_status(print_traceback, no_rmq): """Print status of AiiDA services.""" from aiida import __version__ + from aiida.common.docs import URL_NO_BROKER from aiida.common.exceptions import ConfigurationError from aiida.engine.daemon.client import DaemonException, DaemonNotRunningException from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER @@ -141,7 +142,7 @@ def verdi_status(print_traceback, no_rmq): print_status( ServiceStatus.WARNING, 'broker', - 'No broker defined for this profile: certain functionality not available.', + f'No broker defined for this profile: certain functionality not available. 
See {URL_NO_BROKER}', ) # Getting the daemon status @@ -151,7 +152,7 @@ def verdi_status(print_traceback, no_rmq): print_status( ServiceStatus.WARNING, 'daemon', - 'No broker defined for this profile: daemon is not available.', + 'No broker defined for this profile: daemon is not available. See {URL_NO_BROKER}', ) except DaemonNotRunningException as exception: print_status(ServiceStatus.WARNING, 'daemon', str(exception)) diff --git a/src/aiida/cmdline/utils/decorators.py b/src/aiida/cmdline/utils/decorators.py index 84386710f9..5363926978 100644 --- a/src/aiida/cmdline/utils/decorators.py +++ b/src/aiida/cmdline/utils/decorators.py @@ -45,6 +45,7 @@ def with_broker(wrapped, _, args, kwargs): If the currently loaded profile does not define a broker, the command is aborted. """ + from aiida.common.docs import URL_NO_BROKER from aiida.manage import get_manager broker = get_manager().get_broker() @@ -54,6 +55,7 @@ def with_broker(wrapped, _, args, kwargs): if broker is None: echo.echo_critical( f'Profile `{profile.name}` does not support this functionality as it does not provide a broker.' + f'See {URL_NO_BROKER} for more details.' ) kwargs['broker'] = broker @@ -313,6 +315,7 @@ def start_daemon(): If the loaded profile does not define a broker, the command will exit with a critical error. """ + from aiida.common.docs import URL_NO_BROKER from aiida.manage import get_manager manager = get_manager() @@ -323,6 +326,9 @@ def start_daemon(): assert profile is not None if manager.get_broker() is None: - echo.echo_critical(f'profile `{profile.name}` does not define a broker and so cannot use this functionality.') + echo.echo_critical( + f'profile `{profile.name}` does not define a broker and so cannot use this functionality.' + f'See {URL_NO_BROKER} for more details.' + ) return wrapped(*args, **kwargs) diff --git a/src/aiida/common/docs.py b/src/aiida/common/docs.py new file mode 100644 index 0000000000..797c944387 --- /dev/null +++ b/src/aiida/common/docs.py @@ -0,0 +1,4 @@ +"""Collection of links to the documentation that can be used in log messages for reference.""" + +URL_BASE = 'https://aiida-core.readthedocs.io/en/stable' +URL_NO_BROKER = f'{URL_BASE}/installation/guide_quick.html#quick-install-limitations' diff --git a/src/aiida/engine/daemon/client.py b/src/aiida/engine/daemon/client.py index 67ca7b99b5..ef250802f7 100644 --- a/src/aiida/engine/daemon/client.py +++ b/src/aiida/engine/daemon/client.py @@ -91,6 +91,8 @@ def __init__(self, profile: Profile): :param profile: The profile instance. """ + from aiida.common.docs import URL_NO_BROKER + type_check(profile, Profile) config = get_config() self._profile = profile @@ -99,7 +101,8 @@ def __init__(self, profile: Profile): if self._profile.process_control_backend is None: raise ConfigurationError( - f'profile `{self._profile.name}` does not define a broker so the daemon cannot be used.' + f'profile `{self._profile.name}` does not define a broker so the daemon cannot be used. ' + f'See {URL_NO_BROKER} for more details.' ) @property diff --git a/src/aiida/engine/launch.py b/src/aiida/engine/launch.py index d37cf46905..34fd1d7c0d 100644 --- a/src/aiida/engine/launch.py +++ b/src/aiida/engine/launch.py @@ -103,6 +103,8 @@ def submit( :param kwargs: inputs to be passed to the process. This is an alternative to the positional ``inputs`` argument. 
:return: the calculation node of the process """ + from aiida.common.docs import URL_NO_BROKER + inputs = prepare_inputs(inputs, **kwargs) # Submitting from within another process requires ``self.submit``` unless it is a work function, in which case the @@ -117,7 +119,8 @@ def submit( 'Cannot submit because the runner does not have a process controller, probably because the profile does ' 'not define a broker like RabbitMQ. If a RabbitMQ server is available, the profile can be configured to ' 'use it with `verdi profile configure-rabbitmq`. Otherwise, use :meth:`aiida.engine.launch.run` instead to ' - 'run the process in the local Python interpreter instead of submitting it to the daemon.' + 'run the process in the local Python interpreter instead of submitting it to the daemon. ' + f'See {URL_NO_BROKER} for more details.' ) assert runner.persister is not None, 'runner does not have a persister' From 96d9fbdc0027b3fd357a6aab172ec6f7f3cc94af Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 18 Jul 2024 14:21:59 +0200 Subject: [PATCH 66/82] Docs: Remove note in installation guide regarding Python requirement This admonition is not that useful since it still refers the user to an external website and the target information is not immediately obvious either. Besides, if the installed Python version is not supported, this will automatically be reported by the package manager. --- docs/source/installation/guide_quick.rst | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/source/installation/guide_quick.rst b/docs/source/installation/guide_quick.rst index 25ef307d57..7246498a87 100644 --- a/docs/source/installation/guide_quick.rst +++ b/docs/source/installation/guide_quick.rst @@ -10,11 +10,6 @@ First, install the ``aiida-core`` Python package: pip install aiida-core -.. attention:: - - AiiDA requires a recent version of Python. - Please refer to the `Python Package Index `_ for the minimum required version. - Next, set up a profile where all data is stored: .. code-block:: console From d73731f428b031ad4b9f68a4af2a008adc9b3290 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 18 Jul 2024 14:23:16 +0200 Subject: [PATCH 67/82] Docs: Move limitations warning to top of quick install The fact that the quick installation guide can lead to a profile that has certain limitations should be the first thing to be read. --- docs/source/installation/guide_complete.rst | 21 ++++-- docs/source/installation/guide_quick.rst | 79 ++++++++++++--------- 2 files changed, 58 insertions(+), 42 deletions(-) diff --git a/docs/source/installation/guide_complete.rst b/docs/source/installation/guide_complete.rst index 764274616d..f576a17465 100644 --- a/docs/source/installation/guide_complete.rst +++ b/docs/source/installation/guide_complete.rst @@ -254,7 +254,7 @@ Common options The exact options available for the ``verdi profile setup`` command depend on the selected storage plugin, but there are a number of common options and functionality: -* ``--profile``: The name of the profile. +* ``--profile-name``: The name of the profile. * ``--set-as-default``: Whether the new profile should be defined as the new default. * ``--email``: Email for the default user that is created. * ``--first-name``: First name for the default user that is created. @@ -276,6 +276,10 @@ The exact options available for the ``verdi profile setup`` command depend on th ``core.sqlite_dos`` ------------------- +.. 
tip:: + + The ``verdi presto`` command provides a fully automated way to set up a profile with the ``core.sqlite_dos`` storage plugin if no configuration is required. + This storage plugin uses `SQLite `_ and the `disk-objectstore `_ to store data. The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `. The installation instructions for SQLite depend on your system; please visit the `SQLite website `_ for details. @@ -296,20 +300,23 @@ The options specific to the ``core.sqlite_dos`` storage plugin are: ``core.psql_dos`` ----------------- -This storage plugin uses `PostgreSQL `_ and the `disk-objectstore `_ to store data. -The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `. -The storage plugin can connect to a PostgreSQL instance running on the localhost or on a server that can be reached over the internet. -Instructions for installing PostgreSQL is beyond the scope of this guide. - .. tip:: The creation of the PostgreSQL user and database as explained below is implemented in an automated way in the ``verdi presto`` command. - Instead of performing the steps below manually and running ``verdi profile setup core.psql_dos`` manually, it is possible to run: + Instead of performing the steps below manually and running ``verdi profile setup core.psql_dos``, it is possible to run: .. code-block:: verdi presto --use-postgres + The ``verdi presto`` command also automatically tries to configure RabbitMQ as the broker if it is running locally. + Therefore, if the command succeeds in connecting to both PostgreSQL and RabbitMQ, ``verdi presto --use-postgres`` provides a fully automated way to create a profile suitable for production workloads. + +This storage plugin uses `PostgreSQL `_ and the `disk-objectstore `_ to store data. +The ``disk-objectstore`` is a Python package that is automatically installed as a dependency when installing ``aiida-core``, which was covered in the :ref:`Python package installation section `. +The storage plugin can connect to a PostgreSQL instance running on the localhost or on a server that can be reached over the internet. +Instructions for installing PostgreSQL is beyond the scope of this guide. + Before creating a profile, a database (and optionally a custom database user) has to be created. First, connect to PostgreSQL using ``psql``, the `native command line client for PostgreSQL `_: diff --git a/docs/source/installation/guide_quick.rst b/docs/source/installation/guide_quick.rst index 7246498a87..1805bde908 100644 --- a/docs/source/installation/guide_quick.rst +++ b/docs/source/installation/guide_quick.rst @@ -4,6 +4,12 @@ Quick installation guide ======================== +.. warning:: + + Not all AiiDA functionality is supported by the quick installation. + Please refer to the :ref:`section below ` for more information and see the :ref:`complete installation guide ` for instructions to set up a feature-complete and performant installation. + + First, install the ``aiida-core`` Python package: .. code-block:: console @@ -35,64 +41,67 @@ If none of the lines show a red cross, indicating a problem, the installation wa If you encountered any issues, please refer to the :ref:`troubleshooting section `. -.. warning:: - - Not all AiiDA functionality is supported by the quick installation. 
- Please refer to the :ref:`section below ` for more information. - .. _installation:guide-quick:limitations: Quick install limitations ========================= -By default, ``verdi presto`` creates a profile that uses SQLite instead of PostgreSQL and does not use the RabbitMQ message broker. -The table below gives a quick overview of the functionality that is not supported in those cases: +A setup that is ideal for production work requires the PostgreSQL and RabbitMQ services. +By default, ``verdi presto`` creates a profile that allows running AiiDA without these: -+-----------------------------------------+------------------------------------------------------------------------+ -| No RabbitMQ | SQLite instead of PostgreSQL | -+=========================================+========================================================================+ -| Cannot run the daemon | Not suitable for high-throughput workloads | -+-----------------------------------------+------------------------------------------------------------------------+ -| Cannot submit processes to the daemon\* | No support for ``has_key`` and ``contains`` operators in query builder | -+-----------------------------------------+------------------------------------------------------------------------+ -| Cannot play, pause, kill processes | No support for ``QueryBuilder.get_creation_statistics`` | -+-----------------------------------------+------------------------------------------------------------------------+ +* **Database**: The PostgreSQL database that is used to store queryable data, is replaced by SQLite. +* **Broker**: The RabbitMQ message broker that allows communication with and between processes is disabled. -\* Calculations can still be run on remote computers +The following matrix shows the possible combinations of the database and broker options and their use cases: -.. note:: - To enable the RabbitMQ broker for an existing profile, :ref:`install RabbitMQ ` and then run ``verdi profile configure-rabbitmq``. ++----------------------+----------------------------------------------------+-------------------------------------------------------------+ +| | **SQLite database** | **PostgreSQL database** | ++======================+====================================================+=============================================================+ +| **No broker** | Quick start with AiiDA | [*not really relevant for any usecase*] | ++----------------------+----------------------------------------------------+-------------------------------------------------------------+ +| **RabbitMQ** | Production (low-throughput; beta, has limitations) | Production (maximum performance, ideal for high-throughput) | ++----------------------+----------------------------------------------------+-------------------------------------------------------------+ + +The sections below provide details on the use of the PostgreSQL and RabbitMQ services and the limitations when running AiiDA without them. + +.. _installation:guide-quick:limitations:rabbitmq: -Functionality -------------- +RabbitMQ +-------- Part of AiiDA's functionality requires a `message broker `_, with the default implementation using `RabbitMQ `_. -The message broker is used to allow communication with the :ref:`daemon `. -Since RabbitMQ is a separate service and is not always trivial to install, the quick installation guide sets up a profile that does not require it. 
-As a result, the daemon cannot be started and processes cannot be submitted to it but can only be run in the current Python interpreter.
+The message broker is used to allow communication with processes and the :ref:`daemon ` as well as between the processes themselves.
+Since RabbitMQ is a separate service and is not always trivial to install, the quick installation guide allows setting up a profile that does not require it.
+However, as a result the profile:
+
+* is not suitable for high-throughput workloads (a polling-based mechanism is used rather than an event-based one)
+* cannot run the daemon (no ``verdi daemon start/stop``) and therefore processes cannot be submitted to the daemon (i.e., one can only use ``run()`` instead of ``submit()`` to launch calculations and workflows)
+* cannot play, pause, kill processes
 
 .. note::
 
     The ``verdi presto`` command automatically checks if RabbitMQ is running on the localhost.
-    If it can successfully connect, it configures the profile with the message broker and therefore the daemon functionality will be available.
+    If it can successfully connect, it configures the profile with the message broker and therefore the limitations listed above do not apply.
 
 .. tip::
 
-    The connection parameters of RabbitMQ can be (re)configured after the profile is set up with ``verdi profile configure-rabbitmq``.
-    This can be useful when the RabbitMQ setup is different from the default that AiiDA checks for and the automatic configuration of ``verdi presto`` failed.
+    A profile created by ``verdi presto`` can easily start using RabbitMQ as the broker at a later stage.
+    Once a RabbitMQ service is available (see :ref:`install RabbitMQ ` for instructions to install it), run ``verdi profile configure-rabbitmq`` to configure the profile to use it as the broker.
+
+.. _installation:guide-quick:limitations:postgresql:
 
+PostgreSQL
+----------
 
-Performance
------------
+AiiDA stores (part of) the data of the provenance graph in a database and the `PostgreSQL `_ service provides great performance for use-cases that require high-throughput.
+Since PostgreSQL is a separate service and is not always trivial to install, the quick installation guide allows setting up a profile that uses the serverless `SQLite `_ instead.
+However, as a result the profile:
 
-The quick installation guide by default creates a profile that uses `SQLite `_ for the database.
-Since SQLite does not require running a service, it is easy to install and use on essentially any system.
-However, for certain use cases it is not going to be the most performant solution.
-AiiDA also supports `PostgreSQL `_ which is often going to be more performant compared to SQLite.
+* is not suitable for high-throughput workloads (concurrent writes from multiple processes to the database are serialized)
+* does not support the ``has_key`` and ``contains`` operators in the ``QueryBuilder``
+* does not support the ``get_creation_statistics`` method of the ``QueryBuilder``
 
 .. tip::
 
     If a PostgreSQL service is available, run ``verdi presto --use-postgres`` to set up a profile that uses PostgreSQL instead of SQLite.
     The command tries to connect to the service and automatically create a user account and database to use for the new profile.
     AiiDA provides defaults that work for most setups where PostgreSQL is installed on the localhost.
     Should this fail, the connection parameters can be customized using the ``--postgres-hostname``, ``--postgres-port``, ``--postgres-username``, ``--postgres-password`` options.
- -Please refer to the :ref:`complete installation guide ` for instructions to set up a feature-complete and performant installation. From 71422eb872040a9ba23047d2ec031f6deaa6a7cc Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Tue, 23 Jul 2024 21:28:47 +0200 Subject: [PATCH 68/82] Add the `SshAutoTransport` transport plugin (#6154) This transport plugin subclasses the `SshTransport` plugin in order to remove all the configuration options. Instead, it parses the user's SSH config file using `paramiko.SSHConfig` when the transport is instantiated to determine the connection parameters automatically. The advantage of this approach is that when configuring a `Computer` using this plugin, the user is not prompted with a bunch of options. Rather, if they can connect to the target machine using `ssh` directly, the transport will also work. What's more, when the user updates their SSH config, the transport automatically uses these changes the next time it is instantiated as opposed to the `SshTransport` plugin which stores the configuration in an `AuthInfo` in the database and is therefore static. The original implementation of this plugin looked into the `fabric` library. This library builds on top of `paramiko` and aims to make configuration SSH connections easier, just as this new plugin was aiming to. However, after a closer look, it seems that fabric was not adding a lot of clever code when it comes to parsing the user's SSH config. It does implement some clever code for dealing with proxy jumps and commands but the `SshTransport` also already implements this. Therefore, it is not really justified to add `fabric` as a dependency but instead we opt to use `paramiko` to parse the config ourselves. --- pyproject.toml | 1 + src/aiida/transports/plugins/ssh_auto.py | 61 ++++++++++++++++++++++++ tests/cmdline/commands/test_computer.py | 14 ++++++ tests/engine/daemon/test_execmanager.py | 3 +- tests/transports/test_all_plugins.py | 12 ++++- 5 files changed, 87 insertions(+), 4 deletions(-) create mode 100644 src/aiida/transports/plugins/ssh_auto.py diff --git a/pyproject.toml b/pyproject.toml index 90e70ffb23..cb7df62131 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -174,6 +174,7 @@ requires-python = '>=3.9' [project.entry-points.'aiida.transports'] 'core.local' = 'aiida.transports.plugins.local:LocalTransport' 'core.ssh' = 'aiida.transports.plugins.ssh:SshTransport' +'core.ssh_auto' = 'aiida.transports.plugins.ssh_auto:SshAutoTransport' [project.entry-points.'aiida.workflows'] 'core.arithmetic.add_multiply' = 'aiida.workflows.arithmetic.add_multiply:add_multiply' diff --git a/src/aiida/transports/plugins/ssh_auto.py b/src/aiida/transports/plugins/ssh_auto.py new file mode 100644 index 0000000000..5d193cac87 --- /dev/null +++ b/src/aiida/transports/plugins/ssh_auto.py @@ -0,0 +1,61 @@ +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Plugin for transport over SSH (and SFTP for file transfer).""" + +import pathlib + +import paramiko + +from .ssh import SshTransport + +__all__ = ('SshAutoTransport',) + + +class SshAutoTransport(SshTransport): + """Support connection, command execution and data transfer to remote computers via SSH+SFTP.""" + + _valid_connect_params = [] + _valid_auth_options = [] + + FILEPATH_CONFIG: pathlib.Path = pathlib.Path('~').expanduser() / '.ssh' / 'config' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs, key_policy='AutoAddPolicy') + + config_ssh = paramiko.SSHConfig() + + try: + with self.FILEPATH_CONFIG.open() as handle: + config_ssh.parse(handle) + except FileNotFoundError as exception: + raise RuntimeError( + f'Could not determine connection configuration as the `{self.FILEPATH_CONFIG}` does not exist.' + ) from exception + except PermissionError as exception: + raise RuntimeError( + f'Could not determine connection configuration as the `{self.FILEPATH_CONFIG}` is not readable.' + ) from exception + + if self._machine not in config_ssh.get_hostnames(): + self.logger.warning( + f'The host `{self._machine}` is not defined in SSH config, connection will most likely fail to open.' + ) + + config_host = config_ssh.lookup(self._machine) + + self._connect_args = { + 'port': config_host.get('port', 22), + 'username': config_host.get('user'), + 'key_filename': config_host.get('identityfile', []), + 'timeout': config_host.get('connecttimeout', 60), + 'proxy_command': config_host.get('proxycommand', None), + 'proxy_jump': config_host.get('proxyjump', None), + } + + self._machine = config_host['hostname'] diff --git a/tests/cmdline/commands/test_computer.py b/tests/cmdline/commands/test_computer.py index dac1170770..6ae94c1634 100644 --- a/tests/cmdline/commands/test_computer.py +++ b/tests/cmdline/commands/test_computer.py @@ -971,3 +971,17 @@ def time_use_login_shell(authinfo, auth_params, use_login_shell, iterations) -> result = run_cli_command(computer_test, [aiida_localhost.label], use_subprocess=False) assert 'Success: all 6 tests succeeded' in result.output assert 'computer is configured to use a login shell, which is slower compared to a normal shell' in result.output + + +def test_computer_ssh_auto(run_cli_command, aiida_computer): + """Test setup of computer with ``core.ssh_auto`` entry point. + + The configure step should only require the common shared options ``safe_interval`` and ``use_login_shell``. + """ + computer = aiida_computer(transport_type='core.ssh_auto').store() + assert not computer.is_configured + + # It is important that no other options (except for `--safe-interval`) have to be specified for this transport type. 
+ options = ['core.ssh_auto', computer.uuid, '--non-interactive', '--safe-interval', '0'] + run_cli_command(computer_configure, options, use_subprocess=False) + assert computer.is_configured diff --git a/tests/engine/daemon/test_execmanager.py b/tests/engine/daemon/test_execmanager.py index bb4209659d..5b34f099a7 100644 --- a/tests/engine/daemon/test_execmanager.py +++ b/tests/engine/daemon/test_execmanager.py @@ -16,7 +16,6 @@ from aiida.common.folders import SandboxFolder from aiida.engine.daemon import execmanager from aiida.orm import CalcJobNode, FolderData, PortableCode, RemoteData, SinglefileData -from aiida.plugins import entry_point from aiida.transports.plugins.local import LocalTransport @@ -40,7 +39,7 @@ def file_hierarchy_simple(): } -@pytest.fixture(params=entry_point.get_entry_point_names('aiida.transports')) +@pytest.fixture(params=('core.local', 'core.ssh')) def node_and_calc_info(aiida_localhost, aiida_computer_ssh, aiida_code_installed, request): """Return a ``CalcJobNode`` and associated ``CalcInfo`` instance.""" diff --git a/tests/transports/test_all_plugins.py b/tests/transports/test_all_plugins.py index 986dd465a9..c536b196a2 100644 --- a/tests/transports/test_all_plugins.py +++ b/tests/transports/test_all_plugins.py @@ -34,14 +34,22 @@ @pytest.fixture(scope='function', params=entry_point.get_entry_point_names('aiida.transports')) -def custom_transport(request) -> Transport: +def custom_transport(request, tmp_path, monkeypatch) -> Transport: """Fixture that parametrizes over all the registered implementations of the ``CommonRelaxWorkChain``.""" + plugin = TransportFactory(request.param) + if request.param == 'core.ssh': kwargs = {'machine': 'localhost', 'timeout': 30, 'load_system_host_keys': True, 'key_policy': 'AutoAddPolicy'} + elif request.param == 'core.ssh_auto': + kwargs = {'machine': 'localhost'} + filepath_config = tmp_path / 'config' + monkeypatch.setattr(plugin, 'FILEPATH_CONFIG', filepath_config) + if not filepath_config.exists(): + filepath_config.write_text('Host localhost') else: kwargs = {} - return TransportFactory(request.param)(**kwargs) + return plugin(**kwargs) def test_is_open(custom_transport): From 5611ddab0a275a08512c5974f9e4750a2eea227d Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Tue, 23 Jul 2024 22:59:04 +0200 Subject: [PATCH 69/82] Engine: Change signature of `set_process_state_change_timestamp` The function took a `Process` instance but then only uses it to fetch its associated node. Since a `Process` instance is way more difficult to mock, it makes testing the function unnecessarily complicated. Since it only needs the process node, the signature is changed to accept the node instead of the process. This utility function is unlikely to be used in client code, justifying this technically backwards incompatible change. 
--- src/aiida/engine/processes/process.py | 2 +- src/aiida/engine/utils.py | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/aiida/engine/processes/process.py b/src/aiida/engine/processes/process.py index 5eabfd56f7..695f6831d4 100644 --- a/src/aiida/engine/processes/process.py +++ b/src/aiida/engine/processes/process.py @@ -431,7 +431,7 @@ def on_entered(self, from_state: Optional[plumpy.process_states.State]) -> None: self.node.set_process_state(self._state.LABEL) # type: ignore[arg-type] self._save_checkpoint() - set_process_state_change_timestamp(self) + set_process_state_change_timestamp(self.node) super().on_entered(from_state) @override diff --git a/src/aiida/engine/utils.py b/src/aiida/engine/utils.py index 888089dc64..ac56fd98e8 100644 --- a/src/aiida/engine/utils.py +++ b/src/aiida/engine/utils.py @@ -18,6 +18,8 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterator, List, Optional, Tuple, Type, Union if TYPE_CHECKING: + from aiida.orm import ProcessNode + from .processes import Process, ProcessBuilder from .runners import Runner @@ -259,7 +261,7 @@ def loop_scope(loop) -> Iterator[None]: asyncio.set_event_loop(current) -def set_process_state_change_timestamp(process: 'Process') -> None: +def set_process_state_change_timestamp(node: 'ProcessNode') -> None: """Set the global setting that reflects the last time a process changed state, for the process type of the given process, to the current timestamp. The process type will be determined based on the class of the calculation node it has as its database container. @@ -270,15 +272,15 @@ def set_process_state_change_timestamp(process: 'Process') -> None: from aiida.manage import get_manager from aiida.orm import CalculationNode, ProcessNode, WorkflowNode - if isinstance(process.node, CalculationNode): + if isinstance(node, CalculationNode): process_type = 'calculation' - elif isinstance(process.node, WorkflowNode): + elif isinstance(node, WorkflowNode): process_type = 'work' - elif isinstance(process.node, ProcessNode): + elif isinstance(node, ProcessNode): # This will only occur for testing, as in general users cannot launch plain Process classes return else: - raise ValueError(f'unsupported calculation node type {type(process.node)}') + raise ValueError(f'unsupported calculation node type {type(node)}') key = PROCESS_STATE_CHANGE_KEY.format(process_type) description = PROCESS_STATE_CHANGE_DESCRIPTION.format(process_type) From 1b8c58be800b4542394f783c2e20aa2ca33d5234 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Tue, 23 Jul 2024 21:40:57 +0200 Subject: [PATCH 70/82] Engine: Ignore failing process state change for `core.sqlite_dos` For each process state change, the engine calls the utility function `aiida.engine.utils.set_process_state_change_timestamp`. This calls `set_global_variable` on the storage plugin to update the `process|state_change|.*` key in the settings table. This value is used in `verdi process list` to show when the last time a process changed its state, which serves as a proxy of daemon activity. When multiple processes would be running, this call would throw an exception for the `core.sqlite_dos` storage plugin. This is because SQLite does not support concurrent writes that touch the same page, which is the case when multiple writes are updating the same row. 
Since the updating of the timestamp is not crucial for AiiDA functioning properly, especially since it is because another process was trying to perform the same update, it is safe to ignore the failed update and simply log that as a warning. --- src/aiida/engine/utils.py | 14 +++++++++- tests/engine/test_utils.py | 52 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/src/aiida/engine/utils.py b/src/aiida/engine/utils.py index ac56fd98e8..4053156a97 100644 --- a/src/aiida/engine/utils.py +++ b/src/aiida/engine/utils.py @@ -268,6 +268,8 @@ def set_process_state_change_timestamp(node: 'ProcessNode') -> None: :param process: the Process instance that changed its state """ + from sqlalchemy.exc import OperationalError + from aiida.common import timezone from aiida.manage import get_manager from aiida.orm import CalculationNode, ProcessNode, WorkflowNode @@ -287,7 +289,17 @@ def set_process_state_change_timestamp(node: 'ProcessNode') -> None: value = timezone.now().isoformat() backend = get_manager().get_profile_storage() - backend.set_global_variable(key, value, description) + + try: + backend.set_global_variable(key, value, description) + except OperationalError: + # This typically happens for SQLite-based storage plugins like ``core.sqlite_dos``. Since this is merely an + # update of a timestamp that is likely to be updated soon again, just ignoring the failed update is not a + # problem. + LOGGER.warning( + f'Failed to write global variable `{key}` to `{value}` because the database was locked. If the storage ' + 'plugin being used is `core.sqlite_dos` this is to be expected and can be safely ignored.' + ) def get_process_state_change_timestamp(process_type: Optional[str] = None) -> Optional[datetime]: diff --git a/tests/engine/test_utils.py b/tests/engine/test_utils.py index 00bf79b7bc..cca5866343 100644 --- a/tests/engine/test_utils.py +++ b/tests/engine/test_utils.py @@ -9,6 +9,7 @@ """Test engine utilities such as the exponential backoff mechanism.""" import asyncio +import contextlib import pytest from aiida import orm @@ -16,9 +17,11 @@ from aiida.engine.utils import ( InterruptableFuture, exponential_backoff_retry, + get_process_state_change_timestamp, instantiate_process, interruptable_task, is_process_function, + set_process_state_change_timestamp, ) ITERATION = 0 @@ -225,3 +228,52 @@ async def coro(): result = await task_fut assert result == 'NOT ME!!!' + + +@pytest.mark.parametrize('with_transaction', (True, False)) +@pytest.mark.parametrize('monkeypatch_process_state_change', (True, False)) +def test_set_process_state_change_timestamp(manager, with_transaction, monkeypatch_process_state_change, monkeypatch): + """Test :func:`aiida.engine.utils.set_process_state_change_timestamp`. + + This function is known to except when the ``core.sqlite_dos`` storage plugin is used and multiple processes are run. + The function is called each time a process changes state and since it is updating the same row in the settings table + the limitation of SQLite to not allow concurrent writes to the same page causes an exception to be thrown because + the database is locked. This exception is caught in ``set_process_state_change_timestamp`` and simply is ignored. + This test makes sure that if this happens, any other state changes, e.g. an extra being set on a node, are not + accidentally reverted, when the changes are performed in an explicit transaction or not. 
+ """ + storage = manager.get_profile_storage() + + node = orm.CalculationNode().store() + extra_key = 'some_key' + extra_value = 'some value' + + # Initialize the process state change timestamp so it is possible to check whether it was changed or not at the + # end of the test. + set_process_state_change_timestamp(node) + current_timestamp = get_process_state_change_timestamp() + assert current_timestamp is not None + + if monkeypatch_process_state_change: + + def set_global_variable(*_, **__): + from sqlalchemy.exc import OperationalError + + raise OperationalError('monkey failure', None, '', '') + + monkeypatch.setattr(storage, 'set_global_variable', set_global_variable) + + transaction_context = storage.transaction if with_transaction else contextlib.nullcontext + + with transaction_context(): + node.base.extras.set(extra_key, extra_value) + set_process_state_change_timestamp(node) + + # The node extra should always have been set, regardless if the process state change excepted + assert node.base.extras.get(extra_key) == extra_value + + # The process state change should have changed if the storage plugin was not monkeypatched to fail + if monkeypatch_process_state_change: + assert get_process_state_change_timestamp() == current_timestamp + else: + assert get_process_state_change_timestamp() != current_timestamp From 7402c17755b332cc9a06e88516621e575f0d1cce Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 25 Jul 2024 08:11:35 +0200 Subject: [PATCH 71/82] CLI: Fix `verdi storage migrate` for profile without broker (#6550) The command needs to make sure the daemon of the profile is not running so it instantiates the `DaemonClient` but this raises for profiles that do not define a broker. Since the daemon cannot be started for brokerless profiles anyway the command does not have to check in this case. 
--- src/aiida/cmdline/commands/cmd_storage.py | 10 ++++++---- tests/cmdline/commands/test_storage.py | 9 +++++++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/aiida/cmdline/commands/cmd_storage.py b/src/aiida/cmdline/commands/cmd_storage.py index 5382ff455f..c4fd17601a 100644 --- a/src/aiida/cmdline/commands/cmd_storage.py +++ b/src/aiida/cmdline/commands/cmd_storage.py @@ -41,12 +41,14 @@ def storage_migrate(force): from aiida.engine.daemon.client import get_daemon_client from aiida.manage import get_manager - client = get_daemon_client() - if client.is_daemon_running: - echo.echo_critical('Migration aborted, the daemon for the profile is still running.') - manager = get_manager() profile = manager.get_profile() + + if profile.process_control_backend: + client = get_daemon_client() + if client.is_daemon_running: + echo.echo_critical('Migration aborted, the daemon for the profile is still running.') + storage_cls = profile.storage_cls if not force: diff --git a/tests/cmdline/commands/test_storage.py b/tests/cmdline/commands/test_storage.py index 8c374885a2..eb629d013f 100644 --- a/tests/cmdline/commands/test_storage.py +++ b/tests/cmdline/commands/test_storage.py @@ -35,6 +35,15 @@ def tests_storage_info(aiida_localhost, run_cli_command): assert node.node_type in result.output +@pytest.mark.usefixtures('stopped_daemon_client') +def tests_storage_migrate_no_broker(aiida_config_tmp, aiida_profile_factory, run_cli_command): + """Test the ``verdi storage migrate`` command for a profile without a broker.""" + with aiida_profile_factory(aiida_config_tmp) as profile: + assert profile.process_control_backend is None + result = run_cli_command(cmd_storage.storage_migrate, parameters=['--force'], use_subprocess=False) + assert 'Migrating to the head of the main branch' in result.output + + @pytest.mark.usefixtures('stopped_daemon_client') def tests_storage_migrate_force(run_cli_command): """Test the ``verdi storage migrate`` command (with force option).""" From ad1a431f33a6e57d8b6867447ecdfd8ff41bc8f5 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Mon, 5 Aug 2024 11:09:28 +0200 Subject: [PATCH 72/82] CLI: Validate storage in `verdi storage version` (#6551) The `verdi storage version`, in addition to printing the version of the code's and storage's schema, now also validates the storage. If the storage is corrupt or cannot be reached, the command returns the exit code 3. If the storage and code schema versions are incompatible, exit code 4 is returned. This way this command serves as an alternative to running `verdi storage migrate` as a way to check whether a profile needs to be migrated. The `verdi storage migrate` command needs to perform checks such as whether the daemon is running and so is always going to be slower. 
--- src/aiida/cmdline/commands/cmd_storage.py | 37 +++++++++++++++++++---- tests/cmdline/commands/test_storage.py | 20 ++++++++++++ 2 files changed, 51 insertions(+), 6 deletions(-) diff --git a/src/aiida/cmdline/commands/cmd_storage.py b/src/aiida/cmdline/commands/cmd_storage.py index c4fd17601a..f6f64b755e 100644 --- a/src/aiida/cmdline/commands/cmd_storage.py +++ b/src/aiida/cmdline/commands/cmd_storage.py @@ -8,6 +8,8 @@ ########################################################################### """`verdi storage` commands.""" +import sys + import click from click_spinner import spinner @@ -24,14 +26,37 @@ def verdi_storage(): @verdi_storage.command('version') def storage_version(): - """Print the current version of the storage schema.""" + """Print the current version of the storage schema. + + The command returns the following exit codes: + + * 0: If the storage schema is equal and compatible to the schema version of the code + * 3: If the storage cannot be reached or is corrupt + * 4: If the storage schema is compatible with the code schema version and probably needs to be migrated. + """ from aiida import get_profile + from aiida.common.exceptions import CorruptStorage, IncompatibleStorageSchema, UnreachableStorage - profile = get_profile() - head_version = profile.storage_cls.version_head() - profile_version = profile.storage_cls.version_profile(profile) - echo.echo(f'Latest storage schema version: {head_version!r}') - echo.echo(f'Storage schema version of {profile.name!r}: {profile_version!r}') + try: + profile = get_profile() + head_version = profile.storage_cls.version_head() + profile_version = profile.storage_cls.version_profile(profile) + echo.echo(f'Latest storage schema version: {head_version!r}') + echo.echo(f'Storage schema version of {profile.name!r}: {profile_version!r}') + except Exception as exception: + echo.echo_critical(f'Failed to determine the storage version: {exception}') + + try: + profile.storage_cls(profile) + except (CorruptStorage, UnreachableStorage) as exception: + echo.echo_error(f'The storage cannot be reached or is corrupt: {exception}') + sys.exit(3) + except IncompatibleStorageSchema: + echo.echo_error( + f'The storage schema version {profile_version} is incompatible with the code version {head_version}.' + 'Run `verdi storage migrate` to migrate the storage.' 
+        )
+        sys.exit(4)


 @verdi_storage.command('migrate')
diff --git a/tests/cmdline/commands/test_storage.py b/tests/cmdline/commands/test_storage.py
index eb629d013f..59698343e6 100644
--- a/tests/cmdline/commands/test_storage.py
+++ b/tests/cmdline/commands/test_storage.py
@@ -23,6 +23,26 @@ def tests_storage_version(run_cli_command):
     assert version in result.output


+@pytest.mark.parametrize(
+    'exception_cls, exit_code',
+    (
+        (exceptions.CorruptStorage, 3),
+        (exceptions.UnreachableStorage, 3),
+        (exceptions.IncompatibleStorageSchema, 4),
+    ),
+)
+def tests_storage_version_non_zero_exit_code(aiida_profile, run_cli_command, monkeypatch, exception_cls, exit_code):
+    """Test the ``verdi storage version`` command when it returns a non-zero exit code."""
+
+    def validate_storage(self):
+        raise exception_cls()
+
+    with monkeypatch.context() as context:
+        context.setattr(aiida_profile.storage_cls.migrator, 'validate_storage', validate_storage)
+        result = run_cli_command(cmd_storage.storage_version, raises=True)
+        assert result.exit_code == exit_code
+
+
 def tests_storage_info(aiida_localhost, run_cli_command):
     """Test the ``verdi storage info`` command with the ``--detailed`` option."""
     from aiida import orm

From cbf672f1ddf13b3b56b7f3666a7ed2a014bde0cc Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Tue, 6 Aug 2024 10:41:48 +0200
Subject: [PATCH 73/82] Engine: Ensure node is sealed when process excepts (#6549)

Processes that hit a certain exception were not being sealed. This would cause
problems when trying to export them, which only allows sealed nodes.

The problem occurs when another exception occurs while handling the original
exception. An example is when `Process.update_outputs` would raise a
`ValueError` because an unstored node had been attached as an output. Since
this method is called in `on_entered`, which is called when the process enters
a new state, it would be called again when it entered the excepted state.
Since the process was already excepted, the rest of the state changes are cut
short by `plumpy`. This would cause the process to never go to the final
`TERMINATED` state and so the `on_terminated` method would not be called,
which is where the process' node is sealed.

The solution is to check the current state in `on_entered` and if it is
`EXCEPTED` to simply return and no longer perform any updates on the node.
This should prevent any other exceptions from being hit and ensure the process
transitions properly to the final terminated state. The only update that is
still performed is to update the process state on the process' node, otherwise
it would not properly be shown as excepted.
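For databases that already contain processes hit by this bug, a hedged clean-up sketch; it assumes `ProcessNode.seal()` is available and relies on the `is_excepted`/`is_sealed` properties used in the test below:

    # Seal any excepted process nodes that were left unsealed, so they can be
    # exported again.
    from aiida import orm

    for node in orm.QueryBuilder().append(orm.ProcessNode, project='*').all(flat=True):
        if node.is_excepted and not node.is_sealed:
            node.seal()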
--- src/aiida/engine/processes/process.py | 12 +++++++++++- tests/engine/test_work_chain.py | 1 + 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/aiida/engine/processes/process.py b/src/aiida/engine/processes/process.py index 695f6831d4..c5fc50bcc3 100644 --- a/src/aiida/engine/processes/process.py +++ b/src/aiida/engine/processes/process.py @@ -415,8 +415,19 @@ def on_create(self) -> None: @override def on_entered(self, from_state: Optional[plumpy.process_states.State]) -> None: """After entering a new state, save a checkpoint and update the latest process state change timestamp.""" + from plumpy import ProcessState + from aiida.engine.utils import set_process_state_change_timestamp + super().on_entered(from_state) + + if self._state.LABEL is ProcessState.EXCEPTED: + # The process is already excepted so simply update the process state on the node and let the process + # complete the state transition to the terminal state. If another exception is raised during this exception + # handling, the process transitioning is cut short and never makes it to the terminal state. + self.node.set_process_state(self._state.LABEL) + return + # For reasons unknown, it is important to update the outputs first, before doing anything else, otherwise there # is the risk that certain outputs do not get attached before the process reaches a terminal state. Nevertheless # we need to guarantee that the process state gets updated even if the ``update_outputs`` call excepts, for @@ -432,7 +443,6 @@ def on_entered(self, from_state: Optional[plumpy.process_states.State]) -> None: self._save_checkpoint() set_process_state_change_timestamp(self.node) - super().on_entered(from_state) @override def on_terminated(self) -> None: diff --git a/tests/engine/test_work_chain.py b/tests/engine/test_work_chain.py index e7288a3806..1fbb578b2a 100644 --- a/tests/engine/test_work_chain.py +++ b/tests/engine/test_work_chain.py @@ -422,6 +422,7 @@ def illegal(self): orm.QueryBuilder().append(orm.ProcessNode, tag='node').order_by({'node': {'id': 'desc'}}).first(flat=True) ) assert node.is_excepted + assert node.is_sealed assert 'ValueError: Workflow tried returning an unstored `Data` node.' in node.exception def test_same_input_node(self): From faab95add6c895951d2912966e1589d9ac5c6f01 Mon Sep 17 00:00:00 2001 From: Ali Khosravi Date: Tue, 6 Aug 2024 16:54:14 +0200 Subject: [PATCH 74/82] Docs: Correct signature of `get_daemon_client` example snippet (#6554) --- docs/source/topics/daemon.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/topics/daemon.rst b/docs/source/topics/daemon.rst index 87cf027f31..9958b331bd 100644 --- a/docs/source/topics/daemon.rst +++ b/docs/source/topics/daemon.rst @@ -41,7 +41,7 @@ It is also possible to explicitly specify a profile: .. 
code-block:: python - client = get_daemon_client(profile='some-profile') + client = get_daemon_client(profile_name='some-profile') The daemon can be started and stopped through the client: From fb3686271fcdeb5506838a5a3069955546b05460 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Wed, 7 Aug 2024 10:25:00 +0200 Subject: [PATCH 75/82] Post release: update version number and CHANGELOG after v2.6.2 release --- CHANGELOG.md | 29 +++++++++++++++++++++++++++++ src/aiida/__init__.py | 2 +- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fbc5ddf965..ea3c843d8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,34 @@ # Changelog +## v2.6.2 - 2024-08-07 + +### Fixes +- `LocalTransport`: Fix typo for `ignore_nonexisting` in `put` (#6471) [[ecda558d0]](https://github.com/aiidateam/aiida-core/commit/ecda558d08c5608880308f69a21c05fe918be89f) +- CLI: `verdi computer test` report correct failed tests (#6536) [[9c3f2bb58]](https://github.com/aiidateam/aiida-core/commit/9c3f2bb589f1a6cc920ed2fbf0627924d8fce954) +- CLI: Fix `verdi storage migrate` for profile without broker (#6550) [[389fc487d]](https://github.com/aiidateam/aiida-core/commit/389fc487d092c2e34713d228e38c8164608bab2d) +- CLI: Fix bug `verdi presto` when tab-completing without config (#6535) [[efcf75e40]](https://github.com/aiidateam/aiida-core/commit/efcf75e405dcef8ca8c51b19d6262ca81f4413c3) +- Engine: Change signature of `set_process_state_change_timestamp` [[923fd9f6e]](https://github.com/aiidateam/aiida-core/commit/923fd9f6ec3cb39b8985ac149985046e720438f8) +- Engine: Ensure node is sealed when process excepts (#6549) [[e3ed9a2f3]](https://github.com/aiidateam/aiida-core/commit/e3ed9a2f3bf84e72cdc4c90e21494dc2b9c1cd56) +- Engine: Fix bug in upload calculation for `PortableCode` with SSH (#6519) [[740ae2040]](https://github.com/aiidateam/aiida-core/commit/740ae20408e4e3047c26c91221de5b96b8d7afbe) +- Engine: Ignore failing process state change for `core.sqlite_dos` [[fb4f9815f]](https://github.com/aiidateam/aiida-core/commit/fb4f9815fbd3bfe053cc0d1e3abb5b86bbc9dffd) + +### Dependencies +- Pin requirement to minor version `sphinx~=7.2.0` (#6527) [[25cb73188]](https://github.com/aiidateam/aiida-core/commit/25cb731880d45045773f7674fee9367d792aeda9) + +### Documentation +- Add `PluggableSchemaValidator` to nitpick exceptions (#6515) [[0ce3c0025]](https://github.com/aiidateam/aiida-core/commit/0ce3c0025384e95006d43e98da2a96b2cfadde70) +- Add `robots.txt` to only allow indexing of `latest` and `stable` (#6517) [[a492e3492]](https://github.com/aiidateam/aiida-core/commit/a492e349288ee90896193575edf319c2b7ea4c85) +- Add succint overview of limitations of no-services profile [[d7ca5657b]](https://github.com/aiidateam/aiida-core/commit/d7ca5657b0dec83cb8a440d0b8e11c9c84e38149) +- Correct signature of `get_daemon_client` example snippet (#6554) [[92d391658]](https://github.com/aiidateam/aiida-core/commit/92d391658e5e92113ad9a32d61444e23342ee0ae) +- Fix typo in pytest plugins codeblock (#6513) [[eb23688fe]](https://github.com/aiidateam/aiida-core/commit/eb23688febacb5b828d0f45ebdeedee65d0c00fe) +- Move limitations warning to top of quick install [[493e529a7]](https://github.com/aiidateam/aiida-core/commit/493e529a7aec052f81d1bbfed33e5b44db9434e2) +- Remove note in installation guide regarding Python requirement [[9aa2044e4]](https://github.com/aiidateam/aiida-core/commit/9aa2044e4ce665d64bb3a1c26f166512287aa28b) +- Update `redirects.txt` for installation pages (#6509) 
[[508a9fb2a]](https://github.com/aiidateam/aiida-core/commit/508a9fb2a7f6eb6f31b40b3f28d13c96f7875503)
+
+### Devops
+- Fix pymatgen import causing mypy to fail (#6540) [[813374fe1]](https://github.com/aiidateam/aiida-core/commit/813374fe1ee003c8c05f9aa191147ec928dfa918)
+
+
 ## v2.6.1 - 2024-07-01

 ### Fixes:
diff --git a/src/aiida/__init__.py b/src/aiida/__init__.py
index 8e86350e40..743c5009ab 100644
--- a/src/aiida/__init__.py
+++ b/src/aiida/__init__.py
@@ -27,7 +27,7 @@
     'For further information please visit http://www.aiida.net/. All rights reserved.'
 )
 __license__ = 'MIT license, see LICENSE.txt file.'
-__version__ = '2.6.1.post0'
+__version__ = '2.6.2.post0'
 __authors__ = 'The AiiDA team.'
 __paper__ = (
     'S. P. Huber et al., "AiiDA 1.0, a scalable computational infrastructure for automated reproducible workflows and '

From c3b10b759a9cd062800ef120591d5c7fd0ae4ee7 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Mon, 19 Aug 2024 12:32:02 +0200
Subject: [PATCH 76/82] CLI: Fix exception for `verdi plugin list` (#6560)

In e952d7717c1d8001555e8d19f54f4fa349da6c6e a bug in `verdi plugin list` was
fixed where the conditional to check whether the plugin was a process class
would always raise an `AttributeError` if the plugin was not a `Process` or a
process function. As a result, the code would never get to the else-clause.
The else-clause itself contained another bug, which was now revealed by the
fixing of the bug in the conditional.

The else-clause would call the `get_description` classmethod of the plugin,
but no classes in AiiDA that are pluginnable even define such a class method.
Probably, the original author confused it with the instance method
`get_description`, but the `verdi plugin list` command just deals with the
class.

The `get_description` call is replaced with just getting `__doc__`, which
returns the docstring of the class/function, or `None` if it is not defined.
In the latter case, a default message is displayed saying that no description
is available.

Since the else-clause code was never reached before the recent fix and the
`get_description` method was never supported officially by AiiDA's pluginnable
interfaces, it is fine to just change this behavior.
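In practice a plugin now only needs an ordinary docstring to show a description; a hedged illustration (the parser class below is made up):

    # `verdi plugin list aiida.parsers <entry point>` prints the class docstring;
    # a plugin without a docstring gets the explicit 'No description available'
    # error instead of the old `AttributeError`.
    from aiida.parsers import Parser


    class ExampleParser(Parser):
        """Parse the output files of the (hypothetical) ``example`` code."""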
--- src/aiida/cmdline/commands/cmd_plugin.py | 15 +++++++-------- tests/cmdline/commands/test_plugin.py | 23 ++++++++++------------- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/src/aiida/cmdline/commands/cmd_plugin.py b/src/aiida/cmdline/commands/cmd_plugin.py index 9f43dd8b56..6cbbd958ce 100644 --- a/src/aiida/cmdline/commands/cmd_plugin.py +++ b/src/aiida/cmdline/commands/cmd_plugin.py @@ -47,14 +47,13 @@ def plugin_list(entry_point_group, entry_point): except EntryPointError as exception: echo.echo_critical(str(exception)) else: - try: - if (inspect.isclass(plugin) and issubclass(plugin, Process)) or ( - hasattr(plugin, 'is_process_function') and plugin.is_process_function - ): - print_process_info(plugin) - else: - echo.echo(str(plugin.get_description())) - except AttributeError: + if (inspect.isclass(plugin) and issubclass(plugin, Process)) or ( + hasattr(plugin, 'is_process_function') and plugin.is_process_function + ): + print_process_info(plugin) + elif plugin.__doc__: + echo.echo(plugin.__doc__) + else: echo.echo_error(f'No description available for {entry_point}') else: entry_points = get_entry_point_names(entry_point_group) diff --git a/tests/cmdline/commands/test_plugin.py b/tests/cmdline/commands/test_plugin.py index ec545ddea0..9960db7acd 100644 --- a/tests/cmdline/commands/test_plugin.py +++ b/tests/cmdline/commands/test_plugin.py @@ -11,7 +11,7 @@ import pytest from aiida.cmdline.commands import cmd_plugin from aiida.parsers import Parser -from aiida.plugins import CalculationFactory, ParserFactory, WorkflowFactory +from aiida.plugins import BaseFactory from aiida.plugins.entry_point import ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP @@ -43,6 +43,7 @@ def test_plugin_list_non_existing(run_cli_command): 'entry_point_string', ( 'aiida.calculations:core.arithmetic.add', + 'aiida.data:core.array', 'aiida.workflows:core.arithmetic.multiply_add', 'aiida.workflows:core.arithmetic.add_multiply', ), @@ -52,24 +53,20 @@ def test_plugin_list_detail(run_cli_command, entry_point_string): from aiida.plugins.entry_point import parse_entry_point_string entry_point_group, entry_point_name = parse_entry_point_string(entry_point_string) - factory = CalculationFactory if entry_point_group == 'aiida.calculations' else WorkflowFactory - entry_point = factory(entry_point_name) + entry_point = BaseFactory(entry_point_group, entry_point_name) result = run_cli_command(cmd_plugin.plugin_list, [entry_point_group, entry_point_name]) assert entry_point.__doc__ in result.output -class CustomParser(Parser): - @classmethod - def get_description(cls) -> str: - return 'str69' +class NoDocStringPluginParser(Parser): + pass -def test_plugin_description(run_cli_command, entry_points): - """Test that ``verdi plugin list`` uses ``get_description`` if defined.""" - - entry_points.add(CustomParser, 'aiida.parsers:custom.parser') - assert ParserFactory('custom.parser') is CustomParser +def test_plugin_list_no_docstring(run_cli_command, entry_points): + """Test ``verdi plugin list`` does not fail if the plugin does not define a docstring.""" + entry_points.add(NoDocStringPluginParser, 'aiida.parsers:custom.parser') + assert BaseFactory('aiida.parsers', 'custom.parser') is NoDocStringPluginParser result = run_cli_command(cmd_plugin.plugin_list, ['aiida.parsers', 'custom.parser']) - assert result.output.strip() == 'str69' + assert result.output.strip() == 'Error: No description available for custom.parser' From c52ec6758a0d5c5191e4099cabbbd1a7314284ed Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: 
Tue, 20 Aug 2024 17:19:19 +0200 Subject: [PATCH 77/82] Dependencies: Update requirement `paramiko~=3.0` (#6559) --- environment.yml | 2 +- pyproject.toml | 2 +- requirements/requirements-py-3.10.txt | 2 +- requirements/requirements-py-3.11.txt | 2 +- requirements/requirements-py-3.12.txt | 2 +- requirements/requirements-py-3.9.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/environment.yml b/environment.yml index cb86eef22f..be05f5eb17 100644 --- a/environment.yml +++ b/environment.yml @@ -21,7 +21,7 @@ dependencies: - kiwipy[rmq]~=0.8.4 - importlib-metadata~=6.0 - numpy~=1.21 -- paramiko>=2.7.2,~=2.7 +- paramiko~=3.0 - plumpy~=0.22.3 - pgsu~=0.3.0 - psutil~=5.6 diff --git a/pyproject.toml b/pyproject.toml index cb7df62131..129746e0fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ dependencies = [ 'kiwipy[rmq]~=0.8.4', 'importlib-metadata~=6.0', 'numpy~=1.21', - 'paramiko~=2.7,>=2.7.2', + 'paramiko~=3.0', 'plumpy~=0.22.3', 'pgsu~=0.3.0', 'psutil~=5.6', diff --git a/requirements/requirements-py-3.10.txt b/requirements/requirements-py-3.10.txt index 2a52146929..91b0f013cb 100644 --- a/requirements/requirements-py-3.10.txt +++ b/requirements/requirements-py-3.10.txt @@ -109,7 +109,7 @@ palettable==3.3.3 pamqp==3.3.0 pandas==2.0.2 pandocfilters==1.5.0 -paramiko==2.12.0 +paramiko==3.4.1 parso==0.8.3 pexpect==4.8.0 pg8000==1.29.8 diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt index 67ffe6add5..422f729d1b 100644 --- a/requirements/requirements-py-3.11.txt +++ b/requirements/requirements-py-3.11.txt @@ -108,7 +108,7 @@ palettable==3.3.3 pamqp==3.3.0 pandas==2.0.2 pandocfilters==1.5.0 -paramiko==2.12.0 +paramiko==3.4.1 parso==0.8.3 pexpect==4.8.0 pg8000==1.29.8 diff --git a/requirements/requirements-py-3.12.txt b/requirements/requirements-py-3.12.txt index 4a6d8ec05f..a16c0c82f5 100644 --- a/requirements/requirements-py-3.12.txt +++ b/requirements/requirements-py-3.12.txt @@ -108,7 +108,7 @@ palettable==3.3.3 pamqp==3.3.0 pandas==2.1.1 pandocfilters==1.5.0 -paramiko==2.12.0 +paramiko==3.4.1 parso==0.8.3 pexpect==4.8.0 pg8000==1.30.2 diff --git a/requirements/requirements-py-3.9.txt b/requirements/requirements-py-3.9.txt index 6707b64057..08e6bcf5f8 100644 --- a/requirements/requirements-py-3.9.txt +++ b/requirements/requirements-py-3.9.txt @@ -111,7 +111,7 @@ palettable==3.3.3 pamqp==3.3.0 pandas==2.0.2 pandocfilters==1.5.0 -paramiko==2.12.0 +paramiko==3.4.1 parso==0.8.3 pexpect==4.8.0 pg8000==1.29.8 From 655da5acc183ef81120f5d77f1fdc760e186c64c Mon Sep 17 00:00:00 2001 From: Ali Khosravi Date: Fri, 20 Sep 2024 15:49:38 +0200 Subject: [PATCH 78/82] CI: Update ignore comment as the way that presumably updated mypy expects (#6566) Apparently, somehow `mypy` is updated and it is complaining about an `ignore[assignment]` comment that previously was imposed by `mypy` itself. (in src/aiida/orm/utils/serialize.py::L51) This commit removed `ignore[assignment]`, and `ci-style / pre-commit (pull_request)` is passing now. 
--- src/aiida/orm/utils/serialize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiida/orm/utils/serialize.py b/src/aiida/orm/utils/serialize.py index 320fae2935..2d5923b1bd 100644 --- a/src/aiida/orm/utils/serialize.py +++ b/src/aiida/orm/utils/serialize.py @@ -48,7 +48,7 @@ def represent_enum(dumper: yaml.Dumper, enum: Enum) -> yaml.ScalarNode: def enum_constructor(loader: yaml.Loader, serialized: yaml.Node) -> Enum: """Construct an enum from the serialized representation.""" - deserialized: str = loader.construct_scalar(serialized) # type: ignore[arg-type,assignment] + deserialized: str = loader.construct_scalar(serialized) # type: ignore[arg-type] identifier, value = deserialized.split('|') cls = get_object_loader().load_object(identifier) enum = cls(value) From 72a6b183b8048d5c31b2b827efe6b8b969038e28 Mon Sep 17 00:00:00 2001 From: Ali Khosravi Date: Tue, 24 Sep 2024 12:20:54 +0200 Subject: [PATCH 79/82] CLI: Add filters to verdi group delete. (#6556) This commit copies the behavior of `verdi group list`, simply by setting a filter, one can get rid of all matching groups at once. --- docs/source/reference/command_line.rst | 2 +- src/aiida/cmdline/commands/cmd_group.py | 183 +++++++++++++++++++---- tests/cmdline/commands/test_group.py | 191 ++++++++++++++++++++++-- 3 files changed, 335 insertions(+), 41 deletions(-) diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst index 3982f75c53..c42c331ac9 100644 --- a/docs/source/reference/command_line.rst +++ b/docs/source/reference/command_line.rst @@ -220,7 +220,7 @@ Below is a list with all available subcommands. add-nodes Add nodes to a group. copy Duplicate a group. create Create an empty group with a given label. - delete Delete a group and (optionally) the nodes it contains. + delete Delete groups and (optionally) the nodes they contain. description Change the description of a group. list Show a list of existing groups. move-nodes Move the specified NODES from one group to another. diff --git a/src/aiida/cmdline/commands/cmd_group.py b/src/aiida/cmdline/commands/cmd_group.py index 092c21b8f0..4f340b386a 100644 --- a/src/aiida/cmdline/commands/cmd_group.py +++ b/src/aiida/cmdline/commands/cmd_group.py @@ -145,7 +145,33 @@ def group_move_nodes(source_group, target_group, force, nodes, all_entries): @verdi_group.command('delete') -@arguments.GROUP() +@arguments.GROUPS() +@options.ALL_USERS(help='Filter and delete groups for all users, rather than only for the current user.') +@options.USER(help='Add a filter to delete groups belonging to a specific user.') +@options.TYPE_STRING(help='Filter to only include groups of this type string.') +@options.PAST_DAYS(help='Add a filter to delete only groups created in the past N days.', default=None) +@click.option( + '-s', + '--startswith', + type=click.STRING, + default=None, + help='Add a filter to delete only groups for which the label begins with STRING.', +) +@click.option( + '-e', + '--endswith', + type=click.STRING, + default=None, + help='Add a filter to delete only groups for which the label ends with STRING.', +) +@click.option( + '-c', + '--contains', + type=click.STRING, + default=None, + help='Add a filter to delete only groups for which the label contains STRING.', +) +@options.NODE(help='Delete only the groups that contain a node.') @options.FORCE() @click.option( '--delete-nodes', is_flag=True, default=False, help='Delete all nodes in the group along with the group itself.' 
@@ -153,33 +179,138 @@ def group_move_nodes(source_group, target_group, force, nodes, all_entries): @options.graph_traversal_rules(GraphTraversalRules.DELETE.value) @options.DRY_RUN() @with_dbenv() -def group_delete(group, delete_nodes, dry_run, force, **traversal_rules): - """Delete a group and (optionally) the nodes it contains.""" +def group_delete( + groups, + delete_nodes, + dry_run, + force, + all_users, + user, + type_string, + past_days, + startswith, + endswith, + contains, + node, + **traversal_rules, +): + """Delete groups and (optionally) the nodes they contain.""" + from tabulate import tabulate + from aiida import orm from aiida.tools import delete_group_nodes - if not (force or dry_run): - click.confirm(f'Are you sure you want to delete {group}?', abort=True) - elif dry_run: - echo.echo_report(f'Would have deleted {group}.') + filters_provided = any( + [all_users or user or past_days or startswith or endswith or contains or node or type_string] + ) + + if groups and filters_provided: + echo.echo_critical('Cannot specify both GROUPS and any of the other filters.') + + if not groups and filters_provided: + import datetime + + from aiida.common import timezone + from aiida.common.escaping import escape_for_sql_like + + builder = orm.QueryBuilder() + filters = {} - if delete_nodes: + # Note: we could have set 'core' as a default value for type_string, + # but for the sake of uniform interface, we decided to keep the default value of None. + # Otherwise `verdi group delete 123 -T core` would have worked, but we say + # 'Cannot specify both GROUPS and any of the other filters'. + if type_string is None: + type_string = 'core' - def _dry_run_callback(pks): - if not pks or force: - return False - echo.echo_warning(f'YOU ARE ABOUT TO DELETE {len(pks)} NODES! THIS CANNOT BE UNDONE!') - return not click.confirm('Do you want to continue?', abort=True) + if '%' in type_string or '_' in type_string: + filters['type_string'] = {'like': type_string} + else: + filters['type_string'] = type_string + + # Creation time + if past_days: + filters['time'] = {'>': timezone.now() - datetime.timedelta(days=past_days)} + + # Query for specific group labels + filters['or'] = [] + if startswith: + filters['or'].append({'label': {'like': f'{escape_for_sql_like(startswith)}%'}}) + if endswith: + filters['or'].append({'label': {'like': f'%{escape_for_sql_like(endswith)}'}}) + if contains: + filters['or'].append({'label': {'like': f'%{escape_for_sql_like(contains)}%'}}) + + builder.append(orm.Group, filters=filters, tag='group', project='*') + + # Query groups that belong to specific user + if user: + user_email = user.email + else: + # By default: only groups of this user + user_email = orm.User.collection.get_default().email - _, nodes_deleted = delete_group_nodes([group.pk], dry_run=dry_run or _dry_run_callback, **traversal_rules) - if not nodes_deleted: - # don't delete the group if the nodes were not deleted + # Query groups that belong to all users + if not all_users: + builder.append(orm.User, filters={'email': user_email}, with_group='group') + + # Query groups that contain a particular node + if node: + builder.append(orm.Node, filters={'id': node.pk}, with_group='group') + + groups = builder.all(flat=True) + if not groups: + echo.echo_report('No groups found matching the specified criteria.') return - if not dry_run: + elif not groups and not filters_provided: + echo.echo_report('Nothing happened. 
Please specify at least one group or provide filters to query groups.') + return + + projection_lambdas = { + 'pk': lambda group: str(group.pk), + 'label': lambda group: group.label, + 'type_string': lambda group: group.type_string, + 'count': lambda group: group.count(), + 'user': lambda group: group.user.email.strip(), + 'description': lambda group: group.description, + } + + table = [] + projection_header = ['PK', 'Label', 'Type string', 'User'] + projection_fields = ['pk', 'label', 'type_string', 'user'] + for group in groups: + table.append([projection_lambdas[field](group) for field in projection_fields]) + + if not (force or dry_run): + echo.echo_report('The following groups will be deleted:') + echo.echo(tabulate(table, headers=projection_header)) + click.confirm('Are you sure you want to continue?', abort=True) + elif dry_run: + echo.echo_report('Would have deleted:') + echo.echo(tabulate(table, headers=projection_header)) + + for group in groups: group_str = str(group) - orm.Group.collection.delete(group.pk) - echo.echo_success(f'{group_str} deleted.') + + if delete_nodes: + + def _dry_run_callback(pks): + if not pks or force: + return False + echo.echo_warning( + f'YOU ARE ABOUT TO DELETE {len(pks)} NODES ASSOCIATED WITH {group_str}! THIS CANNOT BE UNDONE!' + ) + return not click.confirm('Do you want to continue?', abort=True) + + _, nodes_deleted = delete_group_nodes([group.pk], dry_run=dry_run or _dry_run_callback, **traversal_rules) + if not nodes_deleted: + # don't delete the group if the nodes were not deleted + return + + if not dry_run: + orm.Group.collection.delete(group.pk) + echo.echo_success(f'{group_str} deleted.') @verdi_group.command('relabel') @@ -273,7 +404,7 @@ def group_show(group, raw, limit, uuid): @options.ALL_USERS(help='Show groups for all users, rather than only for the current user.') @options.USER(help='Add a filter to show only groups belonging to a specific user.') @options.ALL(help='Show groups of all types.') -@options.TYPE_STRING() +@options.TYPE_STRING(default='core', help='Filter to only include groups of this type string.') @click.option( '-d', '--with-description', 'with_description', is_flag=True, default=False, help='Show also the group description.' ) @@ -302,7 +433,7 @@ def group_show(group, raw, limit, uuid): ) @options.ORDER_BY(type=click.Choice(['id', 'label', 'ctime']), default='label') @options.ORDER_DIRECTION() -@options.NODE(help='Show only the groups that contain the node.') +@options.NODE(help='Show only the groups that contain this node.') @with_dbenv() def group_list( all_users, @@ -331,12 +462,6 @@ def group_list( builder = orm.QueryBuilder() filters = {} - # Have to specify the default for `type_string` here instead of directly in the option otherwise it will always - # raise above if the user specifies just the `--group-type` option. Once that option is removed, the default can - # be moved to the option itself. 
- if type_string is None: - type_string = 'core' - if not all_entries: if '%' in type_string or '_' in type_string: filters['type_string'] = {'like': type_string} @@ -367,11 +492,11 @@ def group_list( # Query groups that belong to all users if not all_users: - builder.append(orm.User, filters={'email': {'==': user_email}}, with_group='group') + builder.append(orm.User, filters={'email': user_email}, with_group='group') # Query groups that contain a particular node if node: - builder.append(orm.Node, filters={'id': {'==': node.pk}}, with_group='group') + builder.append(orm.Node, filters={'id': node.pk}, with_group='group') builder.order_by({orm.Group: {order_by: order_dir}}) diff --git a/tests/cmdline/commands/test_group.py b/tests/cmdline/commands/test_group.py index b88bf6db91..fa319276f1 100644 --- a/tests/cmdline/commands/test_group.py +++ b/tests/cmdline/commands/test_group.py @@ -109,49 +109,218 @@ def test_delete(self, run_cli_command): orm.Group(label='group_test_delete_01').store() orm.Group(label='group_test_delete_02').store() orm.Group(label='group_test_delete_03').store() + do_not_delete_user = orm.User(email='user0@example.com') + do_not_delete_group = orm.Group(label='do_not_delete_group', user=do_not_delete_user).store() + do_not_delete_node = orm.CalculationNode().store() + do_not_delete_group.add_nodes(do_not_delete_node) + do_not_delete_user.store() - # dry run - result = run_cli_command(cmd_group.group_delete, ['--dry-run', 'group_test_delete_01'], use_subprocess=True) + # 0) do nothing if no groups or no filters are passed + result = run_cli_command(cmd_group.group_delete, ['--force']) + assert 'Nothing happened' in result.output + + # 1) dry run + result = run_cli_command( + cmd_group.group_delete, + ['--dry-run', 'group_test_delete_01'], + ) orm.load_group(label='group_test_delete_01') - result = run_cli_command(cmd_group.group_delete, ['--force', 'group_test_delete_01'], use_subprocess=True) + # 2) Delete group, basic test + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'group_test_delete_01'], + ) + assert 'do_not_delete_group' not in result.output - # Verify that removed group is not present in list - result = run_cli_command(cmd_group.group_list, use_subprocess=True) + result = run_cli_command( + cmd_group.group_list, + ) assert 'group_test_delete_01' not in result.output + # 3) Add some nodes and then use `verdi group delete` to delete a group that contains nodes node_01 = orm.CalculationNode().store() node_02 = orm.CalculationNode().store() node_pks = {node_01.pk, node_02.pk} - # Add some nodes and then use `verdi group delete` to delete a group that contains nodes group = orm.load_group(label='group_test_delete_02') group.add_nodes([node_01, node_02]) assert group.count() == 2 - result = run_cli_command(cmd_group.group_delete, ['--force', 'group_test_delete_02'], use_subprocess=True) + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'group_test_delete_02'], + ) with pytest.raises(exceptions.NotExistent): orm.load_group(label='group_test_delete_02') - # check nodes still exist for pk in node_pks: orm.load_node(pk) - # delete the group and the nodes it contains + # 4) Delete the group and the nodes it contains group = orm.load_group(label='group_test_delete_03') group.add_nodes([node_01, node_02]) result = run_cli_command( - cmd_group.group_delete, ['--force', '--delete-nodes', 'group_test_delete_03'], use_subprocess=True + cmd_group.group_delete, + ['--force', '--delete-nodes', 'group_test_delete_03'], ) - # check group and 
nodes no longer exist with pytest.raises(exceptions.NotExistent): orm.load_group(label='group_test_delete_03') for pk in node_pks: with pytest.raises(exceptions.NotExistent): orm.load_node(pk) + # 5) Should delete an empty group even if --delete-nodes option is passed + group = orm.Group(label='group_test_delete_04').store() + result = run_cli_command(cmd_group.group_delete, ['--force', '--delete-nodes', 'group_test_delete_04']) + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='group_test_delete_04') + + # 6) Should raise if a group does not exist + result = run_cli_command(cmd_group.group_delete, ['--force', 'non_existent_group'], raises=True) + assert b'no Group found with LABEL' in result.stderr_bytes + + # 7) Should delete multiple groups + orm.Group(label='group_test_delete_05').store() + orm.Group(label='group_test_delete_06').store() + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'group_test_delete_05', 'group_test_delete_06'], + ) + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='group_test_delete_05') + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='group_test_delete_06') + assert 'do_not_delete_group' not in result.output + + # 8) Should raise if both groups and query options are passed + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'do_not_delete_group', '--all-users'], + raises=True, + ) + assert b'Cannot specify both GROUPS and any of the other filters' in result.stderr_bytes + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'do_not_delete_group', '--user', do_not_delete_user.email], + raises=True, + ) + assert b'Cannot specify both GROUPS and any of the other filters' in result.stderr_bytes + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'do_not_delete_group', '--type-string', 'non_existent'], + raises=True, + ) + assert b'Cannot specify both GROUPS and any of the other filters' in result.stderr_bytes + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'do_not_delete_group', '--past-days', '1'], + raises=True, + ) + assert b'Cannot specify both GROUPS and any of the other filters' in result.stderr_bytes + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'do_not_delete_group', '--startswith', 'non_existent'], + raises=True, + ) + assert b'Cannot specify both GROUPS and any of the other filters' in result.stderr_bytes + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'do_not_delete_group', '--endswith', 'non_existent'], + raises=True, + ) + assert b'Cannot specify both GROUPS and any of the other filters' in result.stderr_bytes + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'do_not_delete_group', '--contains', 'non_existent'], + raises=True, + ) + assert b'Cannot specify both GROUPS and any of the other filters' in result.stderr_bytes + result = run_cli_command( + cmd_group.group_delete, + ['--force', 'do_not_delete_group', '--node', do_not_delete_node.pk], + raises=True, + ) + assert b'Cannot specify both GROUPS and any of the other filters' in result.stderr_bytes + + # 9) --user should delete groups for a specific user + # --all-users should delete groups for all users + user1 = orm.User(email='user1@example.com') + user2 = orm.User(email='user2@example.com') + user3 = orm.User(email='user3@example.com') + user1.store() + user2.store() + user3.store() + + orm.Group(label='group_test_delete_08', user=user1).store() + orm.Group(label='group_test_delete_09', 
user=user2).store() + orm.Group(label='group_test_delete_10', user=user3).store() + + result = run_cli_command( + cmd_group.group_delete, + ['--force', '--user', user1.email], + ) + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='group_test_delete_08') + assert 'group_test_delete_09' not in result.output + assert 'group_test_delete_10' not in result.output + + result = run_cli_command( + cmd_group.group_delete, + ['--force', '--all-users'], + ) + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='group_test_delete_09') + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='group_test_delete_10') + + # 10) --startswith, --endswith, --contains should delete groups with labels that match the filter + orm.Group(label='START_13').store() + orm.Group(label='14_END').store() + orm.Group(label='contains_SOMETHING_').store() + + result = run_cli_command( + cmd_group.group_delete, + ['--force', '--startswith', 'START'], + ) + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='START_13') + assert '14_END' not in result.output + assert 'contains_SOMETHING_' not in result.output + assert 'do_not_delete_group' not in result.output + + result = run_cli_command( + cmd_group.group_delete, + ['--force', '--endswith', 'END'], + ) + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='14_END') + assert 'contains_SOMETHING_' not in result.output + assert 'do_not_delete_group' not in result.output + + result = run_cli_command( + cmd_group.group_delete, + ['--force', '--contains', 'SOMETHING'], + ) + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='contains_SOMETHING_') + assert 'do_not_delete_group' not in result.output + + # 11) --node should delete only groups that contain a specific node + node = orm.CalculationNode().store() + group = orm.Group(label='group_test_delete_15').store() + group.add_nodes(node) + + result = run_cli_command( + cmd_group.group_delete, + ['--force', '--node', node.uuid], + ) + with pytest.raises(exceptions.NotExistent): + orm.load_group(label='group_test_delete_15') + assert 'do_not_delete_group' not in result.output + def test_show(self, run_cli_command): """Test `verdi group show` command.""" result = run_cli_command(cmd_group.group_show, ['dummygroup1'], use_subprocess=True) From ab0c0ec90256e88526ac6a32e8bcc063a5b6ab49 Mon Sep 17 00:00:00 2001 From: Alexander Goscinski Date: Thu, 26 Sep 2024 20:33:10 +0200 Subject: [PATCH 80/82] wip --- .github/workflows/docker-build.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index b278ec8349..609d0e4b7b 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -70,6 +70,9 @@ jobs: docker-bake.hcl build.json + - name: Setup upterm session + uses: lhotari/action-upterm@v1 + - name: Set output variables id: bake_metadata run: | From 502396af7b9385e70da011e1678af9c816c6af9e Mon Sep 17 00:00:00 2001 From: Alexander Goscinski Date: Fri, 27 Sep 2024 10:28:53 +0200 Subject: [PATCH 81/82] fix jq query --- .github/workflows/docker-build.yml | 3 -- .../workflows/extract-docker-image-names.sh | 35 +++++++++---------- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 609d0e4b7b..b278ec8349 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -70,9 +70,6 @@ jobs: docker-bake.hcl build.json - - name: Setup 
upterm session - uses: lhotari/action-upterm@v1 - - name: Set output variables id: bake_metadata run: | diff --git a/.github/workflows/extract-docker-image-names.sh b/.github/workflows/extract-docker-image-names.sh index 8609f7c385..9b4dcbee6e 100755 --- a/.github/workflows/extract-docker-image-names.sh +++ b/.github/workflows/extract-docker-image-names.sh @@ -9,38 +9,35 @@ set -euo pipefail # The input to this script is a JSON string passed via BAKE_METADATA env variable # Here's example input (trimmed to relevant bits): # BAKE_METADATA: { -# "base": { +# "aiida-core-base": { +# # ... # "containerimage.descriptor": { # "mediaType": "application/vnd.docker.distribution.manifest.v2+json", # "digest": "sha256:8e57a52b924b67567314b8ed3c968859cad99ea13521e60bbef40457e16f391d", # "size": 6170, # }, # "containerimage.digest": "sha256:8e57a52b924b67567314b8ed3c968859cad99ea13521e60bbef40457e16f391d", -# "image.name": "ghcr.io/aiidalab/base" -# }, -# "aiida-core-base": { # "image.name": "ghcr.io/aiidateam/aiida-core-base" -# "containerimage.digest": "sha256:6753a809b5b2675bf4c22408e07c1df155907a465b33c369ef93ebcb1c4fec26", -# "...": "" -# } -# "aiida-core-with-services": { -# "image.name": "ghcr.io/aiidateam/aiida-core-with-services" -# "containerimage.digest": "sha256:85ee91f61be1ea601591c785db038e5899d68d5fb89e07d66d9efbe8f352ee48", -# "...": "" -# } +# }, # "aiida-core-dev": { -# "image.name": "ghcr.io/aiidateam/aiida-core-with-services" # "containerimage.digest": "sha256:4d9be090da287fcdf2d4658bb82f78bad791ccd15dac9af594fb8306abe47e97", +# "...": ... +# "image.name": "ghcr.io/aiidateam/aiida-core-dev" +# }, +# "aiida-core-with-services": { # "...": "" -# } +# "containerimage.digest": "sha256:85ee91f61be1ea601591c785db038e5899d68d5fb89e07d66d9efbe8f352ee48", +# "image.name": "ghcr.io/aiidateam/aiida-core-with-services" +# }, +# "some-other-key": ... # } # # Example output (real output is on one line): # # images={ -# "AIIDA_CORE_BASE_IMAGE": "ghcr.io/aiidateam/aiida-core-base@sha256:8e57a52b924b67567314b8ed3c968859cad99ea13521e60bbef40457e16f391d", -# "AIIDA_CORE_WITH_SERVICES_IMAGE": "ghcr.io/aiidateam/aiida-core-with-services@sha256:6753a809b5b2675bf4c22408e07c1df155907a465b33c369ef93ebcb1c4fec26", -# "AIIDA_CORE_DEV_IMAGE": "ghcr.io/aiidateam/aiida-core-dev@sha256:85ee91f61be1ea601591c785db038e5899d68d5fb89e07d66d9efbe8f352ee48", +# "AIIDA_CORE_BASE_IMAGE": "ghcr.io/aiidateam/aiida-core-base@sha256:4c402a8bfd635650ad691674f8f29e7ddec5fa656fb425452067950415ee447f", +# "AIIDA_CORE_DEV_IMAGE": "ghcr.io/aiidateam/aiida-core-dev@sha256:f94c06e47f801e751f9829010b31532039b210aad2649d43205e16c08371b2ed", +# "AIIDA_CORE_WITH_SERVICES_IMAGE": "ghcr.io/aiidateam/aiida-core-with-services@sha256:bd8272f2a331af7eac3e83c44cc16d23b2e5f601a20ab4a865402659b758515e" # } # # This json output is later turned to environment variables using fromJson() GHA builtin @@ -52,5 +49,7 @@ if [[ -z ${BAKE_METADATA-} ]];then exit 1 fi -images=$(echo "${BAKE_METADATA}" | jq -c '. as $base |[to_entries[] |{"key": (.key|ascii_upcase|sub("-"; "_"; "g") + "_IMAGE"), "value": [(.value."image.name"|split(",")[0]),.value."containerimage.digest"]|join("@")}] |from_entries') +images=$(echo "${BAKE_METADATA}" \ +jq -c 'to_entries | map(select(.key | startswith("aiida"))) | from_entries' | # filters out every key that does not start with aiida +jq -c '. 
as $base |[to_entries[] |{"key": (.key|ascii_upcase|sub("-"; "_"; "g") + "_IMAGE"), "value": [(.value."image.name"|split(",")[0]),.value."containerimage.digest"]|join("@")}] |from_entries') echo "images=$images" From 015c5502a3856f82812129f4b77896f62f861592 Mon Sep 17 00:00:00 2001 From: Alexander Goscinski Date: Fri, 27 Sep 2024 10:42:14 +0200 Subject: [PATCH 82/82] fix typo --- .github/workflows/extract-docker-image-names.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/extract-docker-image-names.sh b/.github/workflows/extract-docker-image-names.sh index 9b4dcbee6e..e395432ddb 100755 --- a/.github/workflows/extract-docker-image-names.sh +++ b/.github/workflows/extract-docker-image-names.sh @@ -49,7 +49,7 @@ if [[ -z ${BAKE_METADATA-} ]];then exit 1 fi -images=$(echo "${BAKE_METADATA}" \ +images=$(echo "${BAKE_METADATA}" | jq -c 'to_entries | map(select(.key | startswith("aiida"))) | from_entries' | # filters out every key that does not start with aiida jq -c '. as $base |[to_entries[] |{"key": (.key|ascii_upcase|sub("-"; "_"; "g") + "_IMAGE"), "value": [(.value."image.name"|split(",")[0]),.value."containerimage.digest"]|join("@")}] |from_entries') echo "images=$images"
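A hedged Python equivalent of the final jq pipeline, handy for checking the expected `images` mapping locally; the function name is illustrative:

    # Keep only the bake targets whose key starts with "aiida", then map each to
    # NAME_IMAGE -> "<image.name before the first comma>@<containerimage.digest>",
    # mirroring the transformation documented in the script header.
    import json


    def extract_image_names(bake_metadata: str) -> dict:
        images = {}
        for key, value in json.loads(bake_metadata).items():
            if not key.startswith('aiida'):
                continue
            env_key = key.upper().replace('-', '_') + '_IMAGE'
            name = value['image.name'].split(',')[0]
            images[env_key] = f"{name}@{value['containerimage.digest']}"
        return images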