From df6aeceefa2770dc78f25a757a175a22f7083050 Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Tue, 25 Apr 2023 15:41:27 +0200 Subject: [PATCH] New docker image based on pure ubuntu and gracefully handle services Minimal working version of the basic docker image build locally base image can run First working with-service stack adapt dodo script to build for correct arch Use mamba image back again workable s6-overlay solution Properly tear down the aiida daemon Fixes CI test pass locally fix env variable in s6 scripts suppress dev version warning [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci push ghcr only when it is pull request form org upload artifact which is time consuming only for PR from org Add a CI action only for container build not run some container action from fork Add doc for the docker build and how to use the new docker stack Fixes the package permission pre-commit fix - pg log to home folder Fixes CI errors [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Update pyproject.toml Update .github/workflows/docker.yml exclude .docker files from yapf and pylint Update .docker/dodo.py With git, vim, ssh client pre-installed more doc on how to start the exist container Add .local/bin to PATH for packages install by user Pin PyYAML<5.3 Looks like the particular issue is caused by a new release of Cython which breaks PyYAML builds. yaml/pyyaml#724 https://github.com/yaml/pyyaml/issues/724 Pinning PyYAML to earlier version seems to do the trick for now... --- .docker/README.md | 16 ++ .docker/base-with-services/Dockerfile | 42 +++++ .../s6-assets/config-quick-setup.yaml | 3 + .../s6-assets/init/postgresql-init.sh | 24 +++ .../s6-assets/init/postgresql-prepare.sh | 8 + .../s6-assets/init/rabbitmq-init.sh | 25 +++ .../s6-rc.d/aiida-prepare/dependencies.d/base | 0 .../aiida-prepare/dependencies.d/postgresql | 0 .../dependencies.d/postgresql-prepare | 0 .../postgresql-init/dependencies.d/base | 0 .../s6-rc.d/postgresql-init/timeout-up | 1 + .../s6-assets/s6-rc.d/postgresql-init/type | 1 + .../s6-assets/s6-rc.d/postgresql-init/up | 6 + .../postgresql-prepare/dependencies.d/base | 0 .../s6-rc.d/postgresql-prepare/timeout-up | 1 + .../s6-assets/s6-rc.d/postgresql-prepare/type | 1 + .../s6-assets/s6-rc.d/postgresql-prepare/up | 6 + .../s6-rc.d/postgresql/dependencies.d/base | 0 .../postgresql/dependencies.d/postgresql-init | 0 .../s6-assets/s6-rc.d/postgresql/down | 1 + .../s6-assets/s6-rc.d/postgresql/timeout-up | 1 + .../s6-assets/s6-rc.d/postgresql/type | 1 + .../s6-assets/s6-rc.d/postgresql/up | 5 + .../s6-rc.d/rabbitmq-init/dependencies.d/base | 0 .../s6-rc.d/rabbitmq-init/timeout-up | 1 + .../s6-assets/s6-rc.d/rabbitmq-init/type | 1 + .../s6-assets/s6-rc.d/rabbitmq-init/up | 5 + .../s6-rc.d/rabbitmq/dependencies.d/base | 0 .../rabbitmq/dependencies.d/rabbitmq-init | 0 .../s6-assets/s6-rc.d/rabbitmq/down-signal | 1 + .../s6-assets/s6-rc.d/rabbitmq/run | 6 + .../s6-assets/s6-rc.d/rabbitmq/type | 1 + .../s6-rc.d/user/contents.d/aiida-prepare | 0 .../s6-rc.d/user/contents.d/postgresql | 0 .../s6-rc.d/user/contents.d/postgresql-init | 0 .../user/contents.d/postgresql-prepare | 0 .../s6-rc.d/user/contents.d/rabbitmq | 0 .../s6-rc.d/user/contents.d/rabbitmq-init | 0 .docker/base/Dockerfile | 174 ++++++++++++++++++ .docker/base/fix-permissions | 35 ++++ .docker/base/initial-condarc | 6 + 
.../base/s6-assets/config-quick-setup.yaml | 15 ++ .../s6-assets/init/aiida-prepare.sh} | 80 ++++---- .../dependencies.d/aiida-prepare | 0 .../aiida-daemon-start/dependencies.d/base | 0 .../s6-assets/s6-rc.d/aiida-daemon-start/down | 1 + .../s6-rc.d/aiida-daemon-start/timeout-up | 1 + .../s6-assets/s6-rc.d/aiida-daemon-start/type | 1 + .../s6-assets/s6-rc.d/aiida-daemon-start/up | 3 + .../s6-rc.d/aiida-prepare/dependencies.d/base | 0 .../s6-rc.d/aiida-prepare/timeout-up | 1 + .../base/s6-assets/s6-rc.d/aiida-prepare/type | 1 + .../base/s6-assets/s6-rc.d/aiida-prepare/up | 4 + .../user/contents.d/aiida-daemon-start | 0 .../s6-rc.d/user/contents.d/aiida-prepare | 0 .docker/build.json | 13 ++ .docker/docker-bake.hcl | 68 +++++++ .docker/docker-compose.base-with-services.yml | 15 ++ .docker/docker-compose.base.yml | 49 +++++ .docker/docker-rabbitmq.yml | 34 ---- .docker/dodo.py | 123 +++++++++++++ .docker/my_init.d/configure-aiida.sh | 4 - .docker/pytest.ini | 5 + .docker/requirements.txt | 10 + .docker/tests/conftest.py | 61 ++++++ .docker/tests/test_aiida.py | 32 ++++ .dockerignore | 13 -- .github/actions/create-dev-env/action.yml | 27 +++ .github/actions/load-image/action.yml | 31 ++++ .../workflows/build_and_test_docker_on_pr.yml | 65 ------- .../workflows/docker-build-test-upload.yml | 63 +++++++ .github/workflows/docker-merge-tags.yml | 66 +++++++ .github/workflows/docker-push.yml | 96 ++++++++++ .github/workflows/docker.yml | 101 ++++++++++ .github/workflows/push_image_to_dockerhub.yml | 54 ------ .gitignore | 4 + .pre-commit-config.yaml | 9 +- Dockerfile | 23 --- docs/source/intro/run_docker.rst | 50 ++--- pyproject.toml | 2 +- 80 files changed, 1235 insertions(+), 262 deletions(-) create mode 100644 .docker/README.md create mode 100644 .docker/base-with-services/Dockerfile create mode 100644 .docker/base-with-services/s6-assets/config-quick-setup.yaml create mode 100755 .docker/base-with-services/s6-assets/init/postgresql-init.sh create mode 100755 .docker/base-with-services/s6-assets/init/postgresql-prepare.sh create mode 100755 .docker/base-with-services/s6-assets/init/rabbitmq-init.sh create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql-prepare create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/dependencies.d/base create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/timeout-up create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/type create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/up create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/dependencies.d/base create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/timeout-up create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/type create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/up create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/base create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/postgresql-init create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql/down create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql/timeout-up create mode 
100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql/type create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/postgresql/up create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/dependencies.d/base create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/timeout-up create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/type create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/up create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/base create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/rabbitmq-init create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/down-signal create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/run create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/type create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/aiida-prepare create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-init create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-prepare create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq create mode 100644 .docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq-init create mode 100644 .docker/base/Dockerfile create mode 100644 .docker/base/fix-permissions create mode 100644 .docker/base/initial-condarc create mode 100644 .docker/base/s6-assets/config-quick-setup.yaml rename .docker/{opt/configure-aiida.sh => base/s6-assets/init/aiida-prepare.sh} (56%) create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/aiida-prepare create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/base create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-daemon-start/down create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-daemon-start/timeout-up create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-daemon-start/type create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-daemon-start/up create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-prepare/timeout-up create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-prepare/type create mode 100644 .docker/base/s6-assets/s6-rc.d/aiida-prepare/up create mode 100644 .docker/base/s6-assets/s6-rc.d/user/contents.d/aiida-daemon-start create mode 100644 .docker/base/s6-assets/s6-rc.d/user/contents.d/aiida-prepare create mode 100644 .docker/build.json create mode 100644 .docker/docker-bake.hcl create mode 100644 .docker/docker-compose.base-with-services.yml create mode 100644 .docker/docker-compose.base.yml delete mode 100644 .docker/docker-rabbitmq.yml create mode 100644 .docker/dodo.py delete mode 100755 .docker/my_init.d/configure-aiida.sh create mode 100644 .docker/pytest.ini create mode 100644 .docker/requirements.txt create mode 100644 .docker/tests/conftest.py create mode 100644 .docker/tests/test_aiida.py delete mode 100644 .dockerignore create mode 100644 .github/actions/create-dev-env/action.yml create mode 100644 .github/actions/load-image/action.yml delete mode 100644 .github/workflows/build_and_test_docker_on_pr.yml create mode 100644 .github/workflows/docker-build-test-upload.yml create mode 100644 
.github/workflows/docker-merge-tags.yml create mode 100644 .github/workflows/docker-push.yml create mode 100644 .github/workflows/docker.yml delete mode 100644 .github/workflows/push_image_to_dockerhub.yml delete mode 100644 Dockerfile diff --git a/.docker/README.md b/.docker/README.md new file mode 100644 index 0000000000..81c74e43de --- /dev/null +++ b/.docker/README.md @@ -0,0 +1,16 @@ +# AiiDA docker stacks + +### Build images locally + +To build the images, run `doit build` (tested with *docker buildx* version v0.8.2). + +The build system will attempt to detect the local architecture and automatically build images for it (tested with amd64 and arm64). +All commands `build`, `tests`, and `up` will use the locally detected platform and a version tag based on the state of the local git repository. +However, you can also specify a custom platform or version with the `--arch` and `--version` parameters, example: `doit build --arch=amd64 --version=my-version`. + +You can specify target stacks to build with `--target`, example: `doit build --target base --target base-with-services`. + +### Trigger a build on ghcr.io and dockerhub + +Only PRs opened from the organization repository (not from forks) will trigger a build on ghcr.io. +A push to Docker Hub is triggered when a release is made on GitHub. diff --git a/.docker/base-with-services/Dockerfile b/.docker/base-with-services/Dockerfile new file mode 100644 index 0000000000..a8aeba7e52 --- /dev/null +++ b/.docker/base-with-services/Dockerfile @@ -0,0 +1,42 @@ +# syntax=docker/dockerfile:1 +FROM base + +LABEL maintainer="AiiDA Team " + +USER root +WORKDIR /opt/ + +ARG PGSQL_VERSION +ARG RMQ_VERSION + +ENV PGSQL_VERSION=${PGSQL_VERSION} +ENV RMQ_VERSION=${RMQ_VERSION} + +RUN mamba install --yes \ + --channel conda-forge \ + postgresql=${PGSQL_VERSION} && \ + mamba clean --all -f -y && \ + fix-permissions "${CONDA_DIR}" && \ + fix-permissions "/home/${SYSTEM_USER}" + +# Install erlang. +RUN apt-get update --yes && \ + apt-get install --yes --no-install-recommends \ + erlang \ + xz-utils && \ + apt-get clean && rm -rf /var/lib/apt/lists/* && \ + # Install rabbitmq. + wget -c --no-check-certificate https://github.com/rabbitmq/rabbitmq-server/releases/download/v${RMQ_VERSION}/rabbitmq-server-generic-unix-${RMQ_VERSION}.tar.xz && \ + tar -xf rabbitmq-server-generic-unix-${RMQ_VERSION}.tar.xz && \ + rm rabbitmq-server-generic-unix-${RMQ_VERSION}.tar.xz && \ + ln -sf /opt/rabbitmq_server-${RMQ_VERSION}/sbin/* /usr/local/bin/ && \ + fix-permissions /opt/rabbitmq_server-${RMQ_VERSION} + +# s6-overlay to start services +COPY --chown="${SYSTEM_UID}:${SYSTEM_GID}" s6-assets/config-quick-setup.yaml "/aiida/assets/config-quick-setup.yaml" +COPY s6-assets/s6-rc.d /etc/s6-overlay/s6-rc.d +COPY s6-assets/init /etc/init + +USER ${SYSTEM_UID} + +WORKDIR "/home/${SYSTEM_USER}" diff --git a/.docker/base-with-services/s6-assets/config-quick-setup.yaml b/.docker/base-with-services/s6-assets/config-quick-setup.yaml new file mode 100644 index 0000000000..24c516270d --- /dev/null +++ b/.docker/base-with-services/s6-assets/config-quick-setup.yaml @@ -0,0 +1,3 @@ +--- +db_name: aiida_db +db_username: aiida diff --git a/.docker/base-with-services/s6-assets/init/postgresql-init.sh b/.docker/base-with-services/s6-assets/init/postgresql-init.sh new file mode 100755 index 0000000000..0d3556f453 --- /dev/null +++ b/.docker/base-with-services/s6-assets/init/postgresql-init.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# make DB directory, if not existent +if [ !
-d /home/${SYSTEM_USER}/.postgresql ]; then + mkdir /home/${SYSTEM_USER}/.postgresql + initdb -D /home/${SYSTEM_USER}/.postgresql + echo "unix_socket_directories = '/tmp'" >> /home/${SYSTEM_USER}/.postgresql/postgresql.conf +fi + +PSQL_STATUS_CMD="pg_ctl -D /home/${SYSTEM_USER}/.postgresql status" + +# Fix a problem with Kubernetes clusters that add rws permissions to the group +# for more details see: https://github.com/materialscloud-org/aiidalab-z2jh-eosc/issues/5 +chmod g-rwxs /home/${SYSTEM_USER}/.postgresql -R + +# stores return value in $? +running=true +${PSQL_STATUS_CMD} > /dev/null 2>&1 || running=false + +# PostgreSQL was probably not shut down properly. Cleaning up the mess... +if ! $running ; then + echo "" > /home/${SYSTEM_USER}/.postgresql/logfile # empty log files + rm -vf /home/${SYSTEM_USER}/.postgresql/postmaster.pid +fi diff --git a/.docker/base-with-services/s6-assets/init/postgresql-prepare.sh b/.docker/base-with-services/s6-assets/init/postgresql-prepare.sh new file mode 100755 index 0000000000..580ee47106 --- /dev/null +++ b/.docker/base-with-services/s6-assets/init/postgresql-prepare.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +PG_ISREADY=1 +while [ "$PG_ISREADY" != "0" ]; do + sleep 1 + pg_isready --quiet + PG_ISREADY=$? +done diff --git a/.docker/base-with-services/s6-assets/init/rabbitmq-init.sh b/.docker/base-with-services/s6-assets/init/rabbitmq-init.sh new file mode 100755 index 0000000000..f4c6a0766f --- /dev/null +++ b/.docker/base-with-services/s6-assets/init/rabbitmq-init.sh @@ -0,0 +1,25 @@ +#!/bin/bash +RABBITMQ_DATA_DIR="/home/${SYSTEM_USER}/.rabbitmq" + +mkdir -p "${RABBITMQ_DATA_DIR}" +fix-permissions "${RABBITMQ_DATA_DIR}" + +# Fix issue where the erlang cookie permissions are corrupted. +chmod 400 "/home/${SYSTEM_USER}/.erlang.cookie" || echo "erlang cookie not created yet." + +# Set base directory for RabbitMQ to persist its data. This needs to be set to a folder in the system user's home +# directory as that is the only folder that is persisted outside of the container. +RMQ_ETC_DIR="/opt/rabbitmq_server-${RMQ_VERSION}/etc/rabbitmq" +echo MNESIA_BASE="${RABBITMQ_DATA_DIR}" >> "${RMQ_ETC_DIR}/rabbitmq-env.conf" +echo LOG_BASE="${RABBITMQ_DATA_DIR}/log" >> "${RMQ_ETC_DIR}/rabbitmq-env.conf" + +# using workaround from https://github.com/aiidateam/aiida-core/wiki/RabbitMQ-version-to-use +# set timeout to 100 hours +echo "consumer_timeout=3600000" >> "${RMQ_ETC_DIR}/rabbitmq.conf" + +# Explicitly define the node name. This is necessary because the mnesia subdirectory contains the hostname, which by +# default is set to the value of $(hostname -s), which for docker containers will be a random hexadecimal string. Upon +# restart, this will be different and so the original mnesia folder with the persisted data will not be found. RabbitMQ +# is built this way because it allows running multiple nodes on a single machine, each with an isolated mnesia +# directory. Since in the AiiDA setup we only need and run a single node, we can simply use localhost.
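As a quick sanity check (not part of the patch itself), the settings written by rabbitmq-init.sh can be verified from a shell inside a running container. A minimal sketch, assuming RMQ_VERSION is exported as in the Dockerfile above:

    # Hypothetical manual check of the RabbitMQ configuration written by this script.
    RMQ_ETC_DIR="/opt/rabbitmq_server-${RMQ_VERSION}/etc/rabbitmq"
    grep NODENAME "${RMQ_ETC_DIR}/rabbitmq-env.conf"       # expect NODENAME=rabbit@localhost
    grep consumer_timeout "${RMQ_ETC_DIR}/rabbitmq.conf"   # expect the raised consumer_timeout
    rabbitmq-diagnostics -q ping                           # the node should answer as rabbit@localhost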
+echo NODENAME=rabbit@localhost >> "${RMQ_ETC_DIR}/rabbitmq-env.conf" diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base b/.docker/base-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql b/.docker/base-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql-prepare b/.docker/base-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/dependencies.d/base b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/timeout-up b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/type b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/up b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/up new file mode 100644 index 0000000000..6fc0f06f57 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-init/up @@ -0,0 +1,6 @@ +#!/command/execlineb -S0 + +with-contenv + +foreground { s6-echo "Calling /etc/init/postgresql-init" } +/etc/init/postgresql-init.sh diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/dependencies.d/base b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/timeout-up b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/type b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/up b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/up new file mode 100644 index 0000000000..df5f5f83f9 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql-prepare/up @@ -0,0 +1,6 @@ +#!/command/execlineb -S0 + +with-contenv + +foreground { s6-echo "Calling /etc/init/postgresql-prepare" } +/etc/init/postgresql-prepare.sh diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/base b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/base new file mode 
100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/postgresql-init b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/postgresql-init new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/down b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/down new file mode 100644 index 0000000000..f2cc3c69b8 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/down @@ -0,0 +1 @@ +pg_ctl -D /home/aiida/.postgresql stop diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/timeout-up b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/type b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/up b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/up new file mode 100644 index 0000000000..776d110d6c --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/postgresql/up @@ -0,0 +1,5 @@ +#!/command/execlineb -P + +with-contenv + +pg_ctl -D /home/aiida/.postgresql -l /home/${SYSTEM_USER}/.postgresql/logfile start diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/dependencies.d/base b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/timeout-up b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/type b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/up b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/up new file mode 100644 index 0000000000..e574020053 --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq-init/up @@ -0,0 +1,5 @@ +#!/command/execlineb -S0 +with-contenv + +foreground { s6-echo "Calling /etc/init/rabbitmq-init.sh" } +/etc/init/rabbitmq-init.sh diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/base b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/rabbitmq-init b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/rabbitmq-init new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/down-signal b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/down-signal new file mode 100644 index 0000000000..d751378e19 --- /dev/null +++ 
b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/down-signal @@ -0,0 +1 @@ +SIGINT diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/run b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/run new file mode 100644 index 0000000000..e5752294ff --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/run @@ -0,0 +1,6 @@ +#!/command/execlineb -P + +with-contenv + +foreground { s6-echo "Calling /etc/init/rabbitmq.sh" } +rabbitmq-server diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/type b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/type new file mode 100644 index 0000000000..5883cff0cd --- /dev/null +++ b/.docker/base-with-services/s6-assets/s6-rc.d/rabbitmq/type @@ -0,0 +1 @@ +longrun diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/aiida-prepare b/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/aiida-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql b/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-init b/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-init new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-prepare b/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq b/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq-init b/.docker/base-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq-init new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base/Dockerfile b/.docker/base/Dockerfile new file mode 100644 index 0000000000..17307203ec --- /dev/null +++ b/.docker/base/Dockerfile @@ -0,0 +1,174 @@ +# syntax=docker/dockerfile:1 + +# Inspired by jupyter's docker-stacks-foundation image: +# https://github.com/jupyter/docker-stacks/blob/main/docker-stacks-foundation/Dockerfile + +ARG BASE=ubuntu:22.04 + +FROM $BASE + +LABEL maintainer="AiiDA Team " + +ARG SYSTEM_USER="aiida" +ARG SYSTEM_UID="1000" +ARG SYSTEM_GID="100" + + +# Fix: https://github.com/hadolint/hadolint/wiki/DL4006 +# Fix: https://github.com/koalaman/shellcheck/wiki/SC3014 +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +USER root + +ENV SYSTEM_USER="${SYSTEM_USER}" + +# Install all OS dependencies needed by the image, including a few +# development tools that are useful when working inside the container ENV DEBIAN_FRONTEND noninteractive RUN apt-get update --yes && \ + # - apt-get upgrade is run to patch known vulnerabilities in apt-get packages, as + # the ubuntu base image is sometimes rebuilt too seldom (less than once a month) + apt-get upgrade --yes && \ + apt-get install --yes --no-install-recommends \ + # - bzip2 is necessary to extract the micromamba executable. + bzip2 \ + # - xz-utils is necessary to extract the s6-overlay. + xz-utils \ + ca-certificates \ + locales \ + sudo \ + # development tools + git \ + openssh-client \ + vim \ + # the gcc compiler is needed to build some python packages, e.g.
psutil and pymatgen + build-essential \ + wget && \ + apt-get clean && rm -rf /var/lib/apt/lists/* && \ + echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && \ + locale-gen + +# Install s6-overlay to handle startup and shutdown of services +ARG S6_OVERLAY_VERSION=3.1.5.0 +RUN wget --progress=dot:giga -O /tmp/s6-overlay-noarch.tar.xz \ + "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz" && \ + tar -C / -Jxpf /tmp/s6-overlay-noarch.tar.xz && \ + rm /tmp/s6-overlay-noarch.tar.xz + +RUN set -x && \ + arch=$(uname -m) && \ + wget --progress=dot:giga -O /tmp/s6-overlay-binary.tar.xz \ + "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${arch}.tar.xz" && \ + tar -C / -Jxpf /tmp/s6-overlay-binary.tar.xz && \ + rm /tmp/s6-overlay-binary.tar.xz + +# Configure environment +ENV CONDA_DIR=/opt/conda \ + SHELL=/bin/bash \ + SYSTEM_USER="${SYSTEM_USER}" \ + SYSTEM_UID=${SYSTEM_UID} \ + SYSTEM_GID=${SYSTEM_GID} \ + LC_ALL=en_US.UTF-8 \ + LANG=en_US.UTF-8 \ + LANGUAGE=en_US.UTF-8 +ENV PATH="${CONDA_DIR}/bin:${PATH}" \ + HOME="/home/${SYSTEM_USER}" + + +# Copy a script that we will use to correct permissions after running certain commands +COPY fix-permissions /usr/local/bin/fix-permissions +RUN chmod a+rx /usr/local/bin/fix-permissions + +# Enable prompt color in the skeleton .bashrc before creating the default SYSTEM_USER +# hadolint ignore=SC2016 +RUN sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc && \ + # Add call to conda init script, see https://stackoverflow.com/a/58081608/4413446 + echo 'eval "$(command conda shell.bash hook 2> /dev/null)"' >> /etc/skel/.bashrc + +# Create the SYSTEM_USER (aiida by default) with UID=1000 and in the 'users' group +# and make sure these dirs are writable by the `users` group. +RUN echo "auth requisite pam_deny.so" >> /etc/pam.d/su && \ + sed -i.bak -e 's/^%admin/#%admin/' /etc/sudoers && \ + sed -i.bak -e 's/^%sudo/#%sudo/' /etc/sudoers && \ + useradd -l -m -s /bin/bash -N -u "${SYSTEM_UID}" "${SYSTEM_USER}" && \ + mkdir -p "${CONDA_DIR}" && \ + chown "${SYSTEM_USER}:${SYSTEM_GID}" "${CONDA_DIR}" && \ + chmod g+w /etc/passwd && \ + fix-permissions "${HOME}" && \ + fix-permissions "${CONDA_DIR}" + +USER ${SYSTEM_UID} + +# Pin python version here +ARG PYTHON_VERSION + +# Download and install Micromamba, and initialize Conda prefix.
+# +# Similar projects using Micromamba: +# - Micromamba-Docker: +# - repo2docker: +# Install Python and Mamba +# Cleanup temporary files and remove Micromamba +# Correct permissions +# Do all this in a single RUN command to avoid duplicating all of the +# files across image layers when the permissions change +COPY --chown="${SYSTEM_UID}:${SYSTEM_GID}" initial-condarc "${CONDA_DIR}/.condarc" +WORKDIR /tmp +RUN set -x && \ + arch=$(uname -m) && \ + if [ "${arch}" = "x86_64" ]; then \ + # Should be simpler, see + arch="64"; \ + fi && \ + wget --progress=dot:giga -O /tmp/micromamba.tar.bz2 \ + "https://micromamba.snakepit.net/api/micromamba/linux-${arch}/latest" && \ + tar -xvjf /tmp/micromamba.tar.bz2 --strip-components=1 bin/micromamba && \ + rm /tmp/micromamba.tar.bz2 && \ + PYTHON_SPECIFIER="python=${PYTHON_VERSION}" && \ + if [[ "${PYTHON_VERSION}" == "default" ]]; then PYTHON_SPECIFIER="python"; fi && \ + # Install the packages + ./micromamba install \ + --root-prefix="${CONDA_DIR}" \ + --prefix="${CONDA_DIR}" \ + --yes \ + "${PYTHON_SPECIFIER}" \ + 'mamba' && \ + rm micromamba && \ + # Pin major.minor version of python + mamba list python | grep '^python ' | tr -s ' ' | cut -d ' ' -f 1,2 >> "${CONDA_DIR}/conda-meta/pinned" && \ + mamba clean --all -f -y && \ + fix-permissions "${CONDA_DIR}" && \ + fix-permissions "/home/${SYSTEM_USER}" + +# Add ~/.local/bin to PATH, where pip installs packages for the user +# This requires packages to be installed with pip's `--user` flag +ENV PATH=${PATH}:/home/${SYSTEM_USER}/.local/bin + +# Switch to root to install AiiDA and set it up as a service +# Install AiiDA from source code +USER root +COPY --from=src . /tmp/aiida-core +RUN pip install /tmp/aiida-core --no-cache-dir && \ + rm -rf /tmp/aiida-core + +# Enable verdi autocompletion. +RUN mkdir -p "${CONDA_DIR}/etc/conda/activate.d" && \ + echo 'eval "$(_VERDI_COMPLETE=bash_source verdi)"' >> "${CONDA_DIR}/etc/conda/activate.d/activate_aiida_autocompletion.sh" && \ + chmod +x "${CONDA_DIR}/etc/conda/activate.d/activate_aiida_autocompletion.sh" && \ + fix-permissions "${CONDA_DIR}" + +# Copy the AiiDA profile configuration for the profile setup init script +COPY --chown="${SYSTEM_UID}:${SYSTEM_GID}" s6-assets/config-quick-setup.yaml "/aiida/assets/config-quick-setup.yaml" +COPY s6-assets/s6-rc.d /etc/s6-overlay/s6-rc.d +COPY s6-assets/init /etc/init + +# Otherwise the startup will get stuck on oneshot services +# https://github.com/just-containers/s6-overlay/issues/467 +ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 + +# Switch back to USER aiida to avoid accidental container runs as root +USER ${SYSTEM_UID} + +ENTRYPOINT ["/init"] + +WORKDIR "${HOME}" diff --git a/.docker/base/fix-permissions b/.docker/base/fix-permissions new file mode 100644 index 0000000000..840173c605 --- /dev/null +++ b/.docker/base/fix-permissions @@ -0,0 +1,35 @@ +#!/bin/bash +# This is taken from jupyter docker-stacks: +# https://github.com/jupyter/docker-stacks/blob/main/docker-stacks-foundation/fix-permissions +# set permissions on a directory +# after any installation, if a directory needs to be (human) user-writable, +# run this script on it. +# It will make everything in the directory owned by the group ${SYSTEM_GID} +# and writable by that group.
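For a single directory, the effect is roughly equivalent to the sketch below (an unoptimized illustration, with <dir> as a placeholder; the actual script only touches entries whose permissions are not already correct, as explained next):

    # Rough, unoptimized equivalent of fix-permissions for one directory <dir>:
    chgrp -R "${SYSTEM_GID}" "<dir>"                 # everything owned by the target group
    chmod -R g+rwX "<dir>"                           # group read/write, execute on directories
    find "<dir>" -type d -exec chmod u+s,g+s {} +    # setuid/setgid bits on directories only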
+ +# uses find to avoid touching files that already have the right permissions, +# which would cause massive image explosion + +# right permissions are: +# group=${SYSEM_GID} +# AND permissions include group rwX (directory-execute) +# AND directories have setuid,setgid bits set + +set -e + +for d in "$@"; do + find "${d}" \ + ! \( \ + -group "${SYSTEM_GID}" \ + -a -perm -g+rwX \ + \) \ + -exec chgrp "${SYSTEM_GID}" -- {} \+ \ + -exec chmod g+rwX -- {} \+ + # setuid, setgid *on directories only* + find "${d}" \ + \( \ + -type d \ + -a ! -perm -6000 \ + \) \ + -exec chmod +6000 -- {} \+ +done diff --git a/.docker/base/initial-condarc b/.docker/base/initial-condarc new file mode 100644 index 0000000000..383aad3cb0 --- /dev/null +++ b/.docker/base/initial-condarc @@ -0,0 +1,6 @@ +# Conda configuration see https://conda.io/projects/conda/en/latest/configuration.html + +auto_update_conda: false +show_channel_urls: true +channels: + - conda-forge diff --git a/.docker/base/s6-assets/config-quick-setup.yaml b/.docker/base/s6-assets/config-quick-setup.yaml new file mode 100644 index 0000000000..f910069e1d --- /dev/null +++ b/.docker/base/s6-assets/config-quick-setup.yaml @@ -0,0 +1,15 @@ +--- +db_engine: postgresql_psycopg2 +db_backend: core.psql_dos +db_host: database +db_port: 5432 +su_db_username: postgres +su_db_password: password +su_db_name: template1 +db_name: aiida_db +db_username: aiida +db_password: password +broker_host: messaging +broker_port: 5672 +broker_username: guest +broker_password: guest diff --git a/.docker/opt/configure-aiida.sh b/.docker/base/s6-assets/init/aiida-prepare.sh similarity index 56% rename from .docker/opt/configure-aiida.sh rename to .docker/base/s6-assets/init/aiida-prepare.sh index 92aa3ab45d..690ebff536 100755 --- a/.docker/opt/configure-aiida.sh +++ b/.docker/base/s6-assets/init/aiida-prepare.sh @@ -2,17 +2,20 @@ # This script is executed whenever the docker container is (re)started. -# Debugging. -set -x - # Environment. export SHELL=/bin/bash -# Setup AiiDA autocompletion. -grep _VERDI_COMPLETE /home/${SYSTEM_USER}/.bashrc &> /dev/null || echo 'eval "$(_VERDI_COMPLETE=source verdi)"' >> /home/${SYSTEM_USER}/.bashrc +# Configure AiiDA. +export SETUP_DEFAULT_AIIDA_PROFILE=true +export AIIDA_PROFILE_NAME=default +export AIIDA_USER_EMAIL=aiida@localhost +export AIIDA_USER_FIRST_NAME=Giuseppe +export AIIDA_USER_LAST_NAME=Verdi +export AIIDA_USER_INSTITUTION=Khedivial +export AIIDA_PROFILE_PATH=/aiida/assets/config-quick-setup.yaml # Check if user requested to set up AiiDA profile (and if it exists already) -if [[ ${SETUP_DEFAULT_PROFILE} == true ]] && ! verdi profile show ${PROFILE_NAME} &> /dev/null; then +if [[ ${SETUP_DEFAULT_AIIDA_PROFILE} == true ]] && ! verdi profile show ${AIIDA_PROFILE_NAME} &> /dev/null; then NEED_SETUP_PROFILE=true; else NEED_SETUP_PROFILE=false; @@ -22,15 +25,23 @@ fi if [[ ${NEED_SETUP_PROFILE} == true ]]; then # Create AiiDA profile. 
- verdi quicksetup \ - --non-interactive \ - --profile "${PROFILE_NAME}" \ - --email "${USER_EMAIL}" \ - --first-name "${USER_FIRST_NAME}" \ - --last-name "${USER_LAST_NAME}" \ - --institution "${USER_INSTITUTION}" \ - --db-host "${DB_HOST:localhost}" \ - --broker-host "${BROKER_HOST:localhost}" + verdi quicksetup \ + --non-interactive \ + --profile "${AIIDA_PROFILE_NAME}" \ + --email "${AIIDA_USER_EMAIL}" \ + --first-name "${AIIDA_USER_FIRST_NAME}" \ + --last-name "${AIIDA_USER_LAST_NAME}" \ + --institution "${AIIDA_USER_INSTITUTION}" \ + --config "${AIIDA_PROFILE_PATH}" + + # Suppress the verdi version warning because we are using a development version + verdi config set warnings.development_version False + + # Suppress the rabbitmq version warning, since the broker may be running an RMQ version > 3.8.15 + # (as is the case for the `aiida-core` image), which has the issue described in + # https://github.com/aiidateam/aiida-core/wiki/RabbitMQ-version-to-use + # We explicitly set consumer_timeout to 100 hours in /etc/rabbitmq/rabbitmq.conf + verdi config set warnings.rabbitmq_version False # Setup and configure local computer. computer_name=localhost @@ -52,18 +63,18 @@ if [[ ${NEED_SETUP_PROFILE} == true ]]; then exit 1 fi - verdi computer show ${computer_name} || verdi computer setup \ - --non-interactive \ - --label "${computer_name}" \ - --description "this computer" \ - --hostname "${computer_name}" \ + verdi computer show ${computer_name} &> /dev/null || verdi computer setup \ + --non-interactive \ + --label "${computer_name}" \ + --description "container computer" \ + --hostname "${computer_name}" \ --transport core.local \ - --scheduler core.direct \ - --work-dir /home/aiida/aiida_run/ \ - --mpirun-command "mpirun -np {tot_num_mpiprocs}" \ - --mpiprocs-per-machine ${LOCALHOST_MPI_PROCS_PER_MACHINE} && \ - verdi computer configure core.local "${computer_name}" \ - --non-interactive \ + --scheduler core.direct \ + --work-dir /home/${SYSTEM_USER}/aiida_run/ \ + --mpirun-command "mpirun -np {tot_num_mpiprocs}" \ + --mpiprocs-per-machine ${LOCALHOST_MPI_PROCS_PER_MACHINE} && \ + verdi computer configure core.local "${computer_name}" \ + --non-interactive \ --safe-interval 0.0 fi @@ -71,20 +82,5 @@ fi # Show the default profile verdi profile show || echo "The default profile is not set." -# Make sure that the daemon is not running, otherwise the migration will abort. -verdi daemon stop - # Migration will run for the default profile.
+verdi storage migrate --force diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/aiida-prepare b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/aiida-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/base b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/down b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/down new file mode 100644 index 0000000000..b8a14495ad --- /dev/null +++ b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/down @@ -0,0 +1 @@ +verdi daemon stop diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/timeout-up b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/type b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/up b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/up new file mode 100644 index 0000000000..12f199a2b3 --- /dev/null +++ b/.docker/base/s6-assets/s6-rc.d/aiida-daemon-start/up @@ -0,0 +1,3 @@ +#!/command/execlineb -S0 + +verdi daemon start diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base b/.docker/base/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-prepare/timeout-up b/.docker/base/s6-assets/s6-rc.d/aiida-prepare/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/base/s6-assets/s6-rc.d/aiida-prepare/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-prepare/type b/.docker/base/s6-assets/s6-rc.d/aiida-prepare/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/base/s6-assets/s6-rc.d/aiida-prepare/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/base/s6-assets/s6-rc.d/aiida-prepare/up b/.docker/base/s6-assets/s6-rc.d/aiida-prepare/up new file mode 100644 index 0000000000..60e82d7e43 --- /dev/null +++ b/.docker/base/s6-assets/s6-rc.d/aiida-prepare/up @@ -0,0 +1,4 @@ +#!/command/execlineb -S0 + +foreground { s6-echo "Calling /etc/init/aiida-prepare" } +/etc/init/aiida-prepare.sh diff --git a/.docker/base/s6-assets/s6-rc.d/user/contents.d/aiida-daemon-start b/.docker/base/s6-assets/s6-rc.d/user/contents.d/aiida-daemon-start new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/base/s6-assets/s6-rc.d/user/contents.d/aiida-prepare b/.docker/base/s6-assets/s6-rc.d/user/contents.d/aiida-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/build.json b/.docker/build.json new file mode 100644 index 0000000000..554a4d95c7 --- /dev/null +++ b/.docker/build.json @@ -0,0 +1,13 @@ +{ + "variable": { + "PYTHON_VERSION": { + "default": "3.9.13" + }, + "PGSQL_VERSION": { + "default": "15" + }, + "RMQ_VERSION": { + "default": "3.10.18" + } + } + } diff --git a/.docker/docker-bake.hcl b/.docker/docker-bake.hcl new file mode 100644 index 0000000000..a7d3f066a2 --- /dev/null +++ 
b/.docker/docker-bake.hcl @@ -0,0 +1,68 @@ +# docker-bake.hcl +variable "VERSION" { +} + +variable "PYTHON_VERSION" { +} + +variable "PGSQL_VERSION" { +} + +variable "ORGANIZATION" { + default = "aiidateam" +} + +variable "REGISTRY" { + default = "docker.io/" +} + +variable "PLATFORMS" { + default = ["linux/amd64"] +} + +variable "TARGETS" { + default = ["base", "base-with-services"] +} + +function "tags" { + params = [image] + result = [ + "${REGISTRY}${ORGANIZATION}/${image}:latest", + "${REGISTRY}${ORGANIZATION}/${image}:newly-build" + ] +} + +group "default" { + targets = "${TARGETS}" +} + +target "base-meta" { + tags = tags("base") +} +target "base-with-services-meta" { + tags = tags("aiida-core") +} + +target "base" { + inherits = ["base-meta"] + context = "base" + contexts = { + src = ".." + } + platforms = "${PLATFORMS}" + args = { + "PYTHON_VERSION" = "${PYTHON_VERSION}" + } +} +target "base-with-services" { + inherits = ["base-with-services-meta"] + context = "base-with-services" + contexts = { + base = "target:base" + } + platforms = "${PLATFORMS}" + args = { + "PGSQL_VERSION" = "${PGSQL_VERSION}" + "RMQ_VERSION" = "${RMQ_VERSION}" + } +} diff --git a/.docker/docker-compose.base-with-services.yml b/.docker/docker-compose.base-with-services.yml new file mode 100644 index 0000000000..cf78f252be --- /dev/null +++ b/.docker/docker-compose.base-with-services.yml @@ -0,0 +1,15 @@ +--- +version: '3.4' + +services: + + aiida: + image: ${REGISTRY:-}${BASE_IMAGE:-aiidateam/aiida-core}:${VERSION:-latest} + environment: + TZ: Europe/Zurich + SETUP_DEFAULT_AIIDA_PROFILE: 'true' + #volumes: + # - aiida-home-folder:/home/aiida + +volumes: + aiida-home-folder: diff --git a/.docker/docker-compose.base.yml b/.docker/docker-compose.base.yml new file mode 100644 index 0000000000..a6a8d48971 --- /dev/null +++ b/.docker/docker-compose.base.yml @@ -0,0 +1,49 @@ +--- +version: '3.4' + +services: + + database: + image: postgres:15 + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: password + # volumes: + # - aiida-postgres-db:/var/lib/postgresql/data + healthcheck: + test: [ "CMD-SHELL", "pg_isready"] + interval: 5s + timeout: 5s + retries: 10 + + messaging: + image: rabbitmq:3.8.14-management + environment: + RABBITMQ_DEFAULT_USER: guest + RABBITMQ_DEFAULT_PASS: guest + # volumes: + # - aiida-rmq-data:/var/lib/rabbitmq/ + healthcheck: + test: rabbitmq-diagnostics check_port_connectivity + interval: 30s + timeout: 30s + retries: 10 + + aiida: + image: ${REGISTRY:-}${BASE_IMAGE:-aiidateam/base}:${VERSION:-latest} + environment: + RMQHOST: messaging + TZ: Europe/Zurich + SETUP_DEFAULT_AIIDA_PROFILE: 'true' + # volumes: + # - aiida-home-folder:/home/aiida + depends_on: + database: + condition: service_healthy + #messaging: + # condition: service_healthy + +#volumes: +# aiida-postgres-db: +# aiida-rmq-data: +# aiida-home-folder: diff --git a/.docker/docker-rabbitmq.yml b/.docker/docker-rabbitmq.yml deleted file mode 100644 index da266790ff..0000000000 --- a/.docker/docker-rabbitmq.yml +++ /dev/null @@ -1,34 +0,0 @@ -# A small configuration for use in local CI testing, -# if you wish to control the rabbitmq used. 
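The new compose files above replace this removed configuration; typical local usage would look something like the following sketch (assuming the images have already been built locally with `doit build` and that the commands are run from inside the `.docker` directory):

    # Hypothetical usage of the compose file with separate database and messaging services.
    docker-compose -f docker-compose.base.yml up -d                        # start database, messaging and aiida
    docker-compose -f docker-compose.base.yml exec --user aiida aiida verdi status
    docker-compose -f docker-compose.base.yml down                         # tear the stack down again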
- -# Simply install docker, then run: -# $ docker-compose -f .docker/docker-rabbitmq.yml up -d - -# and to power down, after testing: -# $ docker-compose -f .docker/docker-rabbitmq.yml down - -# you can monitor rabbitmq use at: http://localhost:15672 - -version: '3.4' - -services: - - rabbit: - image: rabbitmq:3.8.3-management - container_name: aiida-rmq - environment: - RABBITMQ_DEFAULT_USER: guest - RABBITMQ_DEFAULT_PASS: guest - ports: - - '5672:5672' - - '15672:15672' - healthcheck: - test: rabbitmq-diagnostics -q ping - interval: 30s - timeout: 30s - retries: 5 - networks: - - aiida-rmq - -networks: - aiida-rmq: diff --git a/.docker/dodo.py b/.docker/dodo.py new file mode 100644 index 0000000000..670dfaf642 --- /dev/null +++ b/.docker/dodo.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +# pylint: disable=missing-module-docstring +import json +from pathlib import Path +import platform + +import docker +from doit.tools import title_with_actions +from dunamai import Version + +_DOCKER_CLIENT = docker.from_env() +_DOCKER_ARCHITECTURE = _DOCKER_CLIENT.info()['Architecture'] + +DOIT_CONFIG = {'default_tasks': ['build']} + +VERSION = Version.from_git().serialize().replace('+', '_') + +_ARCH_MAPPING = { + 'x86_64': 'amd64', + 'aarch64': 'arm64', +} + +ARCH = _ARCH_MAPPING.get(_DOCKER_ARCHITECTURE) + +if ARCH is None: + raise RuntimeError(f'Unsupported architecture {ARCH} on platform {platform.system()}.') + +_REGISTRY_PARAM = { + 'name': 'registry', + 'short': 'r', + 'long': 'registry', + 'type': str, + 'default': '', + 'help': 'Specify the docker image registry.', +} + +_ORGANIZATION_PARAM = { + 'name': 'organization', + 'short': 'o', + 'long': 'organization', + 'type': str, + 'default': 'aiidalab', + 'help': 'Specify the docker image organization.', +} + +_VERSION_PARAM = { + 'name': 'version', + 'long': 'version', + 'type': str, + 'default': VERSION, + 'help': ( + 'Specify the version of the stack for building / testing. Defaults to a ' + 'version determined from the state of the local git repository.' + ), +} + +_ARCH_PARAM = { + 'name': 'architecture', + 'long': 'arch', + 'type': str, + 'default': ARCH, + 'help': 'Specify the platform to build for. 
Examples: arm64, amd64.', +} + + +def task_build(): + """Build all docker images.""" + + def generate_version_override(version, registry, targets, architecture, organization): + if len(targets) > 2: + # Workaround of issue of doit, which rather than override the default value, it appends + # https://github.com/pydoit/doit/issues/436 + targets = targets[2:] + + platforms = [f'linux/{architecture}'] + + Path('docker-bake.override.json').write_text( + json.dumps( + dict( + VERSION=version, + REGISTRY=registry, + TARGETS=targets, + ORGANIZATION=organization, + PLATFORMS=platforms, + ) + ), + encoding='utf-8', + ) + + return { + 'actions': [ + generate_version_override, + 'docker buildx bake -f docker-bake.hcl -f build.json ' + '-f docker-bake.override.json ' + '--load', + ], + 'title': title_with_actions, + 'params': [ + _ORGANIZATION_PARAM, + _REGISTRY_PARAM, + _VERSION_PARAM, + _ARCH_PARAM, + { + 'name': 'targets', + 'long': 'targets', + 'short': 't', + 'type': list, + 'default': ['base', 'base-with-services'], + 'help': 'Specify the target to build.', + }, + ], + 'verbosity': 2, + } + + +def task_tests(): + """Run tests with pytest.""" + + return { + 'actions': ['REGISTRY=%(registry)s VERSION=:%(version)s pytest -v'], + 'params': [_REGISTRY_PARAM, _VERSION_PARAM], + 'verbosity': 2, + } diff --git a/.docker/my_init.d/configure-aiida.sh b/.docker/my_init.d/configure-aiida.sh deleted file mode 100755 index 7ac4476b07..0000000000 --- a/.docker/my_init.d/configure-aiida.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -set -em - -su -c /opt/configure-aiida.sh ${SYSTEM_USER} diff --git a/.docker/pytest.ini b/.docker/pytest.ini new file mode 100644 index 0000000000..d1e7877377 --- /dev/null +++ b/.docker/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +minversion = 7.0 +addopts = -ra -q +testpaths = + tests diff --git a/.docker/requirements.txt b/.docker/requirements.txt new file mode 100644 index 0000000000..2462e70580 --- /dev/null +++ b/.docker/requirements.txt @@ -0,0 +1,10 @@ +docker +pre-commit +pytest +requests +tabulate +pytest-docker +docker-compose +doit +dunamai +pyyaml<=5.3.1 diff --git a/.docker/tests/conftest.py b/.docker/tests/conftest.py new file mode 100644 index 0000000000..756fbc55ec --- /dev/null +++ b/.docker/tests/conftest.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# pylint: disable=missing-docstring, redefined-outer-name +import json +from pathlib import Path + +import pytest + + +@pytest.fixture(scope='session', params=['base', 'base-with-services']) +def variant(request): + return request.param + + +@pytest.fixture(scope='session') +def docker_compose_file(pytestconfig, variant): # pylint: disable=unused-argument + return f'docker-compose.{variant}.yml' + + +@pytest.fixture(scope='session') +def docker_compose(docker_services): + # pylint: disable=protected-access + return docker_services._docker_compose + + +@pytest.fixture +def timeout(): + """Container and service startup timeout""" + return 30 + + +@pytest.fixture +def container_user(): + return 'aiida' + + +@pytest.fixture +def aiida_exec(docker_compose): + + def execute(command, user=None, **kwargs): + if user: + command = f'exec -T --user={user} aiida {command}' + else: + command = f'exec -T aiida {command}' + return docker_compose.execute(command, **kwargs) + + return execute + + +@pytest.fixture(scope='session') +def _build_config(): + return json.loads(Path('build.json').read_text(encoding='utf-8'))['variable'] + + +@pytest.fixture(scope='session') +def python_version(_build_config): + return 
_build_config['PYTHON_VERSION']['default'] + + +@pytest.fixture(scope='session') +def pgsql_version(_build_config): + return _build_config['PGSQL_VERSION']['default'] diff --git a/.docker/tests/test_aiida.py b/.docker/tests/test_aiida.py new file mode 100644 index 0000000000..dc636fa695 --- /dev/null +++ b/.docker/tests/test_aiida.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# pylint: disable=missing-docstring +import json +import time + +from packaging.version import parse +import pytest + + +def test_correct_python_version_installed(aiida_exec, python_version): + info = json.loads(aiida_exec('mamba list --json --full-name python').decode())[0] + assert info['name'] == 'python' + assert parse(info['version']) == parse(python_version) + + +def test_correct_pgsql_version_installed(aiida_exec, pgsql_version, variant): + if variant == 'base': + pytest.skip('PostgreSQL is not installed in the base image') + + info = json.loads(aiida_exec('mamba list --json --full-name postgresql').decode())[0] + assert info['name'] == 'postgresql' + assert parse(info['version']).major == parse(pgsql_version).major + + +def test_verdi_status(aiida_exec, container_user, timeout): + time.sleep(timeout) + output = aiida_exec('verdi status', user=container_user).decode().strip() + assert 'Connected to RabbitMQ' in output + assert 'Daemon is running' in output + + # check that we have suppressed the warnings + assert 'Warning' not in output diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index dfe06bad59..0000000000 --- a/.dockerignore +++ /dev/null @@ -1,13 +0,0 @@ -.benchmarks -.cache -.coverage -.mypy_cache -.pytest_cache -.tox -.vscode -aiida_core.egg-info -docs/build -pip-wheel-metadata -**/.DS_Store -**/*.pyc -**/__pycache__ diff --git a/.github/actions/create-dev-env/action.yml b/.github/actions/create-dev-env/action.yml new file mode 100644 index 0000000000..1142844757 --- /dev/null +++ b/.github/actions/create-dev-env/action.yml @@ -0,0 +1,27 @@ +--- +name: Build environment +description: Create build environment + +inputs: + architecture: + description: architecture to be run on + required: true + type: string + +runs: + using: composite + steps: + # actions/setup-python doesn't support Linux arm64 runners + # See: https://github.com/actions/setup-python/issues/108 + # python3 is manually preinstalled in the arm64 VM self-hosted runner + - name: Set Up Python ๐Ÿ + uses: actions/setup-python@v4 + with: + python-version: 3.x + if: ${{ inputs.architecture == 'amd64' }} + + - name: Install Dev Dependencies ๐Ÿ“ฆ + run: | + pip install --upgrade pip + pip install --upgrade -r .docker/requirements.txt + shell: bash diff --git a/.github/actions/load-image/action.yml b/.github/actions/load-image/action.yml new file mode 100644 index 0000000000..5909cdd518 --- /dev/null +++ b/.github/actions/load-image/action.yml @@ -0,0 +1,31 @@ +--- +name: Load Docker image +description: Download image tar and load it to docker + +inputs: + image: + description: Image name + required: true + type: string + architecture: + description: Image architecture + required: true + type: string + +runs: + using: composite + steps: + - name: Download built image ๐Ÿ“ฅ + uses: actions/download-artifact@v3 + with: + name: ${{ inputs.image }}-${{ inputs.architecture }} + path: /tmp/ + - name: Load downloaded image to docker ๐Ÿ“ฅ + run: | + docker load --input /tmp/${{ inputs.image }}-${{ inputs.architecture }}.tar + docker image ls --all + shell: bash + - name: Delete the file ๐Ÿ—‘๏ธ + run: rm -f /tmp/${{ inputs.image 
}}-${{ inputs.architecture }}.tar + shell: bash + if: always() diff --git a/.github/workflows/build_and_test_docker_on_pr.yml b/.github/workflows/build_and_test_docker_on_pr.yml deleted file mode 100644 index 9078daefc9..0000000000 --- a/.github/workflows/build_and_test_docker_on_pr.yml +++ /dev/null @@ -1,65 +0,0 @@ -# Test the Docker image on every pull request. -# -# The steps are: -# 1. Build docker image using cached data. -# 2. Start the docker container. -# 3. Check that AiiDA is responsive. - -name: build-and-test-image-from-pull-request - -on: - pull_request: - path_ignore: - - 'docs/**' - -# https://docs.github.com/en/actions/using-jobs/using-concurrency -concurrency: - # only cancel in-progress jobs or runs for the current workflow - matches against branch & tags - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - - build-and-test: - - # Only run this job on the main repository and not on forks - if: github.repository == 'aiidateam/aiida-core' - - runs-on: ubuntu-latest - timeout-minutes: 30 - - steps: - - - uses: actions/checkout@v2 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Cache Docker layers - uses: actions/cache@v2 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - - name: Build image locally - uses: docker/build-push-action@v2 - with: - load: true - push: false - tags: aiida-core:latest - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Start and test the container - run: | - export DOCKERID=`docker run -d aiida-core:latest` - docker exec --tty $DOCKERID wait-for-services - docker logs $DOCKERID - docker exec --tty --user aiida $DOCKERID /bin/bash -l -c 'verdi profile show default' - docker exec --tty --user aiida $DOCKERID /bin/bash -l -c 'verdi computer show localhost' - docker exec --tty --user aiida $DOCKERID /bin/bash -l -c 'verdi daemon status' diff --git a/.github/workflows/docker-build-test-upload.yml b/.github/workflows/docker-build-test-upload.yml new file mode 100644 index 0000000000..b9b50ec48f --- /dev/null +++ b/.github/workflows/docker-build-test-upload.yml @@ -0,0 +1,63 @@ +--- +name: Build image and then upload the image, tags and manifests to GitHub artifacts + +env: + OWNER: ${{ github.repository_owner }} + +on: + workflow_call: + inputs: + architecture: + description: Image architecture, e.g. 
amd64, arm64 + required: true + type: string + runsOn: + description: GitHub Actions Runner image + required: true + type: string + +jobs: + build-test-upload: + runs-on: ${{ inputs.runsOn }} + defaults: + run: + shell: bash + working-directory: .docker + + steps: + - name: Checkout Repo ⚡️ + uses: actions/checkout@v3 + - name: Create dev environment 📦 + uses: ./.github/actions/create-dev-env + with: + architecture: ${{ inputs.architecture }} + + - name: Build base and base-with-services images (output image name aiida-core) 🛠 + run: doit build --target base --target base-with-services --arch ${{ inputs.architecture }} --organization ${{ env.OWNER }} + env: + # Full logs for CI build + BUILDKIT_PROGRESS: plain + + - name: Run tests ✅ + run: VERSION=newly-build python -m pytest -s tests + + - name: Save image as a tar for later use 💾 + run: | + docker save ${{ env.OWNER }}/base -o /tmp/base-${{ inputs.architecture }}.tar + docker save ${{ env.OWNER }}/aiida-core -o /tmp/aiida-core-${{ inputs.architecture }}.tar + + - name: Upload base image as artifact 💾 + uses: actions/upload-artifact@v3 + with: + name: base-${{ inputs.architecture }} + path: /tmp/base-${{ inputs.architecture }}.tar + retention-days: 3 + if: ${{ !github.event.pull_request.head.repo.fork }} + + - name: Upload aiida-core image as artifact 💾 + uses: actions/upload-artifact@v3 + with: + name: aiida-core-${{ inputs.architecture }} + path: /tmp/aiida-core-${{ inputs.architecture }}.tar + retention-days: 3 + if: ${{ !github.event.pull_request.head.repo.fork }} diff --git a/.github/workflows/docker-merge-tags.yml b/.github/workflows/docker-merge-tags.yml new file mode 100644 index 0000000000..c9b54ffb4c --- /dev/null +++ b/.github/workflows/docker-merge-tags.yml @@ -0,0 +1,66 @@ +--- +name: Download image tags from GitHub artifacts and create multi-platform manifests + +on: + workflow_call: + inputs: + registry: + description: Docker registry, e.g.
ghcr.io, docker.io + required: true + type: string + secrets: + REGISTRY_USERNAME: + required: true + REGISTRY_TOKEN: + required: true + + +jobs: + merge-tags: + runs-on: ubuntu-latest + strategy: + matrix: + image: ["base", "aiida-core"] + permissions: + packages: write + + steps: + - name: Checkout Repo โšก๏ธ + uses: actions/checkout@v3 + - name: Create dev environment ๐Ÿ“ฆ + uses: ./.github/actions/create-dev-env + with: + architecture: amd64 + + - name: Download amd64 tags file ๐Ÿ“ฅ + uses: actions/download-artifact@v3 + with: + name: ${{ inputs.registry }}-${{ matrix.image }}-amd64-tags + path: /tmp/ + - name: Download arm64 tags file ๐Ÿ“ฅ + uses: actions/download-artifact@v3 + with: + name: ${{ inputs.registry }}-${{ matrix.image }}-arm64-tags + path: /tmp/ + + - name: Login to Container Registry ๐Ÿ”‘ + uses: docker/login-action@v2 + with: + registry: ${{ inputs.registry }} + username: ${{ secrets.REGISTRY_USERNAME }} + password: ${{ secrets.REGISTRY_TOKEN }} + + - name: Merge tags for the images of different arch ๐Ÿ”€ + run: | + for arch_tag in $(cat /tmp/${{ matrix.image }}-amd64-tags.txt); do + tag=$(echo $arch_tag | sed "s/:amd64-/:/") + docker manifest create $tag --amend $arch_tag + docker manifest push $tag + done + + for arch_tag in $(cat /tmp/${{ matrix.image }}-arm64-tags.txt); do + tag=$(echo $arch_tag | sed "s/:arm64-/:/") + docker manifest create $tag --amend $arch_tag + docker manifest push $tag + done + shell: bash diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml new file mode 100644 index 0000000000..494d2957cb --- /dev/null +++ b/.github/workflows/docker-push.yml @@ -0,0 +1,96 @@ +--- +name: Download Docker image and its tags from GitHub artifacts, apply them and push the image to container registry + +env: + OWNER: ${{ github.repository_owner }} + +on: + workflow_call: + inputs: + architecture: + description: Image architecture + required: true + type: string + registry: + description: Docker registry + required: true + type: string + secrets: + REGISTRY_USERNAME: + required: true + REGISTRY_TOKEN: + required: true + +jobs: + tag-push: + runs-on: ubuntu-latest + strategy: + matrix: + image: ["base", "aiida-core"] + defaults: + run: + shell: bash + working-directory: .docker + permissions: + packages: write + + steps: + - name: Checkout Repo โšก๏ธ + uses: actions/checkout@v3 + - name: Create dev environment ๐Ÿ“ฆ + uses: ./.github/actions/create-dev-env + with: + architecture: ${{ inputs.architecture }} + - name: Load image to Docker ๐Ÿ“ฅ + uses: ./.github/actions/load-image + with: + image: ${{ matrix.image }} + architecture: ${{ inputs.architecture }} + + - name: Read build variables + id: build_vars + run: | + vars=$(cat build.json | jq -c '[.variable | to_entries[] | {"key": .key, "value": .value.default}] | from_entries') + echo "vars=$vars" >> "${GITHUB_OUTPUT}" + + - name: Docker meta ๐Ÿ“ + id: meta + uses: docker/metadata-action@v4 + env: ${{ fromJson(steps.build_vars.outputs.vars) }} + with: + images: | + name=${{ inputs.registry }}/${{ env.OWNER }}/${{ matrix.image }} + tags: | + type=edge,enable={{is_default_branch}} + type=sha,enable=${{ github.ref_type != 'tag' }} + type=ref,event=pr + type=match,pattern=v(\d+\.\d+.\d+),group=1 + type=raw,value={{tag}},enable=${{ startsWith(github.ref, 'refs/tags/v') }} + type=raw,value=python-${{ env.PYTHON_VERSION }},enable=${{ startsWith(github.ref, 'refs/tags/v') }} + type=raw,value=postgresql-${{ env.PGSQL_VERSION }},enable=${{ startsWith(github.ref, 'refs/tags/v') }} + + - name: 
Login to Container Registry ๐Ÿ”‘ + uses: docker/login-action@v2 + with: + registry: ${{ inputs.registry }} + username: ${{ secrets.REGISTRY_USERNAME }} + password: ${{ secrets.REGISTRY_TOKEN }} + + - name: Set tags for image and push ๐Ÿท๏ธ๐Ÿ“ค๐Ÿ’พ + run: | + declare -a arr=(${{ steps.meta.outputs.tags }}) + for tag in "${arr[@]}"; do + arch_tag=$(echo ${tag} | sed "s/:/:${{ inputs.architecture }}-/") + docker tag ${{ env.OWNER }}/${{ matrix.image }}:newly-build ${arch_tag} + docker push ${arch_tag} + + # write tag to file + echo ${arch_tag} >> /tmp/${{ matrix.image }}-${{ inputs.architecture }}-tags.txt + done + + - name: Upload tags file ๐Ÿ“ค + uses: actions/upload-artifact@v3 + with: + name: ${{ inputs.registry }}-${{ matrix.image }}-${{ inputs.architecture }}-tags + path: /tmp/${{ matrix.image }}-${{ inputs.architecture }}-tags.txt + retention-days: 3 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000000..c621d9e5d5 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,101 @@ +--- +name: Build, test and push Docker Images + +on: + pull_request: + paths: + - .docker/** + - .github/workflows/docker-*.yml + push: + branches: + - main + tags: + - "v*" + paths: + - .docker/** + - .github/workflows/docker-*.yml + workflow_dispatch: + +# https://docs.github.com/en/actions/using-jobs/using-concurrency +concurrency: + # only cancel in-progress jobs or runs for the current workflow - matches against branch & tags + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + amd64-build: + uses: ./.github/workflows/docker-build-test-upload.yml + with: + architecture: amd64 + runsOn: ubuntu-latest + + arm64-build: + uses: ./.github/workflows/docker-build-test-upload.yml + with: + architecture: arm64 + runsOn: buildjet-2vcpu-ubuntu-2204-arm + if: ${{ !github.event.pull_request.head.repo.fork }} + + amd64-push-ghcr: + if: github.repository == 'aiidateam/aiida-core' + uses: ./.github/workflows/docker-push.yml + with: + architecture: amd64 + registry: ghcr.io + secrets: + REGISTRY_USERNAME: ${{ github.actor }} + REGISTRY_TOKEN: ${{ secrets.GITHUB_TOKEN }} + needs: [amd64-build] + + arm64-push-ghcr: + uses: ./.github/workflows/docker-push.yml + with: + architecture: arm64 + registry: ghcr.io + secrets: + REGISTRY_USERNAME: ${{ github.actor }} + REGISTRY_TOKEN: ${{ secrets.GITHUB_TOKEN }} + needs: [arm64-build] + if: ${{ !github.event.pull_request.head.repo.fork }} + + merge-tags-ghcr: + uses: ./.github/workflows/docker-merge-tags.yml + with: + registry: ghcr.io + secrets: + REGISTRY_USERNAME: ${{ github.actor }} + REGISTRY_TOKEN: ${{ secrets.GITHUB_TOKEN }} + needs: [amd64-push-ghcr, arm64-push-ghcr] + if: ${{ !github.event.pull_request.head.repo.fork }} + + amd64-push-dockerhub: + if: github.repository == 'aiidateam/aiida-core' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) + uses: ./.github/workflows/docker-push.yml + with: + architecture: amd64 + registry: docker.io + secrets: + REGISTRY_USERNAME: ${{ secrets.DOCKER_USERNAME }} + REGISTRY_TOKEN: ${{ secrets.DOCKER_TOKEN }} + needs: [amd64-build] + + arm64-push-dockerhub: + if: github.repository == 'aiidateam/aiida-core' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) + uses: ./.github/workflows/docker-push.yml + with: + architecture: arm64 + registry: docker.io + secrets: + REGISTRY_USERNAME: ${{ secrets.DOCKER_USERNAME }} + REGISTRY_TOKEN: ${{ secrets.DOCKER_TOKEN }} + needs: [arm64-build] + + 
merge-tags-dockerhub: + if: github.repository == 'aiidateam/aiida-core' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) + uses: ./.github/workflows/docker-merge-tags.yml + with: + registry: docker.io + secrets: + REGISTRY_USERNAME: ${{ secrets.DOCKER_USERNAME }} + REGISTRY_TOKEN: ${{ secrets.DOCKER_TOKEN }} + needs: [amd64-push-dockerhub, arm64-push-dockerhub] diff --git a/.github/workflows/push_image_to_dockerhub.yml b/.github/workflows/push_image_to_dockerhub.yml deleted file mode 100644 index 3178e78e04..0000000000 --- a/.github/workflows/push_image_to_dockerhub.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Build the new Docker image on every commit to the main branch and on every new tag. -# No caching is involved for the image build. The new image is then pushed to the Docker Hub. - -name: build-and-push-to-dockerhub - -on: - push: - branches: - - main - tags: - - "v[0-9]+.[0-9]+.[0-9]+*" - -jobs: - - build-and-push: - - # Only run this job on the main repository and not on forks - if: github.repository == 'aiidateam/aiida-core' - - runs-on: ubuntu-latest - timeout-minutes: 30 - - steps: - - - uses: actions/checkout@v2 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: ${{ github.repository }} - tags: | - type=ref,event=branch - type=semver,pattern={{version}} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_TOKEN }} - - - name: Build and push - id: docker_build - uses: docker/build-push-action@v2 - with: - push: true - platforms: linux/amd64, linux/arm64 - tags: ${{ steps.meta.outputs.tags }} diff --git a/.gitignore b/.gitignore index 3cf188d3f3..ef7191ae15 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,7 @@ docs/source/reference/apidoc _sandbox pplot_out/ + +# docker +.doit.* +docker-bake.override.json diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e8ee1526b4..54074affef 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,7 +42,7 @@ repos: - id: yapf name: yapf types: [python] - exclude: &exclude_files > + exclude: | (?x)^( docs/.*| )$ @@ -74,6 +74,7 @@ repos: (?x)^( .github/.*| .molecule/.*| + .docker/.*| docs/.*| utils/.*| @@ -201,7 +202,11 @@ repos: entry: pylint types: [python] language: system - exclude: *exclude_files + exclude: | + (?x)^( + docs/.*| + .docker/.*| + )$ - id: dm-generate-all name: Update all requirements files diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 9085a5e3ab..0000000000 --- a/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM aiidateam/aiida-prerequisites:0.7.0 - -USER root - -ENV SETUP_DEFAULT_PROFILE true - -ENV PROFILE_NAME default -ENV USER_EMAIL aiida@localhost -ENV USER_FIRST_NAME Giuseppe -ENV USER_LAST_NAME Verdi -ENV USER_INSTITUTION Khedivial -ENV AIIDADB_BACKEND core.psql_dos - -# Copy and install AiiDA -COPY . aiida-core -RUN pip install ./aiida-core[atomic_tools] - -# Configure aiida for the user -COPY .docker/opt/configure-aiida.sh /opt/configure-aiida.sh -COPY .docker/my_init.d/configure-aiida.sh /etc/my_init.d/40_configure-aiida.sh - -# Use phusion baseimage docker init system. 
-CMD ["/sbin/my_init"] diff --git a/docs/source/intro/run_docker.rst b/docs/source/intro/run_docker.rst index 071eb35dab..669dffae6c 100644 --- a/docs/source/intro/run_docker.rst +++ b/docs/source/intro/run_docker.rst @@ -15,7 +15,15 @@ This image contains a fully pre-configured AiiDA environment which makes it part .. grid:: 1 :gutter: 3 - .. grid-item-card:: Start container + .. grid-item-card:: Install Docker on your warkstation or laptop + + To install Docker, please refer to the `official documentation `__. + + .. note:: + + If you are using Linux, you need to have root privileges to do `post-installation steps for the Docker Engine `__. + + .. grid-item-card:: Start container and use AiiDA interactively First, pull the image: @@ -27,23 +35,23 @@ This image contains a fully pre-configured AiiDA environment which makes it part .. parsed-literal:: - $ docker run -d --name aiida-container aiidateam/aiida-core:latest + $ docker run -it aiidateam/aiida-core:latest bash - You can use the following command to block until all services have started up: + You can specify a name for the container with the ``--name`` option for easier reference later on: - .. code-block:: console + .. parsed-literal:: - $ docker exec -t aiida-container wait-for-services + $ docker run -it --name aiida-container aiidateam/aiida-core:latest bash .. grid-item-card:: Check setup - The default profile is created under the ``aiida`` user, so to execute commands you must add the ``--user aiida`` option. + The prfile named ``default`` is created under the ``aiida`` user. - For example, to check the verdi status, execute: + For example, to check the verdi status, execute the following command inside the container: .. code-block:: console - $ docker exec -t --user aiida aiida-container /bin/bash -l -c 'verdi status' + $ verdi status โœ“ config dir: /home/aiida/.aiida โœ“ profile: On profile default โœ“ repository: /home/aiida/.aiida/repository/default @@ -51,24 +59,13 @@ This image contains a fully pre-configured AiiDA environment which makes it part โœ“ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 โœ“ daemon: Daemon is running as PID 1795 since 2020-05-20 02:54:00 - .. grid-item-card:: Use container interactively - - To "enter" the container and run commands directly in the shell, use: - - .. code-block:: console - - $ docker exec -it --user aiida aiida-container /bin/bash - - This will drop you into the shell within the container as the user "aiida". - .. grid-item-card:: Persist data across different containers - If you stop the container and start it again, any data you created will persist. + If you stop the container (`docker stop` or simply `Ctrl+D` from container) and start it again, any data you created will persist. .. code-block:: console - $ docker stop aiida-container - $ docker start aiida-container + $ docker start -i aiida-container However, if you remove the container, **all data will be removed as well**. @@ -78,19 +75,24 @@ This image contains a fully pre-configured AiiDA environment which makes it part $ docker rm aiida-container The preferred way to persistently store data is to `create a volume `__. + To create a simple volume, run: .. code-block:: console - $ docker volume create my-data + $ docker volume create container-home-data Then make sure to mount that volume when running the aiida container: .. 
parsed-literal:: - $ docker run -d --name aiida-container --mount source=my-data,target=/tmp/my_data aiidateam/aiida-core:latest + $ docker run -it --name aiida-container -v container-home-data:/home/aiida aiidateam/aiida-core:latest + + Starting the container with the above command ensures that any data stored in the ``/home/aiida`` path within the container is stored in the ``container-home-data`` volume and therefore persists even if the container is removed. + + To persistently store Python packages installed in the container, use the ``--user`` flag when installing them with pip; they will then be installed in the ``/home/aiida/.local`` path, which lives inside the mounted ``container-home-data`` volume. - Starting the container with the above command, ensures that any data stored in the ``/tmp/my_data`` path within the container is stored in the ``my-data`` volume and therefore persists even if the container is removed. + You can also mount a local directory instead of a volume, or mount it to a different path inside the container; please refer to the `Docker documentation `__ for more information. .. button-ref:: intro:get_started:next :ref-type: ref diff --git a/pyproject.toml b/pyproject.toml index e2f314e8e1..9e1d57c0f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ notebook = [ ] pre-commit = [ "mypy==0.991", - "packaging==20.3", + "packaging~=20.9", "pre-commit~=2.2", "pylint~=2.17.4", "pylint-aiida~=0.1.1",
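For reference, the build-and-test sequence executed by the CI workflows above can be reproduced locally from a checkout of this branch. The following is a rough sketch: it assumes Docker is running, that the commands are issued from the repository root on an amd64 machine, and that "aiidateam" stands in for the image organization (in CI this is the repository owner):

    # Install the Python tooling used by the CI jobs (doit, pytest, ...)
    pip install --upgrade -r .docker/requirements.txt

    # Build both image targets for the local architecture
    cd .docker
    doit build --target base --target base-with-services --arch amd64 --organization aiidateam

    # Run the container tests against the freshly built images
    VERSION=newly-build python -m pytest -s tests

Note that docker-push.yml prefixes every tag with the architecture (for example, :edge becomes :amd64-edge), which is what allows docker-merge-tags.yml to later combine the per-architecture images into a single multi-platform manifest via docker manifest create.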