diff --git a/aiida/backends/djsite/db/migrations/0048_computer_name_to_label.py b/aiida/backends/djsite/db/migrations/0048_computer_name_to_label.py
index fb64ffa167..2a65aa3a91 100644
--- a/aiida/backends/djsite/db/migrations/0048_computer_name_to_label.py
+++ b/aiida/backends/djsite/db/migrations/0048_computer_name_to_label.py
@@ -30,5 +30,13 @@ class Migration(migrations.Migration):
             old_name='name',
             new_name='label',
         ),
+        migrations.RunSQL(
+            'ALTER INDEX db_dbcomputer_name_key RENAME TO db_dbcomputer_label_bc480bab_uniq',
+            'ALTER INDEX db_dbcomputer_label_bc480bab_uniq RENAME TO db_dbcomputer_name_key',
+        ),
+        migrations.RunSQL(
+            'ALTER INDEX db_dbcomputer_name_f1800b1a_like RENAME TO db_dbcomputer_label_bc480bab_like',
+            'ALTER INDEX db_dbcomputer_label_bc480bab_like RENAME TO db_dbcomputer_name_f1800b1a_like',
+        ),
         upgrade_schema_version(REVISION, DOWN_REVISION),
     ]
diff --git a/aiida/backends/sqlalchemy/manager.py b/aiida/backends/sqlalchemy/manager.py
index f6932092aa..a49cd014ac 100644
--- a/aiida/backends/sqlalchemy/manager.py
+++ b/aiida/backends/sqlalchemy/manager.py
@@ -31,7 +31,7 @@ class SqlaBackendManager(BackendManager):
 
     @staticmethod
     @contextlib.contextmanager
-    def alembic_config():
+    def alembic_config(start_transaction=True):
         """Context manager to return an instance of an Alembic configuration.
 
         The current database connection is added in the `attributes` property, through which it can then also be
@@ -41,7 +41,16 @@ def alembic_config():
 
         from . import ENGINE
 
-        with ENGINE.begin() as connection:
+        # Certain migrations, such as altering tables, require that there are no existing transactions
+        # locking the tables.
+        # Presently, ``SqlaSettingsManager.get`` has been found to leave idle transactions,
+        # and so we need to ensure that they are closed.
+        transaction = get_scoped_session().get_transaction()
+        if transaction:
+            transaction.close()
+
+        engine_context = ENGINE.begin if start_transaction else ENGINE.connect
+        with engine_context() as connection:
             dir_path = os.path.dirname(os.path.realpath(__file__))
             config = Config()
             config.set_main_option('script_location', os.path.join(dir_path, ALEMBIC_REL_PATH))
@@ -142,7 +151,7 @@ def migrate_up(self, version: str):
 
         :param version: string with schema version to migrate to
         """
-        with self.alembic_config() as config:
+        with self.alembic_config(start_transaction=False) as config:
             upgrade(config, version)
 
     def migrate_down(self, version: str):
@@ -150,7 +159,7 @@ def migrate_down(self, version: str):
 
         :param version: string with schema version to migrate to
         """
-        with self.alembic_config() as config:
+        with self.alembic_config(start_transaction=False) as config:
             downgrade(config, version)
 
     def _migrate_database_version(self):
diff --git a/aiida/backends/sqlalchemy/migrations/versions/1de112340b16_django_parity_1.py b/aiida/backends/sqlalchemy/migrations/versions/1de112340b16_django_parity_1.py
new file mode 100644
index 0000000000..97c50d7f73
--- /dev/null
+++ b/aiida/backends/sqlalchemy/migrations/versions/1de112340b16_django_parity_1.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+###########################################################################
+# Copyright (c), The AiiDA team. All rights reserved.                     #
+# This file is part of the AiiDA code.                                    #
+#                                                                         #
+# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
+# For further information on the license, see the LICENSE.txt file       #
+# For further information please visit http://www.aiida.net               #
+###########################################################################
+# pylint: disable=invalid-name,no-member
+"""Parity with Django backend (rev: 0048),
+part 1: Ensure fields that will be made non-nullable are not currently null
+
+Revision ID: 1de112340b16
+Revises: 34a831f4286d
+Create Date: 2021-08-24 18:52:45.882712
+
+"""
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects.postgresql import JSONB, UUID
+
+from aiida.common import timezone
+from aiida.common.utils import get_new_uuid
+
+# revision identifiers, used by Alembic.
+revision = '1de112340b16'
+down_revision = '34a831f4286d'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():  # pylint: disable=too-many-statements
+    """Convert null values to default values.
+
+    This migration is performed in preparation for the next migration,
+    which will make these fields non-nullable.
+
+    Note, it is technically possible that the following foreign keys could also be null
+    (since they do not set ``nullable=False`` explicitly):
+    `db_dbauthinfo.aiidauser_id`, `db_dbauthinfo.dbcomputer_id`,
+    `db_dbcomment.dbnode_id`, `db_dbcomment.user_id`,
+    `db_dbgroup.user_id`, `db_dbgroup_dbnode.dbgroup_id`, `db_dbgroup_dbnode.dbnode_id`,
+    `db_dblink.input_id`, `db_dblink.output_id`
+
+    However, there is no default value for these fields, and the Python API does not allow them to be set to `None`,
+    so it would be extremely unlikely for this to be the case.
+
+    Also, `db_dbnode.node_type` and `db_dblink.type` should not be null but, since replacing nulls with an empty
+    string would critically corrupt the provenance graph, we leave such rows to fail the non-null migration.
+    If a user runs into this exception, they can contact us and we can come up with a custom fix for their database.
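+
+    Note also that the JSONB columns (``auth_params``, ``metadata``) can hold either an SQL ``NULL`` or a
+    JSON ``null`` value; the updates below treat both as "null", via the ``is_(None)`` and ``== JSONB.NULL``
+    filters respectively.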
+ + """ + db_dbauthinfo = sa.sql.table( + 'db_dbauthinfo', + sa.Column('enabled', sa.Boolean), + sa.Column('auth_params', JSONB), + sa.Column('metadata', JSONB), + ) + + op.execute(db_dbauthinfo.update().where(db_dbauthinfo.c.enabled.is_(None)).values(enabled=True)) + op.execute(db_dbauthinfo.update().where(db_dbauthinfo.c.auth_params.is_(None)).values(auth_params={})) + op.execute(db_dbauthinfo.update().where(db_dbauthinfo.c.metadata.is_(None)).values(metadata={})) + op.execute(db_dbauthinfo.update().where(db_dbauthinfo.c.auth_params == JSONB.NULL).values(auth_params={})) + op.execute(db_dbauthinfo.update().where(db_dbauthinfo.c.metadata == JSONB.NULL).values(metadata={})) + + db_dbcomment = sa.sql.table( + 'db_dbcomment', + sa.Column('content', sa.Text), + sa.Column('ctime', sa.DateTime(timezone=True)), + sa.Column('mtime', sa.DateTime(timezone=True)), + sa.Column('uuid', UUID(as_uuid=True)), + ) + + op.execute(db_dbcomment.update().where(db_dbcomment.c.content.is_(None)).values(content='')) + op.execute(db_dbcomment.update().where(db_dbcomment.c.mtime.is_(None)).values(mtime=timezone.now())) + op.execute(db_dbcomment.update().where(db_dbcomment.c.ctime.is_(None)).values(ctime=timezone.now())) + op.execute(db_dbcomment.update().where(db_dbcomment.c.uuid.is_(None)).values(uuid=get_new_uuid())) + + db_dbcomputer = sa.sql.table( + 'db_dbcomputer', + sa.Column('description', sa.Text), + sa.Column('hostname', sa.String(255)), + sa.Column('metadata', JSONB), + sa.Column('scheduler_type', sa.String(255)), + sa.Column('transport_type', sa.String(255)), + sa.Column('uuid', UUID(as_uuid=True)), + ) + + op.execute(db_dbcomputer.update().where(db_dbcomputer.c.description.is_(None)).values(description='')) + op.execute(db_dbcomputer.update().where(db_dbcomputer.c.hostname.is_(None)).values(hostname='')) + op.execute(db_dbcomputer.update().where(db_dbcomputer.c.metadata.is_(None)).values(metadata={})) + op.execute(db_dbcomputer.update().where(db_dbcomputer.c.metadata == JSONB.NULL).values(metadata={})) + op.execute(db_dbcomputer.update().where(db_dbcomputer.c.scheduler_type.is_(None)).values(scheduler_type='')) + op.execute(db_dbcomputer.update().where(db_dbcomputer.c.transport_type.is_(None)).values(transport_type='')) + op.execute(db_dbcomputer.update().where(db_dbcomputer.c.uuid.is_(None)).values(uuid=get_new_uuid())) + + db_dbgroup = sa.sql.table( + 'db_dbgroup', + sa.Column('description', sa.Text), + sa.Column('label', sa.String(255)), + sa.Column('time', sa.DateTime(timezone=True)), + sa.Column('type_string', sa.String(255)), + sa.Column('uuid', UUID(as_uuid=True)), + ) + + op.execute(db_dbgroup.update().where(db_dbgroup.c.description.is_(None)).values(description='')) + op.execute(db_dbgroup.update().where(db_dbgroup.c.label.is_(None)).values(label=get_new_uuid())) + op.execute(db_dbgroup.update().where(db_dbgroup.c.time.is_(None)).values(time=timezone.now())) + op.execute(db_dbgroup.update().where(db_dbgroup.c.type_string.is_(None)).values(type_string='core')) + op.execute(db_dbgroup.update().where(db_dbgroup.c.uuid.is_(None)).values(uuid=get_new_uuid())) + + db_dblog = sa.sql.table( + 'db_dblog', + sa.Column('levelname', sa.String(255)), + sa.Column('loggername', sa.String(255)), + sa.Column('message', sa.Text), + sa.Column('metadata', JSONB), + sa.Column('time', sa.DateTime(timezone=True)), + sa.Column('uuid', UUID(as_uuid=True)), + ) + + op.execute(db_dblog.update().where(db_dblog.c.levelname.is_(None)).values(levelname='')) + 
op.execute(db_dblog.update().values(levelname=db_dblog.c.levelname.cast(sa.String(50)))) + op.execute(db_dblog.update().where(db_dblog.c.loggername.is_(None)).values(loggername='')) + op.execute(db_dblog.update().where(db_dblog.c.message.is_(None)).values(message='')) + op.execute(db_dblog.update().where(db_dblog.c.metadata.is_(None)).values(metadata={})) + op.execute(db_dblog.update().where(db_dblog.c.metadata == JSONB.NULL).values(metadata={})) + op.execute(db_dblog.update().where(db_dblog.c.time.is_(None)).values(time=timezone.now())) + op.execute(db_dblog.update().where(db_dblog.c.uuid.is_(None)).values(uuid=get_new_uuid())) + + db_dbnode = sa.sql.table( + 'db_dbnode', + sa.Column('ctime', sa.DateTime(timezone=True)), + sa.Column('description', sa.Text), + sa.Column('label', sa.String(255)), + sa.Column('mtime', sa.DateTime(timezone=True)), + sa.Column('node_type', sa.String(255)), + sa.Column('uuid', UUID(as_uuid=True)), + ) + + op.execute(db_dbnode.update().where(db_dbnode.c.ctime.is_(None)).values(ctime=timezone.now())) + op.execute(db_dbnode.update().where(db_dbnode.c.description.is_(None)).values(description='')) + op.execute(db_dbnode.update().where(db_dbnode.c.label.is_(None)).values(label='')) + op.execute(db_dbnode.update().where(db_dbnode.c.mtime.is_(None)).values(mtime=timezone.now())) + op.execute(db_dbnode.update().where(db_dbnode.c.uuid.is_(None)).values(uuid=get_new_uuid())) + + db_dbsetting = sa.sql.table( + 'db_dbsetting', + sa.Column('time', sa.DateTime(timezone=True)), + ) + + op.execute(db_dbsetting.update().where(db_dbsetting.c.time.is_(None)).values(time=timezone.now())) + + db_dbuser = sa.sql.table( + 'db_dbuser', + sa.Column('email', sa.String(254)), + sa.Column('first_name', sa.String(254)), + sa.Column('last_name', sa.String(254)), + sa.Column('institution', sa.String(254)), + ) + + op.execute(db_dbuser.update().where(db_dbuser.c.email.is_(None)).values(email=get_new_uuid())) + op.execute(db_dbuser.update().where(db_dbuser.c.first_name.is_(None)).values(first_name='')) + op.execute(db_dbuser.update().where(db_dbuser.c.last_name.is_(None)).values(last_name='')) + op.execute(db_dbuser.update().where(db_dbuser.c.institution.is_(None)).values(institution='')) + + +def downgrade(): + """Downgrade database schema.""" + # No need to convert the values back to null diff --git a/aiida/backends/sqlalchemy/migrations/versions/1de112340b17_django_parity_2.py b/aiida/backends/sqlalchemy/migrations/versions/1de112340b17_django_parity_2.py new file mode 100644 index 0000000000..84c1da2286 --- /dev/null +++ b/aiida/backends/sqlalchemy/migrations/versions/1de112340b17_django_parity_2.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +# pylint: disable=invalid-name,no-member +"""Parity with Django backend (rev: 0048), +part 2: Alter columns to be non-nullable and change type of some columns. 
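+(specifically, ``db_dblog.levelname`` is narrowed to ``String(50)``, ``db_dbsetting.key`` is widened to
+``String(1024)`` and ``db_dbsetting.description`` becomes ``Text``)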
+ +Revision ID: 1de112340b17 +Revises: 1de112340b16 +Create Date: 2021-08-25 04:28:52.102767 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB, UUID + +# revision identifiers, used by Alembic. +revision = '1de112340b17' +down_revision = '1de112340b16' +branch_labels = None +depends_on = None + + +def upgrade(): + """Upgrade database schema.""" + op.alter_column('db_dbauthinfo', 'aiidauser_id', existing_type=sa.INTEGER(), nullable=False) + op.alter_column('db_dbauthinfo', 'dbcomputer_id', existing_type=sa.INTEGER(), nullable=False) + op.alter_column('db_dbauthinfo', 'metadata', existing_type=JSONB, nullable=False) + op.alter_column('db_dbauthinfo', 'auth_params', existing_type=JSONB, nullable=False) + op.alter_column('db_dbauthinfo', 'enabled', existing_type=sa.BOOLEAN(), nullable=False) + + op.alter_column('db_dbcomment', 'dbnode_id', existing_type=sa.INTEGER(), nullable=False) + op.alter_column('db_dbcomment', 'user_id', existing_type=sa.INTEGER(), nullable=False) + op.alter_column('db_dbcomment', 'content', existing_type=sa.TEXT(), nullable=False) + op.alter_column('db_dbcomment', 'ctime', existing_type=sa.DateTime(timezone=True), nullable=False) + op.alter_column('db_dbcomment', 'mtime', existing_type=sa.DateTime(timezone=True), nullable=False) + op.alter_column('db_dbcomment', 'uuid', existing_type=UUID(as_uuid=True), nullable=False) + + op.alter_column('db_dbcomputer', 'description', existing_type=sa.TEXT(), nullable=False) + op.alter_column('db_dbcomputer', 'hostname', existing_type=sa.String(255), nullable=False) + op.alter_column('db_dbcomputer', 'metadata', existing_type=JSONB, nullable=False) + op.alter_column('db_dbcomputer', 'scheduler_type', existing_type=sa.String(255), nullable=False) + op.alter_column('db_dbcomputer', 'transport_type', existing_type=sa.String(255), nullable=False) + op.alter_column('db_dbcomputer', 'uuid', existing_type=UUID(as_uuid=True), nullable=False) + + op.alter_column('db_dbgroup', 'user_id', existing_type=sa.INTEGER(), nullable=False) + op.alter_column('db_dbgroup', 'description', existing_type=sa.TEXT(), nullable=False) + op.alter_column('db_dbgroup', 'label', existing_type=sa.String(255), nullable=False) + op.alter_column('db_dbgroup', 'time', existing_type=sa.DateTime(timezone=True), nullable=False) + op.alter_column('db_dbgroup', 'type_string', existing_type=sa.String(255), nullable=False) + op.alter_column('db_dbgroup', 'uuid', existing_type=UUID(as_uuid=True), nullable=False) + + op.alter_column('db_dbgroup_dbnodes', 'dbnode_id', existing_type=sa.INTEGER(), nullable=False) + op.alter_column('db_dbgroup_dbnodes', 'dbgroup_id', existing_type=sa.INTEGER(), nullable=False) + + op.alter_column('db_dblink', 'type', existing_type=sa.String(255), nullable=False) + op.alter_column('db_dblink', 'input_id', existing_type=sa.INTEGER(), nullable=False) + op.alter_column('db_dblink', 'output_id', existing_type=sa.INTEGER(), nullable=False) + + op.alter_column('db_dblog', 'levelname', existing_type=sa.String(255), type_=sa.String(50), nullable=False) + op.alter_column('db_dblog', 'loggername', existing_type=sa.String(255), nullable=False) + op.alter_column('db_dblog', 'message', existing_type=sa.TEXT(), nullable=False) + op.alter_column('db_dblog', 'time', existing_type=sa.DateTime(timezone=True), nullable=False) + op.alter_column('db_dblog', 'uuid', existing_type=UUID(as_uuid=True), nullable=False) + op.alter_column('db_dblog', 'metadata', existing_type=JSONB, nullable=False) + + 
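# note: part 1 deliberately does not back-fill ``db_dbnode.node_type``, so any rows where it
+    # is still null will cause the ``nullable=False`` alteration below to fail
+    # (see the docstring of revision 1de112340b16)
+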
op.alter_column('db_dbnode', 'ctime', existing_type=sa.DateTime(timezone=True), nullable=False) + op.alter_column('db_dbnode', 'description', existing_type=sa.TEXT(), nullable=False) + op.alter_column('db_dbnode', 'label', existing_type=sa.String(255), nullable=False) + op.alter_column('db_dbnode', 'mtime', existing_type=sa.DateTime(timezone=True), nullable=False) + op.alter_column('db_dbnode', 'node_type', existing_type=sa.String(255), nullable=False) + op.alter_column('db_dbnode', 'uuid', existing_type=UUID(as_uuid=True), nullable=False) + + op.alter_column('db_dbsetting', 'time', existing_type=sa.DateTime(timezone=True), nullable=False) + op.alter_column('db_dbsetting', 'key', existing_type=sa.String(255), type_=sa.String(1024), nullable=False) + op.alter_column('db_dbsetting', 'description', existing_type=sa.String(255), type_=sa.Text(), nullable=False) + + op.alter_column('db_dbuser', 'email', existing_type=sa.String(254), nullable=False) + op.alter_column('db_dbuser', 'first_name', existing_type=sa.String(254), nullable=False) + op.alter_column('db_dbuser', 'last_name', existing_type=sa.String(254), nullable=False) + op.alter_column('db_dbuser', 'institution', existing_type=sa.String(254), nullable=False) + + +def downgrade(): + """Downgrade database schema.""" + op.alter_column('db_dbuser', 'institution', existing_type=sa.String(254), nullable=True) + op.alter_column('db_dbuser', 'last_name', existing_type=sa.String(254), nullable=True) + op.alter_column('db_dbuser', 'first_name', existing_type=sa.String(254), nullable=True) + op.alter_column('db_dbuser', 'email', existing_type=sa.String(254), nullable=True) + + op.alter_column('db_dbsetting', 'time', existing_type=sa.DateTime(timezone=True), nullable=True) + op.alter_column('db_dbsetting', 'key', existing_type=sa.String(1024), type_=sa.String(255), nullable=False) + op.alter_column('db_dbsetting', 'description', existing_type=sa.Text(), type_=sa.String(255), nullable=False) + + op.alter_column('db_dbnode', 'ctime', existing_type=sa.DateTime(timezone=True), nullable=True) + op.alter_column('db_dbnode', 'description', existing_type=sa.TEXT(), nullable=True) + op.alter_column('db_dbnode', 'label', existing_type=sa.String(255), nullable=True) + op.alter_column('db_dbnode', 'mtime', existing_type=sa.DateTime(timezone=True), nullable=True) + op.alter_column('db_dbnode', 'node_type', existing_type=sa.String(255), nullable=True) + op.alter_column('db_dbnode', 'uuid', existing_type=UUID(as_uuid=True), nullable=True) + + op.alter_column('db_dblog', 'metadata', existing_type=JSONB, nullable=True) + op.alter_column('db_dblog', 'message', existing_type=sa.TEXT(), nullable=True) + op.alter_column('db_dblog', 'levelname', existing_type=sa.String(50), type_=sa.String(255), nullable=True) + op.alter_column('db_dblog', 'loggername', existing_type=sa.String(255), nullable=True) + op.alter_column('db_dblog', 'time', existing_type=sa.DateTime(timezone=True), nullable=True) + op.alter_column('db_dblog', 'uuid', existing_type=UUID(as_uuid=True), nullable=True) + + op.alter_column('db_dblink', 'output_id', existing_type=sa.INTEGER(), nullable=True) + op.alter_column('db_dblink', 'input_id', existing_type=sa.INTEGER(), nullable=True) + op.alter_column('db_dblink', 'type', existing_type=sa.String(255), nullable=True) + + op.alter_column('db_dbgroup_dbnodes', 'dbgroup_id', existing_type=sa.INTEGER(), nullable=True) + op.alter_column('db_dbgroup_dbnodes', 'dbnode_id', existing_type=sa.INTEGER(), nullable=True) + + op.alter_column('db_dbgroup', 'user_id', 
existing_type=sa.INTEGER(), nullable=True) + op.alter_column('db_dbgroup', 'description', existing_type=sa.TEXT(), nullable=True) + op.alter_column('db_dbgroup', 'time', existing_type=sa.DateTime(timezone=True), nullable=True) + op.alter_column('db_dbgroup', 'type_string', existing_type=sa.String(255), nullable=True) + op.alter_column('db_dbgroup', 'label', existing_type=sa.String(255), nullable=True) + op.alter_column('db_dbgroup', 'uuid', existing_type=UUID(as_uuid=True), nullable=True) + + op.alter_column('db_dbcomputer', 'metadata', existing_type=JSONB, nullable=True) + op.alter_column('db_dbcomputer', 'transport_type', existing_type=sa.String(255), nullable=True) + op.alter_column('db_dbcomputer', 'scheduler_type', existing_type=sa.String(255), nullable=True) + op.alter_column('db_dbcomputer', 'description', existing_type=sa.TEXT(), nullable=True) + op.alter_column('db_dbcomputer', 'hostname', existing_type=sa.String(255), nullable=True) + op.alter_column('db_dbcomputer', 'uuid', existing_type=UUID(as_uuid=True), nullable=True) + + op.alter_column('db_dbcomment', 'user_id', existing_type=sa.INTEGER(), nullable=True) + op.alter_column('db_dbcomment', 'dbnode_id', existing_type=sa.INTEGER(), nullable=True) + op.alter_column('db_dbcomment', 'content', existing_type=sa.TEXT(), nullable=True) + op.alter_column('db_dbcomment', 'ctime', existing_type=sa.DateTime(timezone=True), nullable=True) + op.alter_column('db_dbcomment', 'mtime', existing_type=sa.DateTime(timezone=True), nullable=True) + op.alter_column('db_dbcomment', 'uuid', existing_type=UUID(as_uuid=True), nullable=True) + + op.alter_column('db_dbauthinfo', 'dbcomputer_id', existing_type=sa.INTEGER(), nullable=True) + op.alter_column('db_dbauthinfo', 'aiidauser_id', existing_type=sa.INTEGER(), nullable=True) + op.alter_column('db_dbauthinfo', 'enabled', existing_type=sa.BOOLEAN(), nullable=True) + op.alter_column('db_dbauthinfo', 'auth_params', existing_type=JSONB, nullable=True) + op.alter_column('db_dbauthinfo', 'metadata', existing_type=JSONB, nullable=True) diff --git a/aiida/backends/sqlalchemy/migrations/versions/1de112340b18_django_parity_3.py b/aiida/backends/sqlalchemy/migrations/versions/1de112340b18_django_parity_3.py new file mode 100644 index 0000000000..b6caf6531f --- /dev/null +++ b/aiida/backends/sqlalchemy/migrations/versions/1de112340b18_django_parity_3.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +# pylint: disable=invalid-name,no-member +"""Parity with Django backend (rev: 0048), +part 3: Add PostgreSQL-specific indexes + +Revision ID: 1de112340b18 +Revises: 1de112340b17 +Create Date: 2021-08-25 04:28:52.102767 + +""" +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = '1de112340b18'
+down_revision = '1de112340b17'
+branch_labels = None
+depends_on = None
+
+# table name, column names, unique, index name
+MISSING_STANDARD_INDEXES = (
+    ('db_dbauthinfo', ('aiidauser_id',), False, 'db_dbauthinfo_aiidauser_id_0684fdfb'),
+    ('db_dbauthinfo', ('dbcomputer_id',), False, 'db_dbauthinfo_dbcomputer_id_424f7ac4'),
+    ('db_dbcomment', ('dbnode_id',), False, 'db_dbcomment_dbnode_id_3b812b6b'),
+    ('db_dbcomment', ('user_id',), False, 'db_dbcomment_user_id_8ed5e360'),
+    ('db_dbgroup', ('user_id',), False, 'db_dbgroup_user_id_100f8a51'),
+    ('db_dblog', ('dbnode_id',), False, 'db_dblog_dbnode_id_da34b732'),
+    ('db_dbnode', ('ctime',), False, 'db_dbnode_ctime_71626ef5'),
+    ('db_dbnode', ('mtime',), False, 'db_dbnode_mtime_0554ea3d'),
+    ('db_dbnode', ('dbcomputer_id',), False, 'db_dbnode_dbcomputer_id_315372a3'),
+    ('db_dbnode', ('user_id',), False, 'db_dbnode_user_id_12e7aeaf'),
+)
+
+# table name, column name, index name
+MISSING_VARCHAR_INDEXES = (
+    ('db_dbcomputer', 'label', 'db_dbcomputer_label_bc480bab_like'),
+    ('db_dbgroup', 'label', 'db_dbgroup_name_66c75272_like'),
+    ('db_dbgroup', 'type_string', 'db_dbgroup_type_23b2a748_like'),
+    ('db_dblink', 'label', 'db_dblink_label_f1343cfb_like'),
+    ('db_dblink', 'type', 'db_dblink_type_229f212b_like'),
+    ('db_dblog', 'levelname', 'db_dblog_levelname_ad5dc346_like'),
+    ('db_dblog', 'loggername', 'db_dblog_loggername_00b5ba16_like'),
+    ('db_dbnode', 'label', 'db_dbnode_label_6469539e_like'),
+    ('db_dbnode', 'node_type', 'db_dbnode_type_a8ce9753_like'),
+    ('db_dbnode', 'process_type', 'db_dbnode_process_type_df7298d0_like'),
+    ('db_dbsetting', 'key', 'db_dbsetting_key_1b84beb4_like'),
+    ('db_dbuser', 'email', 'db_dbuser_email_30150b7e_like'),
+)
+
+# table name, column names, constraint name
+DROP_UNIQUE_CONSTRAINTS = (
+    ('db_dbauthinfo', ('aiidauser_id', 'dbcomputer_id'), 'db_dbauthinfo_aiidauser_id_dbcomputer_id_key'),
+    ('db_dbcomment', ('uuid',), 'db_dbcomment_uuid_key'),
+    ('db_dbcomputer', ('label',), 'db_dbcomputer_label_key'),
+    ('db_dbcomputer', ('uuid',), 'db_dbcomputer_uuid_key'),
+    ('db_dbgroup', ('label', 'type_string'), 'db_dbgroup_label_type_string_key'),
+    ('db_dbgroup_dbnodes', ('dbgroup_id', 'dbnode_id'), 'db_dbgroup_dbnodes_dbgroup_id_dbnode_id_key'),
+    ('db_dbgroup', ('uuid',), 'db_dbgroup_uuid_key'),
+    ('db_dblog', ('uuid',), 'db_dblog_uuid_key'),
+    ('db_dbnode', ('uuid',), 'db_dbnode_uuid_key'),
+    ('db_dbsetting', ('key',), 'db_dbsetting_key_key'),
+)
+
+# table name, column names, constraint name
+ADD_UNIQUE_CONSTRAINTS = (
+    ('db_dbauthinfo', ('aiidauser_id', 'dbcomputer_id'), 'db_dbauthinfo_aiidauser_id_dbcomputer_id_777cdaa8_uniq'),
+    ('db_dbcomment', ('uuid',), 'db_dbcomment_uuid_49bac08c_uniq'),
+    ('db_dbcomputer', ('label',), 'db_dbcomputer_label_bc480bab_uniq'),
+    ('db_dbcomputer', ('uuid',), 'db_dbcomputer_uuid_f35defa6_uniq'),
+    ('db_dbgroup', ('label', 'type_string'), 'db_dbgroup_name_type_12656f33_uniq'),
+    ('db_dbgroup', ('uuid',), 'db_dbgroup_uuid_af896177_uniq'),
+    ('db_dbgroup_dbnodes', ('dbgroup_id', 'dbnode_id'), 'db_dbgroup_dbnodes_dbgroup_id_dbnode_id_eee23cce_uniq'),
+    ('db_dblog', ('uuid',), 'db_dblog_uuid_9cf77df3_uniq'),
+    ('db_dbnode', ('uuid',), 'db_dbnode_uuid_62e0bf98_uniq'),
+    ('db_dbuser', ('email',), 'db_dbuser_email_30150b7e_uniq'),
+    ('db_dbsetting', ('key',), 'db_dbsetting_key_1b84beb4_uniq'),
+)
+
+# table name, column names, unique, old name, new name
+RENAMED_INDEXES = (
+    ('db_dbgroup', ('label',), False, 'ix_db_dbgroup_label', 'db_dbgroup_name_66c75272'),
+    ('db_dbgroup', ('type_string',), False, 'ix_db_dbgroup_type_string', 'db_dbgroup_type_23b2a748'),
+    (
+        'db_dbgroup_dbnodes', ('dbgroup_id',), False, 'db_dbgroup_dbnodes_dbgroup_id_idx',
+        'db_dbgroup_dbnodes_dbgroup_id_9d3a0f9d'
+    ),
+    (
+        'db_dbgroup_dbnodes', ('dbnode_id',), False, 'db_dbgroup_dbnodes_dbnode_id_idx',
+        'db_dbgroup_dbnodes_dbnode_id_118b9439'
+    ),
+    ('db_dblink', ('input_id',), False, 'ix_db_dblink_input_id', 'db_dblink_input_id_9245bd73'),
+    ('db_dblink', ('label',), False, 'ix_db_dblink_label', 'db_dblink_label_f1343cfb'),
+    ('db_dblink', ('output_id',), False, 'ix_db_dblink_output_id', 'db_dblink_output_id_c0167528'),
+    ('db_dblink', ('type',), False, 'ix_db_dblink_type', 'db_dblink_type_229f212b'),
+    ('db_dblog', ('levelname',), False, 'ix_db_dblog_levelname', 'db_dblog_levelname_ad5dc346'),
+    ('db_dblog', ('loggername',), False, 'ix_db_dblog_loggername', 'db_dblog_loggername_00b5ba16'),
+    ('db_dbnode', ('label',), False, 'ix_db_dbnode_label', 'db_dbnode_label_6469539e'),
+    ('db_dbnode', ('node_type',), False, 'ix_db_dbnode_node_type', 'db_dbnode_type_a8ce9753'),
+    ('db_dbnode', ('process_type',), False, 'ix_db_dbnode_process_type', 'db_dbnode_process_type_df7298d0'),
+)
+
+# table name, column names, unique, name
+DROP_INDEXES = (
+    ('db_dbsetting', ('key',), True, 'ix_db_dbsetting_key'),
+    ('db_dbuser', ('email',), True, 'ix_db_dbuser_email'),
+)
+
+
+def upgrade():
+    """Add, rename and replace indexes and unique constraints to match Django's auto-generated names."""
+    # drop unique constraints
+    for tbl_name, _, con_name in DROP_UNIQUE_CONSTRAINTS:
+        op.drop_constraint(
+            con_name,
+            tbl_name,
+        )
+    # drop indexes
+    for tbl_name, _, _, con_name in DROP_INDEXES:
+        op.drop_index(
+            con_name,
+            table_name=tbl_name,
+        )
+    # Add missing standard indexes
+    for tbl_name, col_names, unique, key_name in MISSING_STANDARD_INDEXES:
+        op.create_index(
+            key_name,
+            table_name=tbl_name,
+            columns=col_names,
+            unique=unique,
+        )
+
+    # Add missing PostgreSQL-specific indexes for strings
+    # these improve performance when filtering on string patterns (e.g. LIKE)
+    for tbl_name, col_name, key_name in MISSING_VARCHAR_INDEXES:
+        op.create_index(
+            key_name,
+            tbl_name,
+            [col_name],
+            unique=False,
+            postgresql_using='btree',
+            postgresql_ops={col_name: 'varchar_pattern_ops'},
+        )
+    # rename indexes
+    for tbl_name, columns, unique, old_col_name, new_col_name in RENAMED_INDEXES:
+        op.drop_index(
+            old_col_name,
+            table_name=tbl_name,
+        )
+        op.create_index(
+            new_col_name,
+            tbl_name,
+            columns,
+            unique=unique,
+        )
+    # add unique constraints
+    for tbl_name, columns, con_name in ADD_UNIQUE_CONSTRAINTS:
+        op.create_unique_constraint(
+            con_name,
+            tbl_name,
+            columns,
+        )
+
+
+def downgrade():
+    """Revert the index and unique-constraint changes."""
+    # drop the unique constraints added in ``upgrade``
+    for tbl_name, _, con_name in ADD_UNIQUE_CONSTRAINTS:
+        op.drop_constraint(
+            con_name,
+            tbl_name,
+        )
+    # drop the standard indexes added in ``upgrade``
+    for tbl_name, _, _, key_name in MISSING_STANDARD_INDEXES:
+        op.drop_index(
+            key_name,
+            table_name=tbl_name,
+        )
+
+    # drop the PostgreSQL-specific indexes added in ``upgrade``
+    for tbl_name, col_name, key_name in MISSING_VARCHAR_INDEXES:
+        op.drop_index(
+            key_name,
+            table_name=tbl_name,
+            postgresql_using='btree',
+            postgresql_ops={col_name: 'varchar_pattern_ops'},
+        )
+    # drop the renamed indexes (new names)
+    for tbl_name, _, _, _, new_col_name in RENAMED_INDEXES:
+        op.drop_index(
+            new_col_name,
+            table_name=tbl_name,
+        )
+    # recreate the renamed indexes under their old names
+    for tbl_name, columns, unique, old_col_name, _ in RENAMED_INDEXES:
+        op.create_index(
+            old_col_name,
+            tbl_name,
+            columns,
+            unique=unique,
+        )
+    # recreate the dropped indexes
+    for tbl_name, columns, 
unique, con_name in DROP_INDEXES: + op.create_index( + con_name, + tbl_name, + columns, + unique=unique, + ) + # add unique constraints + for tbl_name, columns, con_name in DROP_UNIQUE_CONSTRAINTS: + op.create_unique_constraint( + con_name, + tbl_name, + columns, + ) diff --git a/aiida/backends/sqlalchemy/migrations/versions/1feaea71bd5a_migrate_repository.py b/aiida/backends/sqlalchemy/migrations/versions/1feaea71bd5a_migrate_repository.py index cc33aac2ca..d7cdfb10fe 100644 --- a/aiida/backends/sqlalchemy/migrations/versions/1feaea71bd5a_migrate_repository.py +++ b/aiida/backends/sqlalchemy/migrations/versions/1feaea71bd5a_migrate_repository.py @@ -111,8 +111,8 @@ def upgrade(): container_id = backend.get_repository().uuid statement = text( f""" - INSERT INTO db_dbsetting (key, val, description) - VALUES ('repository|uuid', to_json('{container_id}'::text), 'Repository UUID') + INSERT INTO db_dbsetting (key, val, description, time) + VALUES ('repository|uuid', to_json('{container_id}'::text), 'Repository UUID', NOW()) ON CONFLICT (key) DO NOTHING; """ ) diff --git a/aiida/backends/sqlalchemy/migrations/versions/7536a82b2cc4_add_node_repository_metadata.py b/aiida/backends/sqlalchemy/migrations/versions/7536a82b2cc4_add_node_repository_metadata.py index 0a2d9148ea..a9dcc55679 100644 --- a/aiida/backends/sqlalchemy/migrations/versions/7536a82b2cc4_add_node_repository_metadata.py +++ b/aiida/backends/sqlalchemy/migrations/versions/7536a82b2cc4_add_node_repository_metadata.py @@ -30,10 +30,7 @@ def upgrade(): """Migrations for the upgrade.""" # We add the column with a `server_default` because otherwise the migration would fail since existing rows will not # have a value and violate the not-nullable clause. - op.add_column( - 'db_dbnode', - sa.Column('repository_metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=False, server_default='{}') - ) + op.add_column('db_dbnode', sa.Column('repository_metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=True)) def downgrade(): diff --git a/aiida/backends/sqlalchemy/models/authinfo.py b/aiida/backends/sqlalchemy/models/authinfo.py index 3aba011b8b..b8c7944235 100644 --- a/aiida/backends/sqlalchemy/models/authinfo.py +++ b/aiida/backends/sqlalchemy/models/authinfo.py @@ -14,6 +14,7 @@ from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.orm import relationship from sqlalchemy.schema import Column, UniqueConstraint +from sqlalchemy.sql.schema import Index from sqlalchemy.types import Boolean, Integer from .base import Base @@ -31,21 +32,32 @@ class DbAuthInfo(Base): id = Column(Integer, primary_key=True) # pylint: disable=invalid-name aiidauser_id = Column( - Integer, ForeignKey('db_dbuser.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED') + Integer, + ForeignKey('db_dbuser.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED'), + nullable=False, ) dbcomputer_id = Column( - Integer, ForeignKey('db_dbcomputer.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED') + Integer, + ForeignKey('db_dbcomputer.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED'), + nullable=False ) aiidauser = relationship('DbUser', backref='authinfos') dbcomputer = relationship('DbComputer', backref='authinfos') - _metadata = Column('metadata', JSONB) - auth_params = Column(JSONB) + _metadata = Column('metadata', JSONB, default=dict, nullable=False) + auth_params = Column(JSONB, default=dict, nullable=False) - enabled = Column(Boolean, default=True) + enabled = Column(Boolean, default=True, nullable=False) - 
__table_args__ = (UniqueConstraint('aiidauser_id', 'dbcomputer_id'),) + __table_args__ = ( + # constraint/index names mirror django's auto-generated ones + UniqueConstraint( + 'aiidauser_id', 'dbcomputer_id', name='db_dbauthinfo_aiidauser_id_dbcomputer_id_777cdaa8_uniq' + ), + Index('db_dbauthinfo_aiidauser_id_0684fdfb', aiidauser_id), + Index('db_dbauthinfo_dbcomputer_id_424f7ac4', dbcomputer_id), + ) def __init__(self, *args, **kwargs): self._metadata = {} diff --git a/aiida/backends/sqlalchemy/models/comment.py b/aiida/backends/sqlalchemy/models/comment.py index 6fd1fa1642..fe555c5315 100644 --- a/aiida/backends/sqlalchemy/models/comment.py +++ b/aiida/backends/sqlalchemy/models/comment.py @@ -14,6 +14,7 @@ from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from sqlalchemy.schema import Column +from sqlalchemy.sql.schema import Index, UniqueConstraint from sqlalchemy.types import DateTime, Integer, Text from aiida.backends.sqlalchemy.models.base import Base @@ -28,18 +29,29 @@ class DbComment(Base): id = Column(Integer, primary_key=True) # pylint: disable=invalid-name - uuid = Column(UUID(as_uuid=True), default=get_new_uuid, unique=True) - dbnode_id = Column(Integer, ForeignKey('db_dbnode.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED')) + uuid = Column(UUID(as_uuid=True), default=get_new_uuid, nullable=False) + dbnode_id = Column( + Integer, ForeignKey('db_dbnode.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED'), nullable=False + ) - ctime = Column(DateTime(timezone=True), default=timezone.now) - mtime = Column(DateTime(timezone=True), default=timezone.now, onupdate=timezone.now) + ctime = Column(DateTime(timezone=True), default=timezone.now, nullable=False) + mtime = Column(DateTime(timezone=True), default=timezone.now, onupdate=timezone.now, nullable=False) - user_id = Column(Integer, ForeignKey('db_dbuser.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED')) - content = Column(Text, nullable=True) + user_id = Column( + Integer, ForeignKey('db_dbuser.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED'), nullable=False + ) + content = Column(Text, default='', nullable=False) dbnode = relationship('DbNode', backref='dbcomments') user = relationship('DbUser') + __table_args__ = ( + # index/constraint names mirror django's auto-generated ones + UniqueConstraint(uuid, name='db_dbcomment_uuid_49bac08c_uniq'), + Index('db_dbcomment_dbnode_id_3b812b6b', dbnode_id), + Index('db_dbcomment_user_id_8ed5e360', user_id), + ) + def __str__(self): return 'DbComment for [{} {}] on {}'.format( self.dbnode.get_simple_name(), self.dbnode.id, diff --git a/aiida/backends/sqlalchemy/models/computer.py b/aiida/backends/sqlalchemy/models/computer.py index e7ad7ea328..6a2d65eba0 100644 --- a/aiida/backends/sqlalchemy/models/computer.py +++ b/aiida/backends/sqlalchemy/models/computer.py @@ -11,6 +11,7 @@ """Module to manage computers for the SQLA backend.""" from sqlalchemy.dialects.postgresql import JSONB, UUID from sqlalchemy.schema import Column +from sqlalchemy.sql.schema import Index, UniqueConstraint from sqlalchemy.types import Integer, String, Text from aiida.backends.sqlalchemy.models.base import Base @@ -31,13 +32,25 @@ class DbComputer(Base): __tablename__ = 'db_dbcomputer' id = Column(Integer, primary_key=True) # pylint: disable=invalid-name - uuid = Column(UUID(as_uuid=True), default=get_new_uuid, unique=True) - label = Column(String(255), unique=True, nullable=False) - hostname = Column(String(255)) - 
description = Column(Text, nullable=True) - scheduler_type = Column(String(255)) - transport_type = Column(String(255)) - _metadata = Column('metadata', JSONB) + uuid = Column(UUID(as_uuid=True), default=get_new_uuid, nullable=False) + label = Column(String(255), nullable=False) + hostname = Column(String(255), default='', nullable=False) + description = Column(Text, default='', nullable=False) + scheduler_type = Column(String(255), default='', nullable=False) + transport_type = Column(String(255), default='', nullable=False) + _metadata = Column('metadata', JSONB, default=dict, nullable=False) + + __table_args__ = ( + # index names mirror django's auto-generated ones + UniqueConstraint(uuid, name='db_dbcomputer_uuid_f35defa6_uniq'), + UniqueConstraint(label, name='db_dbcomputer_label_bc480bab_uniq'), + Index( + 'db_dbcomputer_label_bc480bab_like', + label, + postgresql_using='btree', + postgresql_ops={'label': 'varchar_pattern_ops'} + ), + ) def __init__(self, *args, **kwargs): """Provide _metadata and description attributes to the class.""" diff --git a/aiida/backends/sqlalchemy/models/group.py b/aiida/backends/sqlalchemy/models/group.py index 6a97c0f8f5..8c50d49744 100644 --- a/aiida/backends/sqlalchemy/models/group.py +++ b/aiida/backends/sqlalchemy/models/group.py @@ -13,7 +13,7 @@ from sqlalchemy import ForeignKey from sqlalchemy.dialects.postgresql import JSONB, UUID from sqlalchemy.orm import backref, relationship -from sqlalchemy.schema import Column, Index, Table, UniqueConstraint +from sqlalchemy.schema import Column, Index, UniqueConstraint from sqlalchemy.types import DateTime, Integer, String, Text from aiida.common import timezone @@ -21,20 +21,23 @@ from .base import Base -table_groups_nodes = Table( # pylint: disable=invalid-name - 'db_dbgroup_dbnodes', - Base.metadata, - Column('id', Integer, primary_key=True), - Column('dbnode_id', Integer, ForeignKey('db_dbnode.id', deferrable=True, initially='DEFERRED')), - Column('dbgroup_id', Integer, ForeignKey('db_dbgroup.id', deferrable=True, initially='DEFERRED')), - UniqueConstraint('dbgroup_id', 'dbnode_id', name='db_dbgroup_dbnodes_dbgroup_id_dbnode_id_key'), -) - class DbGroupNode(Base): """Database model to store group-to-nodes relations.""" - __tablename__ = table_groups_nodes.name - __table__ = table_groups_nodes + __tablename__ = 'db_dbgroup_dbnodes' + + id = Column(Integer, primary_key=True) + dbnode_id = Column(Integer, ForeignKey('db_dbnode.id', deferrable=True, initially='DEFERRED'), nullable=False) + dbgroup_id = Column(Integer, ForeignKey('db_dbgroup.id', deferrable=True, initially='DEFERRED'), nullable=False) + + __table_args__ = ( + UniqueConstraint('dbgroup_id', 'dbnode_id', name='db_dbgroup_dbnodes_dbgroup_id_dbnode_id_eee23cce_uniq'), + Index('db_dbgroup_dbnodes_dbgroup_id_9d3a0f9d', dbgroup_id), + Index('db_dbgroup_dbnodes_dbnode_id_118b9439', dbnode_id), + ) + + +table_groups_nodes = DbGroupNode.__table__ class DbGroup(Base): @@ -50,25 +53,45 @@ class DbGroup(Base): id = Column(Integer, primary_key=True) # pylint: disable=invalid-name - uuid = Column(UUID(as_uuid=True), default=get_new_uuid, unique=True) - label = Column(String(255), index=True) + uuid = Column(UUID(as_uuid=True), default=get_new_uuid, nullable=False) + label = Column(String(255), nullable=False) - type_string = Column(String(255), default='', index=True) + type_string = Column(String(255), default='', nullable=False) - time = Column(DateTime(timezone=True), default=timezone.now) - description = Column(Text, nullable=True) + time = 
Column(DateTime(timezone=True), default=timezone.now, nullable=False)
+    description = Column(Text, default='', nullable=False)
 
     extras = Column(JSONB, default=dict, nullable=False)
 
-    user_id = Column(Integer, ForeignKey('db_dbuser.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED'))
+    user_id = Column(
+        Integer,
+        ForeignKey('db_dbuser.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED'),
+        nullable=False,
+    )
     user = relationship('DbUser', backref=backref('dbgroups', cascade='merge'))
     dbnodes = relationship('DbNode', secondary=table_groups_nodes, backref='dbgroups', lazy='dynamic')
 
-    __table_args__ = (UniqueConstraint('label', 'type_string'),)
-
-    Index('db_dbgroup_dbnodes_dbnode_id_idx', table_groups_nodes.c.dbnode_id)
-    Index('db_dbgroup_dbnodes_dbgroup_id_idx', table_groups_nodes.c.dbgroup_id)
+    __table_args__ = (
+        # index/constraint names mirror django's auto-generated ones
+        UniqueConstraint('label', 'type_string', name='db_dbgroup_name_type_12656f33_uniq'),
+        UniqueConstraint(uuid, name='db_dbgroup_uuid_af896177_uniq'),
+        Index('db_dbgroup_name_66c75272', label),
+        Index('db_dbgroup_type_23b2a748', type_string),
+        Index('db_dbgroup_user_id_100f8a51', user_id),
+        Index(
+            'db_dbgroup_name_66c75272_like',
+            label,
+            postgresql_using='btree',
+            postgresql_ops={'label': 'varchar_pattern_ops'}
+        ),
+        Index(
+            'db_dbgroup_type_23b2a748_like',
+            type_string,
+            postgresql_using='btree',
+            postgresql_ops={'type_string': 'varchar_pattern_ops'}
+        ),
+    )
 
     @property
     def pk(self):
diff --git a/aiida/backends/sqlalchemy/models/log.py b/aiida/backends/sqlalchemy/models/log.py
index 502f578d5d..e60b52fa99 100644
--- a/aiida/backends/sqlalchemy/models/log.py
+++ b/aiida/backends/sqlalchemy/models/log.py
@@ -14,6 +14,7 @@
 from sqlalchemy.dialects.postgresql import JSONB, UUID
 from sqlalchemy.orm import backref, relationship
 from sqlalchemy.schema import Column
+from sqlalchemy.sql.schema import Index, UniqueConstraint
 from sqlalchemy.types import DateTime, Integer, String, Text
 
 from aiida.backends.sqlalchemy.models.base import Base
@@ -26,18 +27,38 @@ class DbLog(Base):
     __tablename__ = 'db_dblog'
 
     id = Column(Integer, primary_key=True)  # pylint: disable=invalid-name
-    uuid = Column(UUID(as_uuid=True), default=get_new_uuid, unique=True)
-    time = Column(DateTime(timezone=True), default=timezone.now)
-    loggername = Column(String(255), index=True, doc='What process recorded the message')
-    levelname = Column(String(255), index=True, doc='How critical the message is')
+    uuid = Column(UUID(as_uuid=True), default=get_new_uuid, nullable=False)
+    time = Column(DateTime(timezone=True), default=timezone.now, nullable=False)
+    loggername = Column(String(255), nullable=False, doc='What process recorded the message')
+    levelname = Column(String(50), nullable=False, doc='How critical the message is')
     dbnode_id = Column(
         Integer, ForeignKey('db_dbnode.id', deferrable=True, initially='DEFERRED', ondelete='CASCADE'), nullable=False
     )
-    message = Column(Text(), nullable=True)
-    _metadata = Column('metadata', JSONB)
+    message = Column(Text(), default='', nullable=False)
+    _metadata = Column('metadata', JSONB, default=dict, nullable=False)
 
     dbnode = relationship('DbNode', backref=backref('dblogs', passive_deletes='all', cascade='merge'))
 
+    __table_args__ = (
+        # index/constraint names mirror django's auto-generated ones
+        UniqueConstraint(uuid, name='db_dblog_uuid_9cf77df3_uniq'),
+        Index('db_dblog_loggername_00b5ba16', loggername),
+        Index('db_dblog_levelname_ad5dc346', levelname),
+
Index('db_dblog_dbnode_id_da34b732', dbnode_id), + Index( + 'db_dblog_loggername_00b5ba16_like', + loggername, + postgresql_using='btree', + postgresql_ops={'loggername': 'varchar_pattern_ops'} + ), + Index( + 'db_dblog_levelname_ad5dc346_like', + levelname, + postgresql_using='btree', + postgresql_ops={'levelname': 'varchar_pattern_ops'} + ), + ) + def __str__(self): return f'DbLog: {self.levelname} for node {self.dbnode.id}: {self.message}' diff --git a/aiida/backends/sqlalchemy/models/node.py b/aiida/backends/sqlalchemy/models/node.py index 7535927692..410acefa52 100644 --- a/aiida/backends/sqlalchemy/models/node.py +++ b/aiida/backends/sqlalchemy/models/node.py @@ -10,13 +10,14 @@ # pylint: disable=import-error,no-name-in-module """Module to manage nodes for the SQLA backend.""" -from sqlalchemy import ForeignKey +from sqlalchemy import ForeignKey, text # Specific to PGSQL. If needed to be agnostic # http://docs.sqlalchemy.org/en/rel_0_9/core/custom_types.html?highlight=guid#backend-agnostic-guid-type # Or maybe rely on sqlalchemy-utils UUID type from sqlalchemy.dialects.postgresql import JSONB, UUID from sqlalchemy.orm import backref, relationship from sqlalchemy.schema import Column +from sqlalchemy.sql.schema import Index, UniqueConstraint from sqlalchemy.types import DateTime, Integer, String, Text from aiida.backends.sqlalchemy.models.base import Base @@ -43,18 +44,16 @@ class DbNode(Base): __tablename__ = 'db_dbnode' id = Column(Integer, primary_key=True) # pylint: disable=invalid-name - uuid = Column(UUID(as_uuid=True), default=get_new_uuid, unique=True) - node_type = Column(String(255), index=True) - process_type = Column(String(255), index=True) - label = Column( - String(255), index=True, nullable=True, default='' - ) # Does it make sense to be nullable and have a default? 
- description = Column(Text(), nullable=True, default='') - ctime = Column(DateTime(timezone=True), default=timezone.now) - mtime = Column(DateTime(timezone=True), default=timezone.now, onupdate=timezone.now) + uuid = Column(UUID(as_uuid=True), default=get_new_uuid, nullable=False) + node_type = Column(String(255), default='', nullable=False) + process_type = Column(String(255)) + label = Column(String(255), nullable=False, default='') + description = Column(Text(), nullable=False, default='') + ctime = Column(DateTime(timezone=True), default=timezone.now, nullable=False) + mtime = Column(DateTime(timezone=True), default=timezone.now, onupdate=timezone.now, nullable=False) attributes = Column(JSONB) extras = Column(JSONB) - repository_metadata = Column(JSONB, nullable=False, default=dict, server_default='{}') + repository_metadata = Column(JSONB, nullable=True, default=dict) dbcomputer_id = Column( Integer, @@ -94,6 +93,36 @@ class DbNode(Base): passive_deletes=True ) + __table_args__ = ( + # index/constraint names mirror django's auto-generated ones + UniqueConstraint(uuid, name='db_dbnode_uuid_62e0bf98_uniq'), + Index('db_dbnode_label_6469539e', label), + Index('db_dbnode_type_a8ce9753', node_type), + Index('db_dbnode_process_type_df7298d0', process_type), + Index('db_dbnode_ctime_71626ef5', ctime), + Index('db_dbnode_mtime_0554ea3d', mtime), + Index('db_dbnode_dbcomputer_id_315372a3', dbcomputer_id), + Index('db_dbnode_user_id_12e7aeaf', user_id), + Index( + 'db_dbnode_label_6469539e_like', + label, + postgresql_using='btree', + postgresql_ops={'label': 'varchar_pattern_ops'} + ), + Index( + 'db_dbnode_type_a8ce9753_like', + node_type, + postgresql_using='btree', + postgresql_ops={'node_type': 'varchar_pattern_ops'} + ), + Index( + 'db_dbnode_process_type_df7298d0_like', + process_type, + postgresql_using='btree', + postgresql_ops={'process_type': 'varchar_pattern_ops'} + ), + ) + def __init__(self, *args, **kwargs): """Add three additional attributes to the base class: mtime, attributes and extras.""" super().__init__(*args, **kwargs) @@ -171,17 +200,17 @@ class DbLink(Base): __tablename__ = 'db_dblink' id = Column(Integer, primary_key=True) # pylint: disable=invalid-name - input_id = Column(Integer, ForeignKey('db_dbnode.id', deferrable=True, initially='DEFERRED'), index=True) + input_id = Column(Integer, ForeignKey('db_dbnode.id', deferrable=True, initially='DEFERRED'), nullable=False) output_id = Column( - Integer, ForeignKey('db_dbnode.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED'), index=True + Integer, ForeignKey('db_dbnode.id', ondelete='CASCADE', deferrable=True, initially='DEFERRED'), nullable=False ) # https://docs.sqlalchemy.org/en/14/errors.html#relationship-x-will-copy-column-q-to-column-p-which-conflicts-with-relationship-s-y input = relationship('DbNode', primaryjoin='DbLink.input_id == DbNode.id', overlaps='inputs_q,outputs_q') output = relationship('DbNode', primaryjoin='DbLink.output_id == DbNode.id', overlaps='inputs_q,outputs_q') - label = Column(String(255), index=True, nullable=False) - type = Column(String(255), index=True) + label = Column(String(255), nullable=False) + type = Column(String(255), nullable=False) # A calculation can have both a 'return' and a 'create' link to # a single data output node, which would violate the unique constraint @@ -192,6 +221,23 @@ class DbLink(Base): # I cannot add twice the same link # I want unique labels among all inputs of a node # UniqueConstraint('output_id', 'label'), + # index names mirror django's 
auto-generated ones + Index('db_dblink_input_id_9245bd73', input_id), + Index('db_dblink_output_id_c0167528', output_id), + Index('db_dblink_label_f1343cfb', label), + Index('db_dblink_type_229f212b', type), + Index( + 'db_dblink_label_f1343cfb_like', + label, + postgresql_using='btree', + postgresql_ops={'label': 'varchar_pattern_ops'} + ), + Index( + 'db_dblink_type_229f212b_like', + type, + postgresql_using='btree', + postgresql_ops={'type': 'varchar_pattern_ops'} + ), ) def __str__(self): diff --git a/aiida/backends/sqlalchemy/models/settings.py b/aiida/backends/sqlalchemy/models/settings.py index 2b6e98b22c..349d69f532 100644 --- a/aiida/backends/sqlalchemy/models/settings.py +++ b/aiida/backends/sqlalchemy/models/settings.py @@ -14,7 +14,8 @@ from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.orm.attributes import flag_modified from sqlalchemy.schema import UniqueConstraint -from sqlalchemy.types import DateTime, Integer, String +from sqlalchemy.sql.schema import Index +from sqlalchemy.types import DateTime, Integer, String, Text from aiida.backends import sqlalchemy as sa from aiida.backends.sqlalchemy.models.base import Base @@ -24,15 +25,26 @@ class DbSetting(Base): """Database model to store global settings.""" __tablename__ = 'db_dbsetting' - __table_args__ = (UniqueConstraint('key'),) + id = Column(Integer, primary_key=True) # pylint: disable=invalid-name - key = Column(String(255), index=True, nullable=False) + key = Column(String(1024), nullable=False) val = Column(JSONB, default={}) # I also add a description field for the variables - description = Column(String(255), default='', nullable=False) - time = Column(DateTime(timezone=True), default=UTC, onupdate=timezone.now) + description = Column(Text, default='', nullable=False) + time = Column(DateTime(timezone=True), default=timezone.now, onupdate=timezone.now, nullable=False) + + __table_args__ = ( + # index/constraint names mirror django's auto-generated ones + UniqueConstraint(key, name='db_dbsetting_key_1b84beb4_uniq'), + Index( + 'db_dbsetting_key_1b84beb4_like', + key, + postgresql_using='btree', + postgresql_ops={'key': 'varchar_pattern_ops'} + ), + ) def __str__(self): return f"'{self.key}'={self.getvalue()}" diff --git a/aiida/backends/sqlalchemy/models/user.py b/aiida/backends/sqlalchemy/models/user.py index c651ebf94d..f16037d2c2 100644 --- a/aiida/backends/sqlalchemy/models/user.py +++ b/aiida/backends/sqlalchemy/models/user.py @@ -11,6 +11,7 @@ """Module to manage users for the SQLA backend.""" from sqlalchemy.schema import Column +from sqlalchemy.sql.schema import Index, UniqueConstraint from sqlalchemy.types import Integer, String from aiida.backends.sqlalchemy.models.base import Base @@ -24,10 +25,21 @@ class DbUser(Base): __tablename__ = 'db_dbuser' id = Column(Integer, primary_key=True) # pylint: disable=invalid-name - email = Column(String(254), unique=True, index=True) - first_name = Column(String(254), nullable=True) - last_name = Column(String(254), nullable=True) - institution = Column(String(254), nullable=True) + email = Column(String(254), nullable=False) + first_name = Column(String(254), default='', nullable=False) + last_name = Column(String(254), default='', nullable=False) + institution = Column(String(254), default='', nullable=False) + + __table_args__ = ( + # index/constraint names mirror django's auto-generated ones + UniqueConstraint(email, name='db_dbuser_email_30150b7e_uniq'), + Index( + 'db_dbuser_email_30150b7e_like', + email, + postgresql_using='btree', + 
postgresql_ops={'email': 'varchar_pattern_ops'} + ), + ) def __init__(self, email, first_name='', last_name='', institution='', **kwargs): """Set additional class attributes with respect to the base class.""" diff --git a/aiida/orm/implementation/django/nodes.py b/aiida/orm/implementation/django/nodes.py index 1b2d180b6c..44c1adc66e 100644 --- a/aiida/orm/implementation/django/nodes.py +++ b/aiida/orm/implementation/django/nodes.py @@ -147,7 +147,7 @@ def description(self, value): @property def repository_metadata(self): - return self._dbmodel.repository_metadata + return self._dbmodel.repository_metadata or {} @repository_metadata.setter def repository_metadata(self, value): diff --git a/aiida/orm/implementation/sqlalchemy/nodes.py b/aiida/orm/implementation/sqlalchemy/nodes.py index 1060b24be1..cb5936ebbc 100644 --- a/aiida/orm/implementation/sqlalchemy/nodes.py +++ b/aiida/orm/implementation/sqlalchemy/nodes.py @@ -148,7 +148,7 @@ def description(self, value): @property def repository_metadata(self): - return self._dbmodel.repository_metadata + return self._dbmodel.repository_metadata or {} @repository_metadata.setter def repository_metadata(self, value): diff --git a/tests/backends/__init__.py b/tests/backends/__init__.py index 2776a55f97..606024c337 100644 --- a/tests/backends/__init__.py +++ b/tests/backends/__init__.py @@ -7,3 +7,4 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +"""Tests for the backends.""" diff --git a/tests/backends/aiida_sqlalchemy/migrations/test_11_v2_repository.py b/tests/backends/aiida_sqlalchemy/migrations/test_11_v2_repository.py index 0feb1106f2..d2ae9a1cc4 100644 --- a/tests/backends/aiida_sqlalchemy/migrations/test_11_v2_repository.py +++ b/tests/backends/aiida_sqlalchemy/migrations/test_11_v2_repository.py @@ -46,7 +46,7 @@ def test_node_repository_metadata(perform_migrations: Migrator): with perform_migrations.session() as session: node = session.query(DbNode).filter(DbNode.id == node_id).one() assert hasattr(node, 'repository_metadata') - assert node.repository_metadata == {} + assert node.repository_metadata is None def test_entry_point_core_prefix(perform_migrations: Migrator): @@ -204,7 +204,7 @@ def test_repository_migration(perform_migrations: Migrator): # pylint: disable= } } } - assert node_03.repository_metadata == {} + assert node_03.repository_metadata is None assert node_05.repository_metadata == { 'o': { 'input.txt': { diff --git a/tests/backends/aiida_sqlalchemy/migrations/test_12_sqla_django_parity.py b/tests/backends/aiida_sqlalchemy/migrations/test_12_sqla_django_parity.py new file mode 100644 index 0000000000..08da595f20 --- /dev/null +++ b/tests/backends/aiida_sqlalchemy/migrations/test_12_sqla_django_parity.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Tests for migrations to bring parity between SQLAlchemy and Django.""" +# pylint: disable=invalid-name,too-many-locals,too-many-statements +from .conftest import Migrator + + +def test_non_nullable(perform_migrations: Migrator): + """Test making columns non-nullable.""" + # starting revision + perform_migrations.migrate_down('34a831f4286d') + + # setup the database + DbAuthInfo = perform_migrations.get_current_table('db_dbauthinfo') + DbComment = perform_migrations.get_current_table('db_dbcomment') + DbComputer = perform_migrations.get_current_table('db_dbcomputer') + DbGroup = perform_migrations.get_current_table('db_dbgroup') + Dblog = perform_migrations.get_current_table('db_dblog') + DbNode = perform_migrations.get_current_table('db_dbnode') + DbSetting = perform_migrations.get_current_table('db_dbsetting') + DbUser = perform_migrations.get_current_table('db_dbuser') + with perform_migrations.session() as session: + setting = DbSetting(key='test_key', val='test_value', description='', time=None) + session.add(setting) + user = DbUser(email=None, first_name=None, last_name=None, institution=None) + session.add(user) + computer = DbComputer( + label='computer', + hostname=None, + description=None, + metadata=None, + scheduler_type=None, + transport_type=None, + ) + session.add(computer) + session.commit() + setting_id = setting.id + user_id = user.id + computer_id = computer.id + group = DbGroup(label=None, description=None, time=None, type_string=None, extras={}, user_id=user_id) + session.add(group) + session.commit() + group_id = group.id + authinfo = DbAuthInfo( + aiidauser_id=user_id, dbcomputer_id=computer_id, enabled=None, auth_params=None, metadata=None + ) + session.add(authinfo) + session.commit() + authinfo_id = authinfo.id + node = DbNode( + user_id=user_id, + ctime=None, + mtime=None, + description=None, + label=None, + node_type='', + uuid=None, + attributes={}, + extras={} + ) + session.add(node) + session.commit() + node_id = node.id + comment = DbComment(dbnode_id=node_id, user_id=user_id, content=None, ctime=None, mtime=None, uuid=None) + session.add(comment) + session.commit() + comment_id = comment.id + log = Dblog(dbnode_id=node_id, levelname='x' * 100) + session.add(log) + session.commit() + log_id = log.id + + # migrate up + perform_migrations.migrate_up('1de112340b18') + + # perform some checks + DbAuthInfo = perform_migrations.get_current_table('db_dbauthinfo') + DbComment = perform_migrations.get_current_table('db_dbcomment') + DbComputer = perform_migrations.get_current_table('db_dbcomputer') + DbGroup = perform_migrations.get_current_table('db_dbgroup') + Dblog = perform_migrations.get_current_table('db_dblog') + DbNode = perform_migrations.get_current_table('db_dbnode') + DbSetting = perform_migrations.get_current_table('db_dbsetting') + DbUser = perform_migrations.get_current_table('db_dbuser') + with perform_migrations.session() as session: + setting = session.query(DbSetting).filter(DbSetting.id == setting_id).one() + assert setting.time is not None + user = session.query(DbUser).filter(DbUser.id == user_id).one() + assert user.email is not None + assert user.first_name is not None + assert user.last_name is not None + assert user.institution is not None + 
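# part 1 of the migrations back-fills nulls with defaults (empty strings, fresh UUIDs,
+        # the current time), so every field checked below should now be non-null
+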
computer = session.query(DbComputer).filter(DbComputer.id == computer_id).one() + assert computer.hostname is not None + assert computer.description is not None + assert computer.metadata is not None + assert computer.scheduler_type is not None + assert computer.transport_type is not None + assert computer.uuid is not None + group = session.query(DbGroup).filter(DbGroup.id == group_id).one() + assert group.label is not None + assert group.description is not None + assert group.time is not None + assert group.type_string is not None + assert group.uuid is not None + authinfo = session.query(DbAuthInfo).filter(DbAuthInfo.id == authinfo_id).one() + assert authinfo.enabled is not None + assert authinfo.auth_params is not None + assert authinfo.metadata is not None + node = session.query(DbNode).filter(DbNode.id == node_id).one() + assert node.ctime is not None + assert node.mtime is not None + assert node.description is not None + assert node.label is not None + assert node.node_type is not None + assert node.uuid is not None + comment = session.query(DbComment).filter(DbComment.id == comment_id).one() + assert comment.content is not None + assert comment.ctime is not None + assert comment.mtime is not None + assert comment.uuid is not None + log = session.query(Dblog).filter(Dblog.id == log_id).one() + assert log.uuid is not None + assert log.time is not None + assert log.loggername is not None + assert log.levelname == 'x' * 50 + assert log.message is not None + assert log.metadata is not None diff --git a/tests/backends/test_schema_parity.py b/tests/backends/test_schema_parity.py new file mode 100644 index 0000000000..17ddf964a9 --- /dev/null +++ b/tests/backends/test_schema_parity.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Check the schema parity between Django and SQLAlchemy.""" + + +def test_columns(backend, data_regression): + """Test parity of table columns.""" + data = {} + for tbl_name, col_name, data_type, is_nullable, column_default, char_max_length in get_table_fields(backend): + data.setdefault(tbl_name, {})[col_name] = { + 'data_type': data_type, + 'is_nullable': is_nullable, + 'default': column_default, + } + if char_max_length: + data[tbl_name][col_name]['max_length'] = char_max_length + data_regression.check(data) + + +def test_primary_keys(backend, data_regression): + """Test parity of primary key constraints.""" + data = {} + for tbl_name, name, col_names in sorted(get_constraints(backend, 'p')): + data.setdefault(tbl_name, {})[name] = col_names + data_regression.check(data) + + +def test_unique_constraints(backend, data_regression): + """Test parity of unique constraints.""" + data = {} + for tbl_name, name, col_names in sorted(get_constraints(backend, 'u')): + data.setdefault(tbl_name, {})[name] = sorted(col_names) + data_regression.check(data) + + +def test_indexes(backend, data_regression): + """Test parity of indexes.""" + data = {} + for tbl_name, name, definition in sorted(get_indexes(backend)): + data.setdefault(tbl_name, {})[name] = definition + data_regression.check(data) + + +def get_table_fields(backend): + """Get the fields of all AiiDA tables.""" + # see https://www.postgresql.org/docs/9.1/infoschema-columns.html + rows = backend.execute_raw( + 'SELECT table_name,column_name,data_type,is_nullable,column_default,character_maximum_length ' + 'FROM information_schema.columns ' + "WHERE table_schema = 'public' AND table_name LIKE 'db_%';" + ) + rows = [list(row) for row in rows] + for row in rows: + row[3] = row[3].upper() == 'YES' + return rows + + +def get_constraints(backend, ctype): + """Get the constraints of all AiiDA tables, for a particular constraint type.""" + # see https://www.postgresql.org/docs/9.1/catalog-pg-constraint.html + rows = backend.execute_raw( + 'SELECT tbl.relname,c.conname,ARRAY_AGG(a.attname) FROM pg_constraint AS c ' + 'INNER JOIN pg_class AS tbl ON tbl.oid = c.conrelid ' + 'INNER JOIN pg_attribute AS a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey) ' + f"WHERE c.contype='{ctype}' AND tbl.relname LIKE 'db_%' " + 'GROUP BY tbl.relname,c.conname;' + ) + rows = [list(row) for row in rows] + return rows + + +def get_indexes(backend): + """Get the indexes of all AiiDA tables.""" + # see https://www.postgresql.org/docs/9.1/view-pg-indexes.html + rows = backend.execute_raw( + 'SELECT tablename,indexname,indexdef FROM pg_indexes ' + "WHERE tablename LIKE 'db_%' " + 'ORDER BY tablename,indexname;' + ) + rows = [list(row) for row in rows] + return rows diff --git a/tests/backends/test_schema_parity/test_columns.yml b/tests/backends/test_schema_parity/test_columns.yml new file mode 100644 index 0000000000..836cc8dad2 --- /dev/null +++ b/tests/backends/test_schema_parity/test_columns.yml @@ -0,0 +1,300 @@ +db_dbauthinfo: + aiidauser_id: + data_type: integer + default: null + is_nullable: false + auth_params: + data_type: jsonb + default: null + is_nullable: false + dbcomputer_id: + data_type: integer + default: null + is_nullable: false + enabled: + data_type: 
boolean + default: null + is_nullable: false + id: + data_type: integer + default: nextval('db_dbauthinfo_id_seq'::regclass) + is_nullable: false + metadata: + data_type: jsonb + default: null + is_nullable: false +db_dbcomment: + content: + data_type: text + default: null + is_nullable: false + ctime: + data_type: timestamp with time zone + default: null + is_nullable: false + dbnode_id: + data_type: integer + default: null + is_nullable: false + id: + data_type: integer + default: nextval('db_dbcomment_id_seq'::regclass) + is_nullable: false + mtime: + data_type: timestamp with time zone + default: null + is_nullable: false + user_id: + data_type: integer + default: null + is_nullable: false + uuid: + data_type: uuid + default: null + is_nullable: false +db_dbcomputer: + description: + data_type: text + default: null + is_nullable: false + hostname: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + id: + data_type: integer + default: nextval('db_dbcomputer_id_seq'::regclass) + is_nullable: false + label: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + metadata: + data_type: jsonb + default: null + is_nullable: false + scheduler_type: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + transport_type: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + uuid: + data_type: uuid + default: null + is_nullable: false +db_dbgroup: + description: + data_type: text + default: null + is_nullable: false + extras: + data_type: jsonb + default: null + is_nullable: false + id: + data_type: integer + default: nextval('db_dbgroup_id_seq'::regclass) + is_nullable: false + label: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + time: + data_type: timestamp with time zone + default: null + is_nullable: false + type_string: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + user_id: + data_type: integer + default: null + is_nullable: false + uuid: + data_type: uuid + default: null + is_nullable: false +db_dbgroup_dbnodes: + dbgroup_id: + data_type: integer + default: null + is_nullable: false + dbnode_id: + data_type: integer + default: null + is_nullable: false + id: + data_type: integer + default: nextval('db_dbgroup_dbnodes_id_seq'::regclass) + is_nullable: false +db_dblink: + id: + data_type: integer + default: nextval('db_dblink_id_seq'::regclass) + is_nullable: false + input_id: + data_type: integer + default: null + is_nullable: false + label: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + output_id: + data_type: integer + default: null + is_nullable: false + type: + data_type: character varying + default: null + is_nullable: false + max_length: 255 +db_dblog: + dbnode_id: + data_type: integer + default: null + is_nullable: false + id: + data_type: integer + default: nextval('db_dblog_id_seq'::regclass) + is_nullable: false + levelname: + data_type: character varying + default: null + is_nullable: false + max_length: 50 + loggername: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + message: + data_type: text + default: null + is_nullable: false + metadata: + data_type: jsonb + default: null + is_nullable: false + time: + data_type: timestamp with time zone + default: null + is_nullable: false + uuid: + data_type: uuid + default: null + is_nullable: false +db_dbnode: + attributes: + data_type: jsonb + 
default: null + is_nullable: true + ctime: + data_type: timestamp with time zone + default: null + is_nullable: false + dbcomputer_id: + data_type: integer + default: null + is_nullable: true + description: + data_type: text + default: null + is_nullable: false + extras: + data_type: jsonb + default: null + is_nullable: true + id: + data_type: integer + default: nextval('db_dbnode_id_seq'::regclass) + is_nullable: false + label: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + mtime: + data_type: timestamp with time zone + default: null + is_nullable: false + node_type: + data_type: character varying + default: null + is_nullable: false + max_length: 255 + process_type: + data_type: character varying + default: null + is_nullable: true + max_length: 255 + repository_metadata: + data_type: jsonb + default: null + is_nullable: true + user_id: + data_type: integer + default: null + is_nullable: false + uuid: + data_type: uuid + default: null + is_nullable: false +db_dbsetting: + description: + data_type: text + default: null + is_nullable: false + id: + data_type: integer + default: nextval('db_dbsetting_id_seq'::regclass) + is_nullable: false + key: + data_type: character varying + default: null + is_nullable: false + max_length: 1024 + time: + data_type: timestamp with time zone + default: null + is_nullable: false + val: + data_type: jsonb + default: null + is_nullable: true +db_dbuser: + email: + data_type: character varying + default: null + is_nullable: false + max_length: 254 + first_name: + data_type: character varying + default: null + is_nullable: false + max_length: 254 + id: + data_type: integer + default: nextval('db_dbuser_id_seq'::regclass) + is_nullable: false + institution: + data_type: character varying + default: null + is_nullable: false + max_length: 254 + last_name: + data_type: character varying + default: null + is_nullable: false + max_length: 254 diff --git a/tests/backends/test_schema_parity/test_indexes.yml b/tests/backends/test_schema_parity/test_indexes.yml new file mode 100644 index 0000000000..afd288ce8b --- /dev/null +++ b/tests/backends/test_schema_parity/test_indexes.yml @@ -0,0 +1,122 @@ +db_dbauthinfo: + db_dbauthinfo_aiidauser_id_0684fdfb: CREATE INDEX db_dbauthinfo_aiidauser_id_0684fdfb + ON public.db_dbauthinfo USING btree (aiidauser_id) + db_dbauthinfo_aiidauser_id_dbcomputer_id_777cdaa8_uniq: CREATE UNIQUE INDEX db_dbauthinfo_aiidauser_id_dbcomputer_id_777cdaa8_uniq + ON public.db_dbauthinfo USING btree (aiidauser_id, dbcomputer_id) + db_dbauthinfo_dbcomputer_id_424f7ac4: CREATE INDEX db_dbauthinfo_dbcomputer_id_424f7ac4 + ON public.db_dbauthinfo USING btree (dbcomputer_id) + db_dbauthinfo_pkey: CREATE UNIQUE INDEX db_dbauthinfo_pkey ON public.db_dbauthinfo + USING btree (id) +db_dbcomment: + db_dbcomment_dbnode_id_3b812b6b: CREATE INDEX db_dbcomment_dbnode_id_3b812b6b ON + public.db_dbcomment USING btree (dbnode_id) + db_dbcomment_pkey: CREATE UNIQUE INDEX db_dbcomment_pkey ON public.db_dbcomment + USING btree (id) + db_dbcomment_user_id_8ed5e360: CREATE INDEX db_dbcomment_user_id_8ed5e360 ON public.db_dbcomment + USING btree (user_id) + db_dbcomment_uuid_49bac08c_uniq: CREATE UNIQUE INDEX db_dbcomment_uuid_49bac08c_uniq + ON public.db_dbcomment USING btree (uuid) +db_dbcomputer: + db_dbcomputer_label_bc480bab_like: CREATE INDEX db_dbcomputer_label_bc480bab_like + ON public.db_dbcomputer USING btree (label varchar_pattern_ops) + db_dbcomputer_label_bc480bab_uniq: CREATE UNIQUE INDEX 
db_dbcomputer_label_bc480bab_uniq + ON public.db_dbcomputer USING btree (label) + db_dbcomputer_pkey: CREATE UNIQUE INDEX db_dbcomputer_pkey ON public.db_dbcomputer + USING btree (id) + db_dbcomputer_uuid_f35defa6_uniq: CREATE UNIQUE INDEX db_dbcomputer_uuid_f35defa6_uniq + ON public.db_dbcomputer USING btree (uuid) +db_dbgroup: + db_dbgroup_name_66c75272: CREATE INDEX db_dbgroup_name_66c75272 ON public.db_dbgroup + USING btree (label) + db_dbgroup_name_66c75272_like: CREATE INDEX db_dbgroup_name_66c75272_like ON public.db_dbgroup + USING btree (label varchar_pattern_ops) + db_dbgroup_name_type_12656f33_uniq: CREATE UNIQUE INDEX db_dbgroup_name_type_12656f33_uniq + ON public.db_dbgroup USING btree (label, type_string) + db_dbgroup_pkey: CREATE UNIQUE INDEX db_dbgroup_pkey ON public.db_dbgroup USING + btree (id) + db_dbgroup_type_23b2a748: CREATE INDEX db_dbgroup_type_23b2a748 ON public.db_dbgroup + USING btree (type_string) + db_dbgroup_type_23b2a748_like: CREATE INDEX db_dbgroup_type_23b2a748_like ON public.db_dbgroup + USING btree (type_string varchar_pattern_ops) + db_dbgroup_user_id_100f8a51: CREATE INDEX db_dbgroup_user_id_100f8a51 ON public.db_dbgroup + USING btree (user_id) + db_dbgroup_uuid_af896177_uniq: CREATE UNIQUE INDEX db_dbgroup_uuid_af896177_uniq + ON public.db_dbgroup USING btree (uuid) +db_dbgroup_dbnodes: + db_dbgroup_dbnodes_dbgroup_id_9d3a0f9d: CREATE INDEX db_dbgroup_dbnodes_dbgroup_id_9d3a0f9d + ON public.db_dbgroup_dbnodes USING btree (dbgroup_id) + db_dbgroup_dbnodes_dbgroup_id_dbnode_id_eee23cce_uniq: CREATE UNIQUE INDEX db_dbgroup_dbnodes_dbgroup_id_dbnode_id_eee23cce_uniq + ON public.db_dbgroup_dbnodes USING btree (dbgroup_id, dbnode_id) + db_dbgroup_dbnodes_dbnode_id_118b9439: CREATE INDEX db_dbgroup_dbnodes_dbnode_id_118b9439 + ON public.db_dbgroup_dbnodes USING btree (dbnode_id) + db_dbgroup_dbnodes_pkey: CREATE UNIQUE INDEX db_dbgroup_dbnodes_pkey ON public.db_dbgroup_dbnodes + USING btree (id) +db_dblink: + db_dblink_input_id_9245bd73: CREATE INDEX db_dblink_input_id_9245bd73 ON public.db_dblink + USING btree (input_id) + db_dblink_label_f1343cfb: CREATE INDEX db_dblink_label_f1343cfb ON public.db_dblink + USING btree (label) + db_dblink_label_f1343cfb_like: CREATE INDEX db_dblink_label_f1343cfb_like ON public.db_dblink + USING btree (label varchar_pattern_ops) + db_dblink_output_id_c0167528: CREATE INDEX db_dblink_output_id_c0167528 ON public.db_dblink + USING btree (output_id) + db_dblink_pkey: CREATE UNIQUE INDEX db_dblink_pkey ON public.db_dblink USING btree + (id) + db_dblink_type_229f212b: CREATE INDEX db_dblink_type_229f212b ON public.db_dblink + USING btree (type) + db_dblink_type_229f212b_like: CREATE INDEX db_dblink_type_229f212b_like ON public.db_dblink + USING btree (type varchar_pattern_ops) +db_dblog: + db_dblog_dbnode_id_da34b732: CREATE INDEX db_dblog_dbnode_id_da34b732 ON public.db_dblog + USING btree (dbnode_id) + db_dblog_levelname_ad5dc346: CREATE INDEX db_dblog_levelname_ad5dc346 ON public.db_dblog + USING btree (levelname) + db_dblog_levelname_ad5dc346_like: CREATE INDEX db_dblog_levelname_ad5dc346_like + ON public.db_dblog USING btree (levelname varchar_pattern_ops) + db_dblog_loggername_00b5ba16: CREATE INDEX db_dblog_loggername_00b5ba16 ON public.db_dblog + USING btree (loggername) + db_dblog_loggername_00b5ba16_like: CREATE INDEX db_dblog_loggername_00b5ba16_like + ON public.db_dblog USING btree (loggername varchar_pattern_ops) + db_dblog_pkey: CREATE UNIQUE INDEX db_dblog_pkey ON public.db_dblog USING btree + (id) + 
db_dblog_uuid_9cf77df3_uniq: CREATE UNIQUE INDEX db_dblog_uuid_9cf77df3_uniq ON + public.db_dblog USING btree (uuid) +db_dbnode: + db_dbnode_ctime_71626ef5: CREATE INDEX db_dbnode_ctime_71626ef5 ON public.db_dbnode + USING btree (ctime) + db_dbnode_dbcomputer_id_315372a3: CREATE INDEX db_dbnode_dbcomputer_id_315372a3 + ON public.db_dbnode USING btree (dbcomputer_id) + db_dbnode_label_6469539e: CREATE INDEX db_dbnode_label_6469539e ON public.db_dbnode + USING btree (label) + db_dbnode_label_6469539e_like: CREATE INDEX db_dbnode_label_6469539e_like ON public.db_dbnode + USING btree (label varchar_pattern_ops) + db_dbnode_mtime_0554ea3d: CREATE INDEX db_dbnode_mtime_0554ea3d ON public.db_dbnode + USING btree (mtime) + db_dbnode_pkey: CREATE UNIQUE INDEX db_dbnode_pkey ON public.db_dbnode USING btree + (id) + db_dbnode_process_type_df7298d0: CREATE INDEX db_dbnode_process_type_df7298d0 ON + public.db_dbnode USING btree (process_type) + db_dbnode_process_type_df7298d0_like: CREATE INDEX db_dbnode_process_type_df7298d0_like + ON public.db_dbnode USING btree (process_type varchar_pattern_ops) + db_dbnode_type_a8ce9753: CREATE INDEX db_dbnode_type_a8ce9753 ON public.db_dbnode + USING btree (node_type) + db_dbnode_type_a8ce9753_like: CREATE INDEX db_dbnode_type_a8ce9753_like ON public.db_dbnode + USING btree (node_type varchar_pattern_ops) + db_dbnode_user_id_12e7aeaf: CREATE INDEX db_dbnode_user_id_12e7aeaf ON public.db_dbnode + USING btree (user_id) + db_dbnode_uuid_62e0bf98_uniq: CREATE UNIQUE INDEX db_dbnode_uuid_62e0bf98_uniq ON + public.db_dbnode USING btree (uuid) +db_dbsetting: + db_dbsetting_key_1b84beb4_like: CREATE INDEX db_dbsetting_key_1b84beb4_like ON public.db_dbsetting + USING btree (key varchar_pattern_ops) + db_dbsetting_key_1b84beb4_uniq: CREATE UNIQUE INDEX db_dbsetting_key_1b84beb4_uniq + ON public.db_dbsetting USING btree (key) + db_dbsetting_pkey: CREATE UNIQUE INDEX db_dbsetting_pkey ON public.db_dbsetting + USING btree (id) +db_dbuser: + db_dbuser_email_30150b7e_like: CREATE INDEX db_dbuser_email_30150b7e_like ON public.db_dbuser + USING btree (email varchar_pattern_ops) + db_dbuser_email_30150b7e_uniq: CREATE UNIQUE INDEX db_dbuser_email_30150b7e_uniq + ON public.db_dbuser USING btree (email) + db_dbuser_pkey: CREATE UNIQUE INDEX db_dbuser_pkey ON public.db_dbuser USING btree + (id) diff --git a/tests/backends/test_schema_parity/test_primary_keys.yml b/tests/backends/test_schema_parity/test_primary_keys.yml new file mode 100644 index 0000000000..5b7aa52d60 --- /dev/null +++ b/tests/backends/test_schema_parity/test_primary_keys.yml @@ -0,0 +1,30 @@ +db_dbauthinfo: + db_dbauthinfo_pkey: + - id +db_dbcomment: + db_dbcomment_pkey: + - id +db_dbcomputer: + db_dbcomputer_pkey: + - id +db_dbgroup: + db_dbgroup_pkey: + - id +db_dbgroup_dbnodes: + db_dbgroup_dbnodes_pkey: + - id +db_dblink: + db_dblink_pkey: + - id +db_dblog: + db_dblog_pkey: + - id +db_dbnode: + db_dbnode_pkey: + - id +db_dbsetting: + db_dbsetting_pkey: + - id +db_dbuser: + db_dbuser_pkey: + - id diff --git a/tests/backends/test_schema_parity/test_unique_constraints.yml b/tests/backends/test_schema_parity/test_unique_constraints.yml new file mode 100644 index 0000000000..c4662b8a40 --- /dev/null +++ b/tests/backends/test_schema_parity/test_unique_constraints.yml @@ -0,0 +1,34 @@ +db_dbauthinfo: + db_dbauthinfo_aiidauser_id_dbcomputer_id_777cdaa8_uniq: + - aiidauser_id + - dbcomputer_id +db_dbcomment: + db_dbcomment_uuid_49bac08c_uniq: + - uuid +db_dbcomputer: + db_dbcomputer_label_bc480bab_uniq: + - label + 
db_dbcomputer_uuid_f35defa6_uniq: + - uuid +db_dbgroup: + db_dbgroup_name_type_12656f33_uniq: + - label + - type_string + db_dbgroup_uuid_af896177_uniq: + - uuid +db_dbgroup_dbnodes: + db_dbgroup_dbnodes_dbgroup_id_dbnode_id_eee23cce_uniq: + - dbgroup_id + - dbnode_id +db_dblog: + db_dblog_uuid_9cf77df3_uniq: + - uuid +db_dbnode: + db_dbnode_uuid_62e0bf98_uniq: + - uuid +db_dbsetting: + db_dbsetting_key_1b84beb4_uniq: + - key +db_dbuser: + db_dbuser_email_30150b7e_uniq: + - email