diff --git a/.github/workflows/docs-branch-checks.yml b/.github/workflows/docs-branch-checks.yml index be7e80b30..e0bd50404 100644 --- a/.github/workflows/docs-branch-checks.yml +++ b/.github/workflows/docs-branch-checks.yml @@ -50,4 +50,3 @@ jobs: if: ${{ inputs.lint || github.event_name == 'push' }} name: Lint docs uses: neo4j/docs-tools/.github/workflows/reusable-docs-lint.yml@v1.0.3 - diff --git a/.github/workflows/docs-deploy-surge.yml b/.github/workflows/docs-deploy-surge.yml index 9d8ec7e52..64a4ea746 100644 --- a/.github/workflows/docs-deploy-surge.yml +++ b/.github/workflows/docs-deploy-surge.yml @@ -71,7 +71,6 @@ jobs: - id: unzip-changelog if: ${{ hashFiles('changelog.zip') != '' }} run: unzip changelog.zip - - id: get-deploy-id run: | deployid=$(> $GITHUB_OUTPUT - - uses: actions/setup-node@v4 with: node-version: lts/* diff --git a/.github/workflows/docs-teardown.yml b/.github/workflows/docs-teardown.yml index 794fe2448..cf2db31b3 100644 --- a/.github/workflows/docs-teardown.yml +++ b/.github/workflows/docs-teardown.yml @@ -42,4 +42,3 @@ jobs: The preview documentation has now been torn down - reopening this PR will republish it. GITHUB_TOKEN: ${{ secrets.DOCS_PR_COMMENT_TOKEN }} - diff --git a/antora.yml b/antora.yml index 1ee1ca99d..0f4c5b449 100644 --- a/antora.yml +++ b/antora.yml @@ -7,7 +7,7 @@ nav: asciidoc: attributes: neo4j-version: '5' - neo4j-version-minor: '5.24' - neo4j-version-exact: '5.24.1' - neo4j-buildnumber: '5.24' + neo4j-version-minor: '5.25' + neo4j-version-exact: '5.25.0' + neo4j-buildnumber: '5.25' neo4j-debian-package-version: '1:5.22.0@' diff --git a/modules/ROOT/content-nav.adoc b/modules/ROOT/content-nav.adoc index ca8b850a0..d7c5a81fa 100644 --- a/modules/ROOT/content-nav.adoc +++ b/modules/ROOT/content-nav.adoc @@ -164,6 +164,7 @@ ** xref:backup-restore/modes.adoc[] ** xref:backup-restore/online-backup.adoc[] ** xref:backup-restore/aggregate.adoc[] +** xref:backup-restore/inspect.adoc[] ** xref:backup-restore/restore-backup.adoc[] ** xref:backup-restore/offline-backup.adoc[] ** xref:backup-restore/restore-dump.adoc[] diff --git a/modules/ROOT/pages/backup-restore/aggregate.adoc b/modules/ROOT/pages/backup-restore/aggregate.adoc index 3f35d7c83..7f85a220b 100644 --- a/modules/ROOT/pages/backup-restore/aggregate.adoc +++ b/modules/ROOT/pages/backup-restore/aggregate.adoc @@ -71,7 +71,7 @@ Aggregates a chain of backup artifacts into a single artifact. |Accepts either a path to a single artifact file or a folder containing backup artifacts. When a file is supplied, the __ parameter should be omitted. -The option to supply a file is only available from Neo4j 5.2 onwards. +It is possible to aggregate backup artifacts from AWS S3 buckets, Google Cloud storage buckets, and Azure buckets using the appropriate URI as the path. | |-h, --help @@ -106,7 +106,7 @@ For more information, see <>. [NOTE] ==== -Neo4j 5.24 introduces the `--temp-path` option to address potential issues related to disk space when performing backup-related commands, especially when cloud storage is involved. +Neo4j 5.24 introduces the `--temp-path` option to address potential issues related to disk space when performing backup-related commands, especially when cloud storage is involved. If `--temp-path` is not set, a temporary directory is created inside the directory specified by the `--from-path` option. 
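The `--temp-path` note above has no accompanying invocation, so here is a minimal sketch of how the option could be combined with a cloud source. The `neo4j-admin database aggregate-backup` form, the `s3://mybucket/backups` URI, the `/tmp/aggregate-work` directory, and the `neo4j` database name are illustrative assumptions, not values taken from this changeset.

[source,shell]
----
# Aggregate a backup chain stored in a cloud bucket, staging temporary files
# on a local volume with sufficient free space rather than next to the bucket path.
bin/neo4j-admin database aggregate-backup \
    --from-path=s3://mybucket/backups \
    --temp-path=/tmp/aggregate-work \
    neo4j
----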
diff --git a/modules/ROOT/pages/backup-restore/index.adoc b/modules/ROOT/pages/backup-restore/index.adoc index 9257361c8..9be0cbdd2 100644 --- a/modules/ROOT/pages/backup-restore/index.adoc +++ b/modules/ROOT/pages/backup-restore/index.adoc @@ -8,6 +8,7 @@ This chapter describes the following: * xref:backup-restore/modes.adoc[Backup modes] -- The supported backup modes. * xref:backup-restore/online-backup.adoc[Back up an online database] -- How to back up an online database. * xref:backup-restore/aggregate.adoc[Aggregate a database backup chain] - How to aggregate a backup chain into a single backup. +* xref:backup-restore/inspect.adoc[Inspect the metadata of a database backup file] -- How to inspect the metadata of a database backup file. * xref:backup-restore/restore-backup.adoc[Restore a database backup] -- How to restore a database backup in a live Neo4j deployment. * xref:backup-restore/offline-backup.adoc[Back up an offline database] -- How to back up an offline database. * xref:backup-restore/restore-dump.adoc[Restore a database dump] -- How to restore a database dump in a live Neo4j deployment. diff --git a/modules/ROOT/pages/backup-restore/inspect.adoc b/modules/ROOT/pages/backup-restore/inspect.adoc new file mode 100644 index 000000000..205240ecf --- /dev/null +++ b/modules/ROOT/pages/backup-restore/inspect.adoc @@ -0,0 +1,236 @@ +[[inspect-backup]] += Inspect the metadata of a backup file +:description: This section describes how to inspect the metadata of backup files, such as the database name, the backup compression, and the transaction range that the backup contains. +:page-role: enterprise-edition new-5.25 + +You can inspect the metadata of a database backup file using the `neo4j-admin backup inspect` command. + +[[inspect-backup-command]] +== Command + +The inspect command lists the metadata stored in the header of backup files. +This metadata primarily defines how backups are connected to form xref:backup-restore/online-backup.adoc#backup-chain[backup chains]. +A backup chain is a sequence of one or more logically connected backups. +The order of the sequence guarantees that when replayed (see xref:backup-restore/restore-backup.adoc[restore] or xref:backup-restore/aggregate.adoc[aggregate]), the store and the transaction data are consumed in a consistent manner. + +The metadata contains the following information: + +* *Database*: database name of the database fragment that the backup includes. +* *Database ID*: a unique identifier that distinguishes databases (even with the same name). +* *Time*: time the backup was taken. +* *Full*: indicates whether it is a full backup (i.e. initial backup containing the store files) or a differential backup (i.e. subsequent backup containing only the transactions to be applied to the store files). +* *Compressed*: indicates whether the backup data inside the backup file is compressed. +* *Lowest transaction ID*: when the backup is full, this value is always 1, and when it is a differential backup, the value corresponds to the first transaction ID the backup starts with. +* *Highest transaction ID*: similarly, this value indicates the last transaction ID stored in the backup file. + +[[inspect-backup-syntax]] +=== Syntax + +[source,role=noheader] +---- +neo4j-admin backup inspect [-h] [--empty] [--expand-commands] [--latest-backup] + [--latest-chain] [--show-metadata] [--verbose] + [--additional-config=] [--database=] + [--format=] +---- + +=== Description + +Command to read the backup metadata.
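As a quick illustration of the command before the parameter reference that follows, the sketch below reads the metadata of backups held in a cloud bucket; the `s3://mybucket/backups` URI is a placeholder assumption (cloud paths are covered in the note under Parameters), not a value taken from this changeset.

[source,shell]
----
# Show the metadata header of the latest backup per database,
# reading directly from an S3 bucket instead of a local folder.
bin/neo4j-admin backup inspect s3://mybucket/backups --show-metadata --latest-backup
----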
+ +[[inspect-backup-command-parameters]] +=== Parameters + +.`neo4j-admin backup inspect` parameters +[options="header", cols="1m,3a"] +|=== +| Parameter +| Description + +| +|Path denoting either a directory where backups are stored or a single backup to inspect. +|=== + +[NOTE] +==== +The `` parameter can also inspect backups stored in AWS S3 buckets (from Neo4j 5.19), Google Cloud storage buckets (from Neo4j 5.21), and Azure buckets (from Neo4j 5.24). +==== + +[[inspect-backup-command-options]] +=== Options + +.`neo4j-admin backup inspect` options +[options="header", cols="5m,6a,4m"] +|=== +| Option +| Description +| Default + +|--additional-config= +|Configuration file with additional configuration. +| + +| --expand-commands +| Allow command expansion in config value evaluation. +| + +|-h, --help +|Show this help message and exit. +| + +| --latest-backup +| Show only the latest backup. +| false + +| --latest-chain +| List the full backup chain ending with the latest downloaded backup. +| false + +| --show-metadata +| Show the backup metadata. +| false + +| --database= +| Name of the database to inspect. +| + +| --format= +| Format of the output of the command. Possible values are: 'JSON, TABULAR'. +| TABULAR + +| --empty +| Include empty backups. +| false + +|--verbose +|Enable verbose output. +| +|=== + + +[[aggregate-backup-example]] +== Examples + +Given the folder _/backups_ containing a set of database backups: + +[source,shell] +---- +/backups +├── london-2024-10-07T16-03-51.backup +├── london-2024-10-07T16-04-05.backup +├── malmo-2024-10-07T16-00-07.backup +├── malmo-2024-10-07T16-00-19.backup +├── malmo-2024-10-07T16-00-34.backup +├── malmo-2024-10-07T16-00-44.backup +├── malmo-2024-10-07T16-00-50.backup +├── malmo-2024-10-07T16-01-08.backup +├── malmo-2024-10-07T16-01-24.backup +└── neo4j-2024-10-07T16-05-37.backup +---- + +=== Listing the metadata of the backup files + +The following command lists the backup files' names along with their respective metadata: + +[source,shell] +---- +bin/neo4j-admin backup inspect /backups --show-metadata --empty +---- + +The `--empty` option is used to include the empty backups. +An empty backup is created when a database is backed up but no new data exists. +Empty backups are used to record the backup history. 
+ +.Example output +[result] +---- +| FILE | DATABASE | DATABASE ID | TIME (UTC) | FULL | COMPRESSED | LOWEST TX | HIGHEST TX | +| file:///backups/neo4j-2024-10-07T16-05-37.backup | neo4j | 7dcb1d0c-4374-4476-b8ae-d3c3f124683f | 2024-10-07T16:05:37 | true | true | 1 | 3 | +| file:///backups/malmo-2024-10-07T16-01-24.backup | malmo | 62d1820c-3ac6-4b15-a0b3-bf7e7becc8d0 | 2024-10-07T16:01:24 | true | true | 1 | 8 | +| file:///backups/malmo-2024-10-07T16-01-08.backup | malmo | 62d1820c-3ac6-4b15-a0b3-bf7e7becc8d0 | 2024-10-07T16:01:08 | true | true | 1 | 7 | +| file:///backups/malmo-2024-10-07T16-00-50.backup | malmo | 62d1820c-3ac6-4b15-a0b3-bf7e7becc8d0 | 2024-10-07T16:00:50 | false | true | 0 | 0 | +| file:///backups/malmo-2024-10-07T16-00-44.backup | malmo | 62d1820c-3ac6-4b15-a0b3-bf7e7becc8d0 | 2024-10-07T16:00:44 | false | true | 7 | 7 | +| file:///backups/malmo-2024-10-07T16-00-34.backup | malmo | 62d1820c-3ac6-4b15-a0b3-bf7e7becc8d0 | 2024-10-07T16:00:34 | false | true | 6 | 6 | +| file:///backups/malmo-2024-10-07T16-00-19.backup | malmo | 62d1820c-3ac6-4b15-a0b3-bf7e7becc8d0 | 2024-10-07T16:00:19 | false | true | 0 | 0 | +| file:///backups/malmo-2024-10-07T16-00-07.backup | malmo | 62d1820c-3ac6-4b15-a0b3-bf7e7becc8d0 | 2024-10-07T16:00:07 | true | true | 1 | 5 | +| file:///backups/london-2024-10-07T16-04-05.backup | london | d4dae73c-dfef-4d28-88cd-fe6cc88ddca1 | 2024-10-07T16:04:05 | false | true | 6 | 6 | +| file:///backups/london-2024-10-07T16-03-51.backup | london | d4dae73c-dfef-4d28-88cd-fe6cc88ddca1 | 2024-10-07T16:03:51 | true | true | 1 | 5 | +---- + +=== Listing the latest backups + +To list only the most recent backups performed for each database, use the `--latest-backup` option. + +[source,shell] +---- +bin/neo4j-admin backup inspect /backups --show-metadata --latest-backup +---- + +.Example output +[result] +---- +| FILE | DATABASE | DATABASE ID | TIME (UTC) | FULL | COMPRESSED | LOWEST TX | HIGHEST TX | +| file:///backups/neo4j-2024-10-07T16-05-37.backup | neo4j | 7dcb1d0c-4374-4476-b8ae-d3c3f124683f | 2024-10-07T16:05:37 | true | true | 1 | 3 | +| file:///backups/malmo-2024-10-07T16-01-24.backup | malmo | 62d1820c-3ac6-4b15-a0b3-bf7e7becc8d0 | 2024-10-07T16:01:24 | true | true | 1 | 8 | +| file:///backups/london-2024-10-07T16-04-05.backup | london | d4dae73c-dfef-4d28-88cd-fe6cc88ddca1 | 2024-10-07T16:04:05 | false | true | 6 | 6 | +---- + +=== Inspecting backup chains + +A backup chain corresponds to a sequence of one or more backup(s) logically connected by their transaction IDs. +To inspect the backup chains of a given database, use the `--latest-chain` option and the `--database` option with the database whose backup chain you want to inspect: + +[source,shell] +---- +bin/neo4j-admin backup inspect /backups --show-metadata --latest-chain --database=london +---- + +.Example output +[result] +---- +| FILE | DATABASE | DATABASE ID | TIME (UTC) | FULL | COMPRESSED | LOWEST TX | HIGHEST TX | +| file:///backups/london-2024-10-07T16-04-05.backup | london | d4dae73c-dfef-4d28-88cd-fe6cc88ddca1 | 2024-10-07T16:04:05 | false | true | 6 | 6 | +| file:///backups/london-2024-10-07T16-03-51.backup | london | d4dae73c-dfef-4d28-88cd-fe6cc88ddca1 | 2024-10-07T16:03:51 | true | true | 1 | 5 | +---- + +The result returns a chain of size two: + +* The first backup is a full backup containing the store files within the transaction range [1,5]. +* The second backup is a differential backup containing only the subsequent modifications to the store files. 
+Those modifications are materialised by a sequence of transactions to apply. +Its range is [6,6]. + + +=== Inspecting a backup chain ending with a specific backup + +To inspect a backup chain ending with a specific backup, use the `--latest-chain` option as follows: + +[source,shell] +---- +bin/neo4j-admin backup inspect /backups/london-2024-10-07T16-04-05.backup --show-metadata --latest-chain +---- + +.Example output +[result] +---- +| FILE | DATABASE | DATABASE ID | TIME (UTC) | FULL | COMPRESSED | LOWEST TX | HIGHEST TX | +| file:///backups/london-2024-10-07T16-04-05.backup | london | d4dae73c-dfef-4d28-88cd-fe6cc88ddca1 | 2024-10-07T16:04:05 | false | true | 6 | 6 | +| file:///backups/london-2024-10-07T16-03-51.backup | london | d4dae73c-dfef-4d28-88cd-fe6cc88ddca1 | 2024-10-07T16:03:51 | true | true | 1 | 5 | +---- + +[NOTE] +==== +In this case, the `--database` option is unnecessary because the database identifier is part of the metadata stored in the header of the backup file _london-2024-10-07T16-04-05.backup_. +==== + + + + + + + + + + + + diff --git a/modules/ROOT/pages/backup-restore/offline-backup.adoc b/modules/ROOT/pages/backup-restore/offline-backup.adoc index dcf8877ce..6f67fc67a 100644 --- a/modules/ROOT/pages/backup-restore/offline-backup.adoc +++ b/modules/ROOT/pages/backup-restore/offline-backup.adoc @@ -82,6 +82,7 @@ The `neo4j-admin database dump` command has the following options: |--to-path= |Destination folder of a database dump. +It is possible to dump databases into AWS S3 buckets, Google Cloud storage buckets, and Azure buckets using the appropriate URI as the path. | |--to-stdout diff --git a/modules/ROOT/pages/backup-restore/online-backup.adoc b/modules/ROOT/pages/backup-restore/online-backup.adoc index ccca9a8a1..4913e612b 100644 --- a/modules/ROOT/pages/backup-restore/online-backup.adoc +++ b/modules/ROOT/pages/backup-restore/online-backup.adoc @@ -174,7 +174,7 @@ Note: this is an EXPERIMENTAL option. Consult Neo4j support before use. | |--to-path= -|Directory to place backup in (required unless `--inspect-path` is used). +|Directory to place backup in (required unless `--inspect-path` is used). It is possible to back up databases into AWS S3 buckets, Google Cloud storage buckets, and Azure using the appropriate URI as the path. | |--type= @@ -196,7 +196,7 @@ For more information, see <>. [NOTE] ==== -Neo4j 5.24 introduces the `--temp-path` option to address potential issues related to disk space when performing backup-related commands, especially when cloud storage is involved. +Neo4j 5.24 introduces the `--temp-path` option to address potential issues related to disk space when performing backup-related commands, especially when cloud storage is involved. If `--temp-path` is not set, a temporary directory is created inside the directory specified by the `--path` option. diff --git a/modules/ROOT/pages/backup-restore/restore-backup.adoc b/modules/ROOT/pages/backup-restore/restore-backup.adoc index e332d86a9..8b6db26c0 100644 --- a/modules/ROOT/pages/backup-restore/restore-backup.adoc +++ b/modules/ROOT/pages/backup-restore/restore-backup.adoc @@ -79,6 +79,7 @@ neo4j-admin database restore [-h] [--expand-commands] |--from-path=[,...] |A single path or a comma-separated list of paths pointing to a backup artifact file. 
An artifact file can be 1) a full backup, in which case it is restored directly or, 2) a differential backup, in which case the command tries first to find in the folder a backup chain ending at that specific differential backup and then restores that chain. +It is possible to restore backups from AWS S3 buckets, Google Cloud storage buckets, and Azure buckets using the appropriate URI as the path. | |-h, --help @@ -130,7 +131,7 @@ For more information, see <>. [NOTE] ==== -Neo4j 5.24 introduces the `--temp-path` option to address potential issues related to disk space when performing backup-related commands, especially when cloud storage is involved. +Neo4j 5.24 introduces the `--temp-path` option to address potential issues related to disk space when performing backup-related commands, especially when cloud storage is involved. If `--temp-path` is not set, a temporary directory is created inside the directory specified by the `--from-path` option. diff --git a/modules/ROOT/pages/backup-restore/restore-dump.adoc b/modules/ROOT/pages/backup-restore/restore-dump.adoc index dbb1a16ff..fca02216f 100644 --- a/modules/ROOT/pages/backup-restore/restore-dump.adoc +++ b/modules/ROOT/pages/backup-restore/restore-dump.adoc @@ -77,6 +77,7 @@ If `--info` is specified, then the database is not loaded, but information (i.e. |--from-path= |Path to directory containing archive(s). +It is possible to load databases from AWS S3 buckets, Google Cloud storage buckets, and Azure bucket using the appropriate URI as the path. | |--from-stdin diff --git a/modules/ROOT/pages/clustering/databases.adoc b/modules/ROOT/pages/clustering/databases.adoc index e5736b180..8ec0bf018 100644 --- a/modules/ROOT/pages/clustering/databases.adoc +++ b/modules/ROOT/pages/clustering/databases.adoc @@ -235,7 +235,7 @@ Neo4j 5.24 introduces the xref:reference/procedures.adoc#procedure_dbms_cluster_ * To make your database write-available again after it has been lost (for example, due to a disaster). // See xref:clustering/disaster-recovery.adoc[] for more information. - + [CAUTION] ==== The recreate procedure works only for real user databases and not for composite databases, or the `system` database. @@ -304,7 +304,7 @@ If not used stores were more up to date than the used ones, this results in data You can specify a set of available servers. The stores on all allocations will be synchronized to the most up-to-date store from the defined servers. -The number of defined servers cannot exceed the number of total allocations in the desired topology. +The number of defined servers cannot exceed the number of total allocations in the desired topology. [source, shell] ---- @@ -341,7 +341,7 @@ There is an option to define a new topology when recreating a database. This can be beneficial during a disaster, if enough servers are not available to recreate the database with the original topology. When altering the total number of allocations down during a recreation, it is important to remember that the number of seeding servers cannot exceed the number of total allocations of the database. This also holds true when using recreate with an empty list of seeders. -If there are more available servers in the cluster hosting the database than the number of new allocations, the recreation will fail. +If there are more available servers in the cluster hosting the database than the number of new allocations, the recreation will fail. [source, shell] ---- @@ -407,7 +407,7 @@ See <<#_create_database, `CREATE DATABASE`>> for more information. 
---- CREATE DATABASE foo TOPOLOGY [desired number of primaries] PRIMARIES [desired number of secondaries] SECONDARIES -OPTIONS {existingData: 'use', existingDataSeedInstance: '8512c9b9-d9e8-48e6-b037-b15b0004ca18'}; +OPTIONS {existingData: 'use', existingDataSeedServer: '8512c9b9-d9e8-48e6-b037-b15b0004ca18'}; ---- . Verify that the `foo` database is online on the desired number of servers, in the desired roles. If the `foo` database is of considerable size, the execution of the command can take some time. @@ -438,27 +438,13 @@ The seed can be either a backup or a dump from an existing database. The sources of seeds are called _seed providers_. The mechanism is pluggable, allowing new sources of seeds to be supported (see link:https://www.neo4j.com/docs/java-reference/current/extending-neo4j/project-setup/#extending-neo4j-plugin-seed-provider[Java Reference -> Implement custom seed providers] for more information). -The product has built-in support for seed from a mounted file system (file), FTP server, HTTP/HTTPS server and Amazon S3. +The product has built-in support for seed from a mounted file system (file), FTP server, HTTP/HTTPS server, Amazon S3, Google Cloud Storage (from Neo4j 5.25), and Azure Cloud Storage (from Neo4j 5.25). [NOTE] ==== -S3 is supported by default, but the other providers require configuration of xref:configuration/configuration-settings.adoc#config_dbms.databases.seed_from_uri_providers[`dbms.databases.seed_from_uri_providers`]. -Neo4j 5 comes bundled with necessary libraries for AWS S3 connectivity and thus, `aws cli` is not required. +Amazon S3, Google Cloud Storage, and Azure Cloud Storage are supported by default, but the other providers require configuration of xref:configuration/configuration-settings.adoc#config_dbms.databases.seed_from_uri_providers[`dbms.databases.seed_from_uri_providers`]. ==== -The `URLConnectionSeedProvider` supports the following: - -** file: -** ftp: -** http: -** https: -** URIs - -Accordingly, the `S3SeedProviders` supports: - -** S3: -** URIs - The URI of the seed is specified when the `CREATE DATABASE` command is issued: [source, cypher, role="noplay"] @@ -481,7 +467,26 @@ neo4j@neo4j> SHOW DATABASES; To determine the cause of the problem, it is recommended to look at the `debug.log`. -Certain seed providers, such as S3, may require additional configuration. +==== Seed providers + +The `URLConnectionSeedProvider` supports the following: + +** `file:` +** `ftp:` +** `http:` +** `https:` + +The `S3SeedProvider` supports: + +** `s3:` + +[NOTE] +==== +Neo4j 5 comes bundled with necessary libraries for AWS S3 connectivity. +Therefore, if you use `S3SeedProvider`,`aws cli` is not required but can be used with the `CloudSeedProvider`. +==== + +The `S3SeedProvider` requires additional configuration. This is specified with the `seedConfig` option. This option expects a comma-separated list of configurations. Each configuration value is specified as a name followed by `=` and the value, as such: @@ -491,23 +496,68 @@ Each configuration value is specified as a name followed by `=` and the value, a CREATE DATABASE foo OPTIONS { existingData: 'use', seedURI: 's3:/myBucket/myBackup.backup', seedConfig: 'region=eu-west-1' } ---- -The available configuration options are: - -* `file:` -* `ftp:` -* `http:` -* `https:` -* `s3:` - use this to specify the path to your S3 bucket. -For example, `seedURI: 's3:/myBucket/myBackup.backup'` -* `region:` - use this together with `s3:` to set the AWS region that hosts the S3 bucket. 
-For example, `seedConfig: 'region=eu-west-1'`. - -Some seed providers may also want to pass credentials into the provider. +`S3SeedProvider` also requires passing in credentials. These are specified with the `seedCredentials` option. Seed credentials are securely passed from the Cypher command to each server hosting the database. For this to work, Neo4j on each server in the cluster must be configured with identical keystores. This is identical to the configuration required by remote aliases, see xref:database-administration/aliases/remote-database-alias-configuration.adoc#remote-alias-config-DBMS_admin-A[Configuration of DBMS with remote database alias]. -If this configuration is not performed, the `seedCredential` option fails. +If this configuration is not performed, the `seedCredentials` option fails. + +[source, cypher, role="noplay"] +---- +CREATE DATABASE foo OPTIONS { existingData: 'use', seedURI: 's3:/myBucket/myBackup.backup', seedConfig: 'region=eu-west-1', seedCredentials: [accessKey];[secretKey] } +---- +Where `accessKey` and `secretKey` are provided by AWS. + +The `CloudSeedProvider` supports: + +** `s3:` +** `gs:` +** `azb:` + +[.tabbed-example] +===== +[role=include-with-AWS-S3 label--new-5.25] +====== + +include::partial$/aws-s3-overrides.adoc[] + +include::partial$/aws-s3-credentials.adoc[] + +. Create database from `myBackup.backup`. ++ +[source,shell, role="nocopy"] +---- +CREATE DATABASE foo OPTIONS { existingData: 'use', seedURI: 's3:/myBucket/myBackup.backup' } +---- + +====== +[role=include-with-Google-cloud-storage label--new-5.25] +====== + +include::partial$/gcs-credentials.adoc[] + +. Create database from `myBackup.backup`. ++ +[source,shell] +---- +CREATE DATABASE foo OPTIONS { existingData: 'use', seedURI: 'gs:/myBucket/myBackup.backup' } +---- +====== +[role=include-with-Azure-cloud-storage label--new-5.25] +====== + +include::partial$/azb-credentials.adoc[] + +. Create database from `myBackup.backup`. ++ +[source,shell] +---- +CREATE DATABASE foo OPTIONS { existingData: 'use', seedURI: 'azb://myStorageAccount/myContainer/myBackup.backup' } +---- +====== +===== + For example, in the case of `S3SeedProvider`(the default provider), `seedCredentials: [accessKey];[secretKey]` where `accessKey` and `secretKey` are provided by AWS. 
@@ -535,9 +585,17 @@ For example, in the case of `S3SeedProvider`(the default provider), `seedCredent | `URLConnectionSeedProvider` | `\https://myhttp.com/backups/backup1.backup` -| `S3:` -| `S3SeedProvider` +| `s3:` +| `S3SeedProvider`, `CloudSeedProvider` | `s3://mybucket/backups/backup1.backup` + +| `gs:` +| `CloudSeedProvider` +| `gs://mybucket/backups/backup1.backup` + +| `azb:` +| `CloudSeedProvider` +| `azb://mystorageaccount.blob/backupscontainer/backup1.backup` |=== [[cluster-allow-deny-db]] diff --git a/modules/ROOT/pages/configuration/configuration-settings.adoc b/modules/ROOT/pages/configuration/configuration-settings.adoc index 34d4f8424..4632700ac 100644 --- a/modules/ROOT/pages/configuration/configuration-settings.adoc +++ b/modules/ROOT/pages/configuration/configuration-settings.adoc @@ -118,10 +118,12 @@ m|+++250.00MiB+++ |=== -[role=enterprise-edition label--dynamic] +[role=label--dynamic] [[config_db.checkpoint.iops.limit]] === `db.checkpoint.iops.limit` +label:enterprise-edition[Enterprise Edition] label:dynamic[Dynamic] + .db.checkpoint.iops.limit [frame="topbot", stripes=odd, grid="cols", cols="<1s,<4"] |=== @@ -738,7 +740,7 @@ m|+++false+++ |=== -[role=label--enterprise-edition] +[role=label--enterprise-edition label--deprecated-5.23] [[config_initial.dbms.database_allocator]] === `initial.dbms.database_allocator` @@ -3087,6 +3089,7 @@ m|+++false+++ [[config_server.memory.query_cache.shared_cache_num_entries]] === `server.memory.query_cache.shared_cache_num_entries` +// label:enterprise-edition[Enterprise Edition] label:new[Introduced in 5.7] label:dynamic[Dynamic since 5.10] .server.memory.query_cache.shared_cache_num_entries [frame="topbot", stripes=odd, grid="cols", cols="<1s,<4"] |=== diff --git a/modules/ROOT/pages/database-administration/standard-databases/create-databases.adoc b/modules/ROOT/pages/database-administration/standard-databases/create-databases.adoc index 9c1c6a559..88f8d139f 100644 --- a/modules/ROOT/pages/database-administration/standard-databases/create-databases.adoc +++ b/modules/ROOT/pages/database-administration/standard-databases/create-databases.adoc @@ -31,7 +31,7 @@ In versions previous to Neo4j 5.22, the default store format for all new databas From Neo4j 5.22, `block` is the default format for all newly-created databases as long as they do not have the xref:configuration/configuration-settings.adoc#config_db.format[`db.format`] setting specified. + If you want to change it, you can set a new value for the xref:configuration/configuration-settings.adoc#config_db.format[`db.format`] configuration in the _neo4j.conf_ file. + Alternatively, you can set the store format of new databases using the `CREATE DATABASE databasename OPTIONS {storeFormat: 'the-new-format'}` command. -However, if the store is seeded with `seedURI` or `existingDataSeedInstance`, or if the command is being used to mount pre-existing store files already present on the disk, they will use their current store format without any alterations. +However, if the store is seeded with `seedURI`, `existingDataSeedServer` or `existingDataSeedInstance`, or if the command is being used to mount pre-existing store files already present on the disk, they will use their current store format without any alterations. See xref:database-internals/store-formats.adoc[Store formats], for more details about available database store formats in Neo4j. @@ -76,13 +76,17 @@ The `CREATE DATABASE` command can have a map of options, e.g. 
`OPTIONS {key: 'va | `use` | Controls how the system handles existing data on disk when creating the database. -Currently, this is only supported with `existingDataSeedInstance` and `seedURI`, and must be set to `use`, which indicates the existing data files should be used for the new database. +Currently, this is only supported with `existingDataSeedInstance`, `existingDataSeedServer` and `seedURI`, and must be set to `use`, which indicates the existing data files should be used for the new database. -| `existingDataSeedInstance` + +|`existingDataSeedServer` label:new[new in 5.25] + +`existingDataSeedInstance` label:deprecated[deprecated in 5.25] | ID of the cluster server | Defines which server is used for seeding the data of the created database. The server ID can be found in the `serverId` column after running `SHOW SERVERS`. +`existingDataSeedInstance` is replaced by `existingDataSeedServer` in Neo4j 5.25. | `seedURI` | URI to a backup or a dump from an existing database. @@ -110,12 +114,12 @@ For details about enrichment mode, see link:{neo4j-docs-base-uri}/cdc/current/ge | `aligned` \| `standard` \| `high_limit` \| `block` | Defines the store format if the database created is new. -If the store is seeded with `seedURI` or `existingDataSeedInstance`, or if the command is being used to mount pre-existing store files already present on the disk, they will use their current store format without any alterations. +If the store is seeded with `seedURI`, `existingDataSeedInstance` or `existingDataSeedServer`, or if the command is used to mount pre-existing store files already present on the disk, they will retain their current store format without any modifications. |=== [NOTE] ==== -The `existingData`, `existingDataSeedInstance`, `seedURI`, `seedConfig`, and `seedCredentials` options cannot be combined with the `OR REPLACE` part of this command. +The `existingData`, `existingDataSeedInstance`, `existingDataSeedServer`, `seedURI`, `seedConfig`, and `seedCredentials` options cannot be combined with the `OR REPLACE` part of this command. More details about seeding options can be found in xref::clustering/databases.adoc#cluster-seed[Seed a cluster]. ==== diff --git a/modules/ROOT/pages/database-internals/store-formats.adoc b/modules/ROOT/pages/database-internals/store-formats.adoc index b9f3ae8e6..b0f13ca5f 100644 --- a/modules/ROOT/pages/database-internals/store-formats.adoc +++ b/modules/ROOT/pages/database-internals/store-formats.adoc @@ -24,7 +24,8 @@ Block format means a few pages need to be loaded to serve a query, i.e. fewer pa * *Property access:* Properties are stored in blocks with their nodes and relationships drastically reducing the amount of pointer chasing required to access properties. * *Entity limits:* Able to run graphs at large scales. Supports the highest limits at the time of writing. -See <> for details. +See <> for details. + +label:new[Introduced in 5.25] Supports token names (including label, property key, and relationship type names) of any length up to the GQL identifier max length of 16,383 characters. * *Future-proofing:* Designed to be extended and improved without requiring store migrations. New features such as data types, or performance enhancements are available without rewriting the store. 
Aligned:: diff --git a/modules/ROOT/pages/monitoring/logging.adoc b/modules/ROOT/pages/monitoring/logging.adoc index 75d590843..1e0e1a8e9 100644 --- a/modules/ROOT/pages/monitoring/logging.adoc +++ b/modules/ROOT/pages/monitoring/logging.adoc @@ -1183,7 +1183,7 @@ The `QueryLogJsonLayout.json` template mimics the 4.x layout and contains the fo If the type of the log entry is `query`, these additional fields are available: -.JSON format log entries +.JSON format log entries for log type `query` [cols="1m,3a", options="header"] |=== | Name @@ -1247,6 +1247,10 @@ Included when xref:configuration/configuration-settings.adoc#config_db.logs.quer | Reason for failure. Included when applicable. +| errorInfo +| label:new[Introduced in 5.25] GQL error information as a JSON object. +See <> for details on the contents of the `errorInfo` JSON object. + | transactionId | The transaction ID of the running query. @@ -1258,7 +1262,7 @@ Included when xref:configuration/configuration-settings.adoc#config_db.logs.quer If the type of the log entry is `transaction`, the following additional fields are available: -.JSON format log entries +.JSON format log entries for log type `transaction` [cols="1m,3a", options="header"] |=== | Name @@ -1280,3 +1284,61 @@ Either same as `authenticatedUser` or an impersonated user. | transactionId | ID of the transaction. |=== + +[role=label--new-5.25] +[[gql-error-information]] +==== GQL error information + +The query log includes the GQL error information under the JSON object `errorInfo`. +`errorInfo` can contain the following elements: + +* `GQLSTATUS` -- A 5-character long alpha-numeric code identifying the error. +* `statusDescription` -- A message describing the error. +* `classification` -- The type of error representing a division of client, transient, and database errors. +* `position` -- The position (a JSON object containing a field for `column`, `offset`, and `line`) in the query where this error occurred. +* `cause` -- A JSON object containing the `errorInfo` JSON object of the cause of the current `errorInfo` JSON object. + +[NOTE] +==== +The default GQLSTATUS code `50N42` is returned when an exception does not have a GQL object. +Starting from Neo4j 5.25, GQL objects are added to exceptions; therefore, you can expect many `50N42` codes. +However, it's important not to rely on this default code, as future Neo4j versions might change it by adding an appropriate GQL object to the exception. +Additionally, GQL codes for external procedures are not yet stable. +==== + +The following are examples of the `errorInfo` JSON object: + +.`errorInfo` JSON object of a database error +[source%linenums,xml,options="nowrap",highlight=4] +---- +... +"errorInfo": { + "GQLSTATUS": "51N66", + "statusDescription": "error: system configuration or operation exception - resource exhaustion. Insufficient resources to complete the request.", + "cause": { + "GQLSTATUS": "51N55", + "statusDescription": "error: system configuration or operation exception - cannot create additional database. Failed to create the database `db10`. The limit of databases is reached. Either increase the limit using the config setting dbms.max_databases or drop a database.", + "classification": "DATABASE_ERROR" + }, + "classification": "DATABASE_ERROR" + }, +... +---- + +.`errorInfo` JSON object of a client error +[source%linenums,xml,options="nowrap",highlight=4] +---- +... +"errorInfo": { + "GQLSTATUS": "42N62", + "statusDescription": "error: syntax error or access rule violation - variable not defined. 
Variable `m` not defined.", + "position": { + "column": 18, + "offset": 17, + "line": 1 + }, + "classification": "CLIENT_ERROR" + }, +"query": "MATCH (n) RETURN m", +... +---- diff --git a/modules/ROOT/pages/monitoring/metrics/reference.adoc b/modules/ROOT/pages/monitoring/metrics/reference.adoc index f204bfc7c..0be270fbd 100644 --- a/modules/ROOT/pages/monitoring/metrics/reference.adoc +++ b/modules/ROOT/pages/monitoring/metrics/reference.adoc @@ -553,6 +553,7 @@ The deprecated Raft core metrics are replaced accordingly by the Raft metrics in |.cluster.raft.append_index|The append index of the Raft log. Each index represents a write transaction (possibly internal) proposed for commitment. The values mostly increase, but sometimes they can decrease as a consequence of leader changes. The append index should always be bigger than or equal to the commit index. (gauge) |.cluster.raft.commit_index|The commit index of the Raft log. Represents the commitment of previously appended entries. Its value increases monotonically if you do not unbind the cluster state. The commit index should always be less than or equal to the append index and bigger than or equal to the applied index. (gauge) |.cluster.raft.applied_index|The applied index of the Raft log. Represents the application of the committed Raft log entries to the database and internal state. The applied index should always be less than or equal to the commit index. The difference between this and the commit index can be used to monitor how up-to-date the follower database is. (gauge) +|.cluster.raft.prune_index |label:new[Introduced in 5.25] The head index of the Raft log. Represents the oldest Raft index that exists in the log. A prune event will increase this value. This can be used to track how much history of Raft logs the member has. (gauge) |.cluster.raft.term|The Raft Term of this server. It increases monotonically if you do not unbind the cluster state. (gauge) |.cluster.raft.tx_retries|Transaction retries. (counter) |.cluster.raft.is_leader|Is this server the leader? Track this for each rafted primary database in the cluster. It reports `0` if it is not the leader and `1` if it is the leader. The sum of all of these should always be `1`. However, there are transient periods in which the sum can be more than `1` because more than one member thinks it is the leader. Action may be needed if the metric shows `0` for more than 30 seconds. (gauge) diff --git a/modules/ROOT/pages/security/ssl-fips-compatibility.adoc b/modules/ROOT/pages/security/ssl-fips-compatibility.adoc index b0e988c37..f07c2eb78 100644 --- a/modules/ROOT/pages/security/ssl-fips-compatibility.adoc +++ b/modules/ROOT/pages/security/ssl-fips-compatibility.adoc @@ -1,8 +1,8 @@ -[role=enterprise-edition] [[ssl-fips-compatibility]] = Configuring SSL for FIPS 140-2 compatibility :description: How to configure Neo4j to use FIPS compatible SSL encryption. :keywords: ssl, tls, authentication, encryption, encrypted, security, fips, fips 140, fips 140-2, nist, hipaa +:page-role: enterprise-edition new-5.24 Federal Information Processing Standards (FIPS) 140 is a U.S. government standard established by the National Institute of Standards and Technology (NIST) which is used to accredit cryptographic modules such as those used in TLS network encryption. 
While FIPS 140 compliance is primarily required for federal agencies and their contractors, it also is used in the healthcare sector under regulations like the Health Insurance Portability and Accountability Act (HIPAA) to protect patient data. diff --git a/modules/ROOT/pages/tools/cypher-shell.adoc b/modules/ROOT/pages/tools/cypher-shell.adoc index 82e66cd0f..bb3ccfa0d 100644 --- a/modules/ROOT/pages/tools/cypher-shell.adoc +++ b/modules/ROOT/pages/tools/cypher-shell.adoc @@ -63,6 +63,10 @@ The syntax for running Cypher Shell is: | Exit and report failures at the end of the input when reading from a file. | +| --enable-autocompletions +| Whether to enable Cypher autocompletions inside the CLI, which are disabled by default. +| + |--format {auto,verbose,plain} |Desired output format. Displays the results in tabular format if you use the shell interactively and with minimal formatting if you use it for scripting. + `verbose` displays results in a tabular format and prints statistics. + diff --git a/modules/ROOT/pages/tools/neo4j-admin/neo4j-admin-import.adoc b/modules/ROOT/pages/tools/neo4j-admin/neo4j-admin-import.adoc index 9524a5741..de082ca5b 100644 --- a/modules/ROOT/pages/tools/neo4j-admin/neo4j-admin-import.adoc +++ b/modules/ROOT/pages/tools/neo4j-admin/neo4j-admin-import.adoc @@ -240,6 +240,8 @@ Therefore, use it with care, especially with large imports. * Multiple data sources like these can be specified in one import, where each data source has its own header. * Files can also be specified using regular expressions. +It is possible to import files from AWS S3 buckets, Google Cloud storage buckets, and Azure buckets using the appropriate URI as the path. + For an example, see <>. | @@ -274,6 +276,8 @@ The value can be a plain number or a byte units string, e.g. `128k`, `1m`. * Multiple data sources like these can be specified in one import, where each data source has its own header. * Files can also be specified using regular expressions. +It is possible to import files from AWS S3 buckets, Google Cloud storage buckets, and Azure buckets using the appropriate URI as the path. + For an example, see <>. | @@ -752,6 +756,8 @@ Therefore, use it with care, especially with large imports. * Multiple data sources like these can be specified in one import, where each data source has its own header. * Files can also be specified using regular expressions. +It is possible to import files from AWS S3 buckets, Google Cloud storage buckets, and Azure buckets using the appropriate URI as the path. + For an example, see <>. | @@ -782,6 +788,8 @@ The value can be a plain number or a byte units string, e.g. `128k`, `1m`. * Multiple data sources like these can be specified in one import, where each data source has its own header. * Files can also be specified using regular expressions. +It is possible to import files from AWS S3 buckets, Google Cloud storage buckets, and Azure buckets using the appropriate URI as the path. + For an example, see <>. | @@ -996,6 +1004,7 @@ ID:: LABEL:: Read one or more labels from this field. Like array values, multiple labels are separated by `;`, or by the character specified with `--array-delimiter`. + label:new[Introduced in 5.25] The max length of label names for block format is 16,383 characters. .Define node files ==== @@ -1046,6 +1055,7 @@ The mandatory fields are: TYPE:: The relationship type to use for this relationship. + label:new[Introduced in 5.25] The max length of relationship type names for block format is 16,383 characters. 
START_ID:: The ID of the start node for this relationship. END_ID:: @@ -1091,6 +1101,7 @@ carrieanne,"Trinity",tt0242653,ACTED_IN For properties, the `` part of the field designates the property key, while the `` part assigns a data type. You can have properties in both node data files and relationship data files. +label:new[Introduced in 5.25] The max length of property keys for block format is 16,383 characters. Use one of `int`, `long`, `float`, `double`, `boolean`, `byte`, `short`, `char`, `string`, `point`, `date`, `localtime`, `time`, `localdatetime`, `datetime`, and `duration` to designate the data type for properties. By default, types (except arrays) are converted to Cypher types. diff --git a/modules/ROOT/pages/tools/neo4j-admin/upload-to-aura.adoc b/modules/ROOT/pages/tools/neo4j-admin/upload-to-aura.adoc index 7e748f303..0df7e2887 100644 --- a/modules/ROOT/pages/tools/neo4j-admin/upload-to-aura.adoc +++ b/modules/ROOT/pages/tools/neo4j-admin/upload-to-aura.adoc @@ -59,8 +59,7 @@ If Neo4j Cloud username and password are not provided either as a command option | Description | -|Name of the database that should be uploaded. -The name is used to select a file which is expected to be named .dump or .backup. +|Name of the database that should be uploaded. The name is used to select a file which is expected to be named .dump or .backup. |=== === Options