From 5c2da4622268587141276966ab14a31515ad226a Mon Sep 17 00:00:00 2001
From: cnp-autobot
Date: Wed, 18 Oct 2023 10:55:47 +0000
Subject: [PATCH 1/8] [create-pull-request] automated change
---
.../1/api_reference.md.in | 40 -
.../1/api_reference.mdx | 1149 -----
.../1/appendixes/object_stores.mdx | 447 ++
.../1/applications.mdx | 19 +-
.../1/architecture.mdx | 4 +
.../docs/postgres_for_kubernetes/1/backup.mdx | 382 ++
.../1/backup_barmanobjectstore.mdx | 152 +
.../1/backup_recovery.mdx | 917 +---
.../1/backup_volumesnapshot.mdx | 229 +
.../postgres_for_kubernetes/1/bootstrap.mdx | 437 +-
.../1/cloudnative-pg.v1.mdx | 4216 +++++++++++++++++
.../1/cluster_conf.mdx | 19 +
.../1/connection_pooling.mdx | 15 +-
.../1/declarative_hibernation.mdx | 2 +-
.../1/declarative_role_management.mdx | 2 +-
.../1/default-monitoring.yaml | 10 +-
.../postgres_for_kubernetes/1/evaluation.mdx | 36 +-
.../1/failure_modes.mdx | 27 +
.../docs/postgres_for_kubernetes/1/faq.mdx | 38 +-
.../postgres_for_kubernetes/1/fencing.mdx | 6 +-
.../1/images/grafana-local.png | 4 +-
.../1/installation_upgrade.mdx | 234 +-
.../1/instance_manager.mdx | 59 +-
.../1/kubectl-plugin.mdx | 2 +-
.../1/labels_annotations.mdx | 153 +
.../postgres_for_kubernetes/1/monitoring.mdx | 4 +-
.../postgres_for_kubernetes/1/openshift.mdx | 10 +-
.../1/operator_capability_levels.mdx | 73 +-
.../postgres_for_kubernetes/1/postgis.mdx | 4 +-
.../1/postgresql_conf.mdx | 11 +
.../postgres_for_kubernetes/1/quickstart.mdx | 17 +-
.../postgres_for_kubernetes/1/recovery.mdx | 615 +++
.../1/replica_cluster.mdx | 55 +-
.../postgres_for_kubernetes/1/replication.mdx | 34 +-
.../1/resource_management.mdx | 2 +-
.../postgres_for_kubernetes/1/samples.mdx | 15 +-
.../samples/backup-with-volume-snapshot.yaml | 8 +
.../1/samples/cluster-example-full.yaml | 2 +-
.../1/samples/cluster-example-monitoring.yaml | 32 -
...-example-replica-from-volume-snapshot.yaml | 54 +
.../cluster-example-replica-streaming.yaml | 2 +-
.../samples/cluster-example-with-backup.yaml | 2 +-
.../cluster-example-with-volume-snapshot.yaml | 32 +
.../cluster-restore-snapshot-full.yaml | 18 +
.../cluster-restore-snapshot-pitr.yaml | 40 +
.../1/samples/cluster-restore-snapshot.yaml | 3 +-
.../1/samples/monitoring/alerts.yaml | 9 +
.../samples/monitoring/grafana-configmap.yaml | 1038 +++-
.../samples/monitoring/grafana-dashboard.json | 80 +-
.../1/samples/monitoring/prometheusrule.yaml | 9 +
.../postgres_for_kubernetes/1/scheduling.mdx | 4 +-
.../postgres_for_kubernetes/1/security.mdx | 17 +-
.../1/ssl_connections.mdx | 2 +-
.../postgres_for_kubernetes/1/storage.mdx | 28 +-
.../1/troubleshooting.mdx | 59 +-
.../1/wal_archiving.mdx | 79 +
scripts/fileProcessor/package-lock.json | 5 +-
scripts/source/package-lock.json | 5 +-
58 files changed, 8041 insertions(+), 2926 deletions(-)
delete mode 100644 product_docs/docs/postgres_for_kubernetes/1/api_reference.md.in
delete mode 100644 product_docs/docs/postgres_for_kubernetes/1/api_reference.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/appendixes/object_stores.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/backup.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/backup-with-volume-snapshot.yaml
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-replica-from-volume-snapshot.yaml
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-volume-snapshot.yaml
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot-full.yaml
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot-pitr.yaml
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
diff --git a/product_docs/docs/postgres_for_kubernetes/1/api_reference.md.in b/product_docs/docs/postgres_for_kubernetes/1/api_reference.md.in
deleted file mode 100644
index cde03bde967..00000000000
--- a/product_docs/docs/postgres_for_kubernetes/1/api_reference.md.in
+++ /dev/null
@@ -1,40 +0,0 @@
-# API Reference
-
-EDB Postgres for Kubernetes extends the Kubernetes API by defining the following
-custom resources:
-
-- [Backup](#backup)
-- [Cluster](#cluster)
-- [Pooler](#pooler)
-- [ScheduledBackup](#scheduledbackup)
-
-All the resources are defined in the `postgresql.k8s.enterprisedb.io/v1`
-API.
-
-Please refer to the ["Configuration Samples" page](samples.md)" of the
-documentation for examples of usage.
-
-Below you will find a description of the defined resources:
-
-
-
-{{ range $ -}}
-- [{{ .Name -}}](#{{ .Name -}})
-{{ end }}
-
-{{ range $ -}}
-{{ .Anchor }}
-
-## {{ .Name }}
-
-{{ .Doc -}}
-{{ if .Items }}
-
-{{ .TableFieldName }} | {{ .TableFieldDoc }} | {{ .TableFieldRawType }}
-{{ .TableFieldNameDashSize }} | {{ .TableFieldDocDashSize }} | {{ .TableFieldRawTypeDashSize }}
-{{ end }}
-{{- range .Items -}}
-`{{ .Name }}` | {{ .Doc }}{{ if .Mandatory }} - *mandatory* {{ end }} | {{ .RawType }}
-{{ end }}
-{{ end -}}
-
diff --git a/product_docs/docs/postgres_for_kubernetes/1/api_reference.mdx b/product_docs/docs/postgres_for_kubernetes/1/api_reference.mdx
deleted file mode 100644
index c23f4c19fe8..00000000000
--- a/product_docs/docs/postgres_for_kubernetes/1/api_reference.mdx
+++ /dev/null
@@ -1,1149 +0,0 @@
----
-title: 'API Reference'
-originalFilePath: 'src/api_reference.md'
----
-
-EDB Postgres for Kubernetes extends the Kubernetes API by defining the following
-custom resources:
-
-- [Backup](#backup)
-- [Cluster](#cluster)
-- [Pooler](#pooler)
-- [ScheduledBackup](#scheduledbackup)
-
-All the resources are defined in the `postgresql.k8s.enterprisedb.io/v1`
-API.
-
-Please refer to the ["Configuration Samples" page](samples.md)" of the
-documentation for examples of usage.
-
-Below you will find a description of the defined resources:
-
-
-
-- [AffinityConfiguration](#AffinityConfiguration)
-- [AzureCredentials](#AzureCredentials)
-- [Backup](#Backup)
-- [BackupConfiguration](#BackupConfiguration)
-- [BackupList](#BackupList)
-- [BackupSource](#BackupSource)
-- [BackupSpec](#BackupSpec)
-- [BackupStatus](#BackupStatus)
-- [BarmanCredentials](#BarmanCredentials)
-- [BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration)
-- [BootstrapConfiguration](#BootstrapConfiguration)
-- [BootstrapInitDB](#BootstrapInitDB)
-- [BootstrapPgBaseBackup](#BootstrapPgBaseBackup)
-- [BootstrapRecovery](#BootstrapRecovery)
-- [CertificatesConfiguration](#CertificatesConfiguration)
-- [CertificatesStatus](#CertificatesStatus)
-- [Cluster](#Cluster)
-- [ClusterList](#ClusterList)
-- [ClusterSpec](#ClusterSpec)
-- [ClusterStatus](#ClusterStatus)
-- [ConfigMapKeySelector](#ConfigMapKeySelector)
-- [ConfigMapResourceVersion](#ConfigMapResourceVersion)
-- [DataBackupConfiguration](#DataBackupConfiguration)
-- [DataSource](#DataSource)
-- [EPASConfiguration](#EPASConfiguration)
-- [EmbeddedObjectMetadata](#EmbeddedObjectMetadata)
-- [ExternalCluster](#ExternalCluster)
-- [GoogleCredentials](#GoogleCredentials)
-- [Import](#Import)
-- [ImportSource](#ImportSource)
-- [InstanceID](#InstanceID)
-- [InstanceReportedState](#InstanceReportedState)
-- [LDAPBindAsAuth](#LDAPBindAsAuth)
-- [LDAPBindSearchAuth](#LDAPBindSearchAuth)
-- [LDAPConfig](#LDAPConfig)
-- [LocalObjectReference](#LocalObjectReference)
-- [ManagedConfiguration](#ManagedConfiguration)
-- [ManagedRoles](#ManagedRoles)
-- [Metadata](#Metadata)
-- [MonitoringConfiguration](#MonitoringConfiguration)
-- [NodeMaintenanceWindow](#NodeMaintenanceWindow)
-- [PasswordState](#PasswordState)
-- [PgBouncerIntegrationStatus](#PgBouncerIntegrationStatus)
-- [PgBouncerSecrets](#PgBouncerSecrets)
-- [PgBouncerSpec](#PgBouncerSpec)
-- [PodTemplateSpec](#PodTemplateSpec)
-- [Pooler](#Pooler)
-- [PoolerIntegrations](#PoolerIntegrations)
-- [PoolerList](#PoolerList)
-- [PoolerMonitoringConfiguration](#PoolerMonitoringConfiguration)
-- [PoolerSecrets](#PoolerSecrets)
-- [PoolerSpec](#PoolerSpec)
-- [PoolerStatus](#PoolerStatus)
-- [PostInitApplicationSQLRefs](#PostInitApplicationSQLRefs)
-- [PostgresConfiguration](#PostgresConfiguration)
-- [RecoveryTarget](#RecoveryTarget)
-- [ReplicaClusterConfiguration](#ReplicaClusterConfiguration)
-- [ReplicationSlotsConfiguration](#ReplicationSlotsConfiguration)
-- [ReplicationSlotsHAConfiguration](#ReplicationSlotsHAConfiguration)
-- [RoleConfiguration](#RoleConfiguration)
-- [RollingUpdateStatus](#RollingUpdateStatus)
-- [S3Credentials](#S3Credentials)
-- [ScheduledBackup](#ScheduledBackup)
-- [ScheduledBackupList](#ScheduledBackupList)
-- [ScheduledBackupSpec](#ScheduledBackupSpec)
-- [ScheduledBackupStatus](#ScheduledBackupStatus)
-- [SecretKeySelector](#SecretKeySelector)
-- [SecretVersion](#SecretVersion)
-- [SecretsResourceVersion](#SecretsResourceVersion)
-- [ServiceAccountTemplate](#ServiceAccountTemplate)
-- [StorageConfiguration](#StorageConfiguration)
-- [SyncReplicaElectionConstraints](#SyncReplicaElectionConstraints)
-- [TDEConfiguration](#TDEConfiguration)
-- [Topology](#Topology)
-- [WalBackupConfiguration](#WalBackupConfiguration)
-
-
-
-## AffinityConfiguration
-
-AffinityConfiguration contains the info we need to create the affinity rules for Pods
-
-| Name | Description | Type |
-| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
-| `enablePodAntiAffinity `    | Activates anti-affinity for the pods. The operator will define pod anti-affinity rules unless this field is explicitly set to false | \*bool |
-| `topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string |
-| `nodeSelector `             | NodeSelector is a map of key-value pairs used to define the nodes on which the pods can run. More info: | map[string]string |
-| `nodeAffinity ` | NodeAffinity describes node affinity scheduling rules for the pod. More info: | \*corev1.NodeAffinity |
-| `tolerations ` | Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run on tainted nodes. More info: | \[]corev1.Toleration |
-| `podAntiAffinityType `      | PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are added if all the existing nodes don't match the required pod anti-affinity rule. More info: | string |
-| `additionalPodAntiAffinity` | AdditionalPodAntiAffinity allows specifying pod anti-affinity terms to be added to the ones generated by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. | \*corev1.PodAntiAffinity |
-| `additionalPodAffinity `    | AdditionalPodAffinity allows specifying pod affinity terms to be passed to all the cluster's pods. | \*corev1.PodAffinity |
-
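-A minimal sketch of these fields inside a `Cluster` spec (the node selector label and its value are illustrative):
-
-```yaml
-spec:
-  affinity:
-    enablePodAntiAffinity: true
-    topologyKey: kubernetes.io/hostname
-    podAntiAffinityType: preferred
-    nodeSelector:
-      workload: database
-```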
-
-
-## AzureCredentials
-
-AzureCredentials is the type for the credentials to be used to upload files to Azure Blob Storage. The connection string contains all the needed information. If the connection string is not specified, we'll need the storage account name and also one (and only one) of:
-
-- storageKey
-- storageSasToken
-
-- inheriting the credentials from the pod environment by setting inheritFromAzureAD to true
-
-| Name | Description | Type |
-| -------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------- |
-| `connectionString ` | The connection string to be used | [\*SecretKeySelector](#SecretKeySelector) |
-| `storageAccount ` | The storage account where to upload data | [\*SecretKeySelector](#SecretKeySelector) |
-| `storageKey ` | The storage account key to be used in conjunction with the storage account name | [\*SecretKeySelector](#SecretKeySelector) |
-| `storageSasToken ` | A shared-access-signature to be used in conjunction with the storage account name | [\*SecretKeySelector](#SecretKeySelector) |
-| `inheritFromAzureAD` | Use the Azure AD based authentication without providing explicitly the keys. - *mandatory* | bool |
-
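-A minimal sketch of an Azure Blob Storage configuration inside `barmanObjectStore`, using the storage account name and key (the destination path, secret name and keys are illustrative):
-
-```yaml
-backup:
-  barmanObjectStore:
-    destinationPath: https://mystorageaccount.blob.core.windows.net/backups/
-    azureCredentials:
-      storageAccount:
-        name: azure-creds
-        key: AZURE_STORAGE_ACCOUNT
-      storageKey:
-        name: azure-creds
-        key: AZURE_STORAGE_KEY
-```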
-
-
-## Backup
-
-Backup is the Schema for the backups API
-
-| Name | Description | Type |
-| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ |
-| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) |
-| `spec ` | Specification of the desired behavior of the backup. More info: | [BackupSpec](#BackupSpec) |
-| `status ` | Most recently observed status of the backup. This data may not be up to date. Populated by the system. Read-only. More info: | [BackupStatus](#BackupStatus) |
-
-
-
-## BackupConfiguration
-
-BackupConfiguration defines how the backups of the cluster are taken. Currently the only supported backup method is barmanObjectStore. For details and examples refer to the Backup and Recovery section of the documentation
-
-| Name | Description | Type |
-| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------- |
-| `barmanObjectStore` | The configuration for the barman-cloud tool suite | [\*BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration) |
-| `retentionPolicy `  | RetentionPolicy is the retention policy to be used for backups and WALs (e.g. '60d'). The retention policy is expressed in the form of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - days, weeks, months. | string |
-| `target `           | The policy to decide which instance should perform backups. Available options are empty string (which defaults to the `prefer-standby` policy), `primary` to always run backups on the primary instance, and `prefer-standby` to run backups preferably on the most updated standby, if available. | BackupTarget |
-
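-A minimal sketch of a backup configuration with a 30-day retention policy (bucket and secret names are illustrative):
-
-```yaml
-spec:
-  backup:
-    retentionPolicy: "30d"
-    target: prefer-standby
-    barmanObjectStore:
-      destinationPath: s3://my-bucket/backups/
-      s3Credentials:
-        accessKeyId:
-          name: aws-creds
-          key: ACCESS_KEY_ID
-        secretAccessKey:
-          name: aws-creds
-          key: ACCESS_SECRET_KEY
-```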
-
-
-## BackupList
-
-BackupList contains a list of Backup
-
-| Name | Description | Type |
-| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- |
-| `metadata` | Standard list metadata. More info: | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#listmeta-v1-meta) |
-| `items ` | List of backups - *mandatory* | [\[\]Backup](#Backup) |
-
-
-
-## BackupSource
-
-BackupSource contains the backup we need to restore from, plus some information that could be needed to correctly restore it.
-
-| Name | Description | Type |
-| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------- |
-| `endpointCA` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive. | [\*SecretKeySelector](#SecretKeySelector) |
-
-
-
-## BackupSpec
-
-BackupSpec defines the desired state of Backup
-
-| Name | Description | Type |
-| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------- |
-| `cluster` | The cluster to backup | [LocalObjectReference](#LocalObjectReference) |
-| `target ` | The policy to decide which instance should perform this backup. If empty, it defaults to `cluster.spec.backup.target`. Available options are empty string, `primary` and `prefer-standby`: `primary` to always run backups on the primary instance, `prefer-standby` to run backups preferably on the most updated standby, if available. | BackupTarget |
-
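-A minimal sketch of an on-demand `Backup` resource (resource and cluster names are illustrative):
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Backup
-metadata:
-  name: backup-example
-spec:
-  cluster:
-    name: cluster-example
-  target: prefer-standby
-```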
-
-
-## BackupStatus
-
-BackupStatus defines the observed state of Backup
-
-| Name | Description | Type |
-| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- |
-| `endpointCA ` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive. | [\*SecretKeySelector](#SecretKeySelector) |
-| `endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string |
-| `destinationPath` | The path where to store the backup (e.g. s3://bucket/path/to/folder); this path, with different destination folders, will be used for WALs and for data. This may not be populated in case of errors. | string |
-| `serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string |
-| `encryption `     | Encryption method required by the S3 API | string |
-| `backupId ` | The ID of the Barman backup | string |
-| `backupName ` | The Name of the Barman backup | string |
-| `phase ` | The last backup status | BackupPhase |
-| `startedAt ` | When the backup was started | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) |
-| `stoppedAt ` | When the backup was terminated | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) |
-| `beginWal ` | The starting WAL | string |
-| `endWal ` | The ending WAL | string |
-| `beginLSN ` | The starting xlog | string |
-| `endLSN ` | The ending xlog | string |
-| `error ` | The detected error | string |
-| `commandOutput ` | Unused. Retained for compatibility with old versions. | string |
-| `commandError ` | The backup command output in case of error | string |
-| `instanceID ` | Information to identify the instance where the backup has been taken from | [\*InstanceID](#InstanceID) |
-
-
-
-## BarmanCredentials
-
-BarmanCredentials is an object containing the potential credentials for each cloud provider
-
-| Name | Description | Type |
-| ------------------- | ------------------------------------------------------------- | ----------------------------------------- |
-| `googleCredentials` | The credentials to use to upload data to Google Cloud Storage | [\*GoogleCredentials](#GoogleCredentials) |
-| `s3Credentials ` | The credentials to use to upload data to S3 | [\*S3Credentials](#S3Credentials) |
-| `azureCredentials ` | The credentials to use to upload data to Azure Blob Storage | [\*AzureCredentials](#AzureCredentials) |
-
-
-
-## BarmanObjectStoreConfiguration
-
-BarmanObjectStoreConfiguration contains the backup configuration using Barman against an S3-compatible object storage
-
-| Name | Description | Type |
-| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
-| `endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string |
-| `endpointCA ` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive | [\*SecretKeySelector](#SecretKeySelector) |
-| `destinationPath` | The path where to store the backup (e.g. s3://bucket/path/to/folder); this path, with different destination folders, will be used for WALs and for data - *mandatory* | string |
-| `serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string |
-| `wal ` | The configuration for the backup of the WAL stream. When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [\*WalBackupConfiguration](#WalBackupConfiguration) |
-| `data ` | The configuration to be used to backup the data files When not defined, base backups files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [\*DataBackupConfiguration](#DataBackupConfiguration) |
-| `tags `           | Tags is a list of key-value pairs that will be passed to the Barman --tags option. | map[string]string |
-| `historyTags `    | HistoryTags is a list of key-value pairs that will be passed to the Barman --history-tags option. | map[string]string |
-
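-A minimal sketch of a `barmanObjectStore` section with WAL and data compression enabled (the endpoint URL, bucket and tag values are illustrative; credentials are omitted for brevity, see BarmanCredentials):
-
-```yaml
-barmanObjectStore:
-  destinationPath: s3://my-bucket/
-  endpointURL: https://minio.internal:9000
-  serverName: cluster-example
-  wal:
-    compression: gzip
-  data:
-    compression: gzip
-    jobs: 2
-  tags:
-    environment: production
-```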
-
-
-## BootstrapConfiguration
-
-BootstrapConfiguration contains information about how to create the PostgreSQL cluster. Only a single bootstrap method can be defined among the supported ones. `initdb` will be used as the bootstrap method if left unspecified. Refer to the Bootstrap page of the documentation for more information.
-
-| Name | Description | Type |
-| --------------- | ---------------------------------------------------------------------------------------- | ------------------------------------------------- |
-| `initdb ` | Bootstrap the cluster via initdb | [\*BootstrapInitDB](#BootstrapInitDB) |
-| `recovery ` | Bootstrap the cluster from a backup | [\*BootstrapRecovery](#BootstrapRecovery) |
-| `pg_basebackup` | Bootstrap the cluster taking a physical backup of another compatible PostgreSQL instance | [\*BootstrapPgBaseBackup](#BootstrapPgBaseBackup) |
-
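-A minimal sketch showing that exactly one bootstrap method may be set at a time (values are illustrative):
-
-```yaml
-spec:
-  bootstrap:
-    # exactly one of initdb, recovery or pg_basebackup
-    initdb:
-      database: app
-      owner: app
-```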
-
-
-## BootstrapInitDB
-
-BootstrapInitDB is the configuration of the bootstrap process when initdb is used. Refer to the Bootstrap page of the documentation for more information.
-
-| Name | Description | Type |
-| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- |
-| `database ` | Name of the database used by the application. Default: `app`. - *mandatory* | string |
-| `owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string |
-| `secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [\*LocalObjectReference](#LocalObjectReference) |
-| `redwood ` | If we need to enable/disable Redwood compatibility. Requires EPAS and for EPAS defaults to true | \*bool |
-| `options `                  | The list of options that must be passed to initdb when creating the cluster. Deprecated: This could lead to inconsistent configurations; please use the explicitly provided parameters instead. If defined, explicit values will be ignored. | \[]string |
-| `dataChecksums ` | Whether the `-k` option should be passed to initdb, enabling checksums on data pages (default: `false`) | \*bool |
-| `encoding ` | The value to be passed as option `--encoding` for initdb (default:`UTF8`) | string |
-| `localeCollate ` | The value to be passed as option `--lc-collate` for initdb (default:`C`) | string |
-| `localeCType ` | The value to be passed as option `--lc-ctype` for initdb (default:`C`) | string |
-| `walSegmentSize ` | The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` option for initdb (default: empty, resulting in PostgreSQL default: 16MB) | int |
-| `postInitSQL ` | List of SQL queries to be executed as a superuser immediately after the cluster has been created - to be used with extreme care (by default empty) | \[]string |
-| `postInitApplicationSQL `    | List of SQL queries to be executed as a superuser in the application database right after it is created - to be used with extreme care (by default empty) | \[]string |
-| `postInitTemplateSQL `       | List of SQL queries to be executed as a superuser in the `template1` database after the cluster has been created - to be used with extreme care (by default empty) | \[]string |
-| `import ` | Bootstraps the new cluster by importing data from an existing PostgreSQL instance using logical backup (`pg_dump` and `pg_restore`) | [\*Import](#Import) |
-| `postInitApplicationSQLRefs` | PostInitApplicationSQLRefs references ConfigMaps or Secrets containing SQL files to be executed in the application database. The references are processed from all Secrets to all ConfigMaps; within each Secret or ConfigMap, the execution order follows the order of the array (by default empty) | [\*PostInitApplicationSQLRefs](#PostInitApplicationSQLRefs) |
-
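-A minimal sketch of an `initdb` bootstrap with data checksums and a post-init query (the database name and SQL statement are illustrative):
-
-```yaml
-spec:
-  bootstrap:
-    initdb:
-      database: app
-      owner: app
-      dataChecksums: true
-      encoding: UTF8
-      postInitSQL:
-        - CREATE EXTENSION IF NOT EXISTS pg_stat_statements
-```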
-
-
-## BootstrapPgBaseBackup
-
-BootstrapPgBaseBackup contains the configuration required to take a physical backup of an existing PostgreSQL cluster
-
-| Name | Description | Type |
-| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------- |
-| `source ` | The name of the server of which we need to take a physical backup - *mandatory* | string |
-| `database` | Name of the database used by the application. Default: `app`. - *mandatory* | string |
-| `owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string |
-| `secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [\*LocalObjectReference](#LocalObjectReference) |
-
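-A minimal sketch of a `pg_basebackup` bootstrap pointing at an external cluster (the host and names are illustrative; password or TLS authentication must additionally be configured on the external cluster entry):
-
-```yaml
-spec:
-  bootstrap:
-    pg_basebackup:
-      source: source-cluster
-      database: app
-      owner: app
-  externalClusters:
-    - name: source-cluster
-      connectionParameters:
-        host: source-db.example.com
-        user: streaming_replica
-```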
-
-
-## BootstrapRecovery
-
-BootstrapRecovery contains the configuration required to restore from an existing cluster using 3 methodologies: external cluster, volume snapshots or backup objects. Full recovery and Point-In-Time Recovery are supported. The method can also be used to create clusters in continuous recovery (replica clusters), also supporting cascading replication when `instances` > 1. Once the cluster exits recovery, the password for the superuser will be changed through the provided secret. Refer to the Bootstrap page of the documentation for more information.
-
-| Name | Description | Type |
-| ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------- |
-| `backup ` | The backup object containing the physical base backup from which to initiate the recovery procedure. Mutually exclusive with `source` and `volumeSnapshots`. | [\*BackupSource](#BackupSource) |
-| `source `          | The external cluster whose backup we will restore. This is also used as the name of the folder under which the backup is stored, so it must be set to the name of the source cluster. Mutually exclusive with `backup` and `volumeSnapshots`. | string |
-| `volumeSnapshots` | The static PVC data source(s) from which to initiate the recovery procedure. Currently supporting `VolumeSnapshot` and `PersistentVolumeClaim` resources that map an existing PVC group, compatible with EDB Postgres for Kubernetes, and taken with a cold backup copy on a fenced Postgres instance (limitation which will be removed in the future when online backup will be implemented). Mutually exclusive with `backup` and `source`. | [\*DataSource](#DataSource) |
-| `recoveryTarget `  | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, as expected by PostgreSQL (e.g., timestamp, transaction ID, LSN, ...). More info: | [\*RecoveryTarget](#RecoveryTarget) |
-| `database ` | Name of the database used by the application. Default: `app`. - *mandatory* | string |
-| `owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string |
-| `secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [\*LocalObjectReference](#LocalObjectReference) |
-
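-A minimal sketch of a point-in-time recovery from an external cluster's object store (the bucket, secret and timestamp values are illustrative):
-
-```yaml
-spec:
-  bootstrap:
-    recovery:
-      source: origin
-      recoveryTarget:
-        targetTime: "2023-10-17 12:00:00.000000+00"
-  externalClusters:
-    - name: origin
-      barmanObjectStore:
-        destinationPath: s3://my-bucket/
-        s3Credentials:
-          accessKeyId:
-            name: aws-creds
-            key: ACCESS_KEY_ID
-          secretAccessKey:
-            name: aws-creds
-            key: ACCESS_SECRET_KEY
-```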
-
-
-## CertificatesConfiguration
-
-CertificatesConfiguration contains the needed configurations to handle server certificates.
-
-| Name | Description | Type |
-| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- |
-| `serverCASecret `      | The secret containing the Server CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret. Contains: `ca.crt`, the CA that should be used to validate the server certificate, used as `sslrootcert` in client connection strings; and `ca.key`, the key used to generate Server SSL certs (can be omitted if ServerTLSSecret is provided). | string |
-| `serverTLSSecret `     | The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. If not defined, ServerCASecret must also provide `ca.key`, and a new secret will be created using the provided CA. | string |
-| `replicationTLSSecret` | The secret of type kubernetes.io/tls containing the client certificate to authenticate as the `streaming_replica` user. If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be created using the provided CA. | string |
-| `clientCASecret `      | The secret containing the Client CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate all the client certificates. Contains: `ca.crt`, the CA that should be used to validate the client certificates, used as `ssl_ca_file` of all the instances; and `ca.key`, the key used to generate client certificates (can be omitted if ReplicationTLSSecret is provided). | string |
-| `serverAltDNSNames ` | The list of the server alternative DNS names to be added to the generated server TLS certificates, when required. | \[]string |
-
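-A minimal sketch of a user-provided server certificate setup (the secret and DNS names are illustrative):
-
-```yaml
-spec:
-  certificates:
-    serverCASecret: my-server-ca
-    serverTLSSecret: my-server-tls
-    clientCASecret: my-client-ca
-    replicationTLSSecret: my-replication-tls
-    serverAltDNSNames:
-      - cluster-example.example.com
-```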
-
-
-## CertificatesStatus
-
-CertificatesStatus contains configuration certificates and related expiration dates.
-
-| Name | Description | Type |
-| ------------- | -------------------------------------- | ----------------- |
-| `expirations` | Expiration dates for all certificates. | map[string]string |
-
-
-
-## Cluster
-
-Cluster is the Schema for the PostgreSQL API
-
-| Name | Description | Type |
-| ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ |
-| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) |
-| `spec ` | Specification of the desired behavior of the cluster. More info: | [ClusterSpec](#ClusterSpec) |
-| `status ` | Most recently observed status of the cluster. This data may not be up to date. Populated by the system. Read-only. More info: | [ClusterStatus](#ClusterStatus) |
-
-
-
-## ClusterList
-
-ClusterList contains a list of Cluster
-
-| Name | Description | Type |
-| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- |
-| `metadata` | Standard list metadata. More info: | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#listmeta-v1-meta) |
-| `items ` | List of clusters - *mandatory* | [\[\]Cluster](#Cluster) |
-
-
-
-## ClusterSpec
-
-ClusterSpec defines the desired state of Cluster
-
-| Name | Description | Type |
-| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
-| `description ` | Description of this PostgreSQL cluster | string |
-| `inheritedMetadata ` | Metadata that will be inherited by all objects related to the Cluster | [\*EmbeddedObjectMetadata](#EmbeddedObjectMetadata) |
-| `imageName `                | Name of the container image, supporting both tags (`image:tag`) and digests for deterministic and repeatable deployments (`image:tag@sha256:digest`) | string |
-| `imagePullPolicy ` | Image pull policy. One of `Always`, `Never` or `IfNotPresent`. If not defined, it defaults to `IfNotPresent`. Cannot be updated. More info: | corev1.PullPolicy |
-| `schedulerName `            | If specified, the pod will be dispatched by the specified Kubernetes scheduler. If not specified, the pod will be dispatched by the default scheduler. More info: | string |
-| `postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64 |
-| `postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64 |
-| `instances ` | Number of instances required in the cluster - *mandatory* | int |
-| `minSyncReplicas `          | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allows writes to complete when no standby is available. | int |
-| `maxSyncReplicas `          | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disables synchronous replication. | int |
-| `postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration) |
-| `replicationSlots ` | Replication slots management configuration | [\*ReplicationSlotsConfiguration](#ReplicationSlotsConfiguration) |
-| `bootstrap ` | Instructions to bootstrap this cluster | [\*BootstrapConfiguration](#BootstrapConfiguration) |
-| `replica ` | Replica cluster configuration | [\*ReplicaClusterConfiguration](#ReplicaClusterConfiguration) |
-| `superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [\*LocalObjectReference](#LocalObjectReference) |
-| `enableSuperuserAccess ` | When this option is enabled, the operator will use the `SuperuserSecret` to update the `postgres` user password (if the secret is not present, the operator will automatically create one). When this option is disabled, the operator will ignore the `SuperuserSecret` content, delete it when automatically created, and then blank the password of the `postgres` user by setting it to `NULL`. Enabled by default. | \*bool |
-| `certificates ` | The configuration for the CA and related certificates | [\*CertificatesConfiguration](#CertificatesConfiguration) |
-| `imagePullSecrets `         | The list of pull secrets to be used to pull the images. If the license key contains a pull secret, that secret will be automatically included. | [\[\]LocalObjectReference](#LocalObjectReference) |
-| `storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration) |
-| `serviceAccountTemplate ` | Configure the generation of the service account | [\*ServiceAccountTemplate](#ServiceAccountTemplate) |
-| `walStorage ` | Configuration of the storage for PostgreSQL WAL (Write-Ahead Log) | [\*StorageConfiguration](#StorageConfiguration) |
-| `startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 |
-| `stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance to gracefully shutdown (default 30) | int32 |
-| `switchoverDelay ` | The time in seconds that is allowed for a primary PostgreSQL instance to gracefully shutdown during a switchover. Default value is 40000000, greater than one year in seconds, big enough to simulate an infinite delay | int32 |
-| `failoverDelay ` | The amount of time (in seconds) to wait before triggering a failover after the primary PostgreSQL instance in the cluster was detected to be unhealthy | int32 |
-| `affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration) |
-| `topologySpreadConstraints` | TopologySpreadConstraints specifies how to spread matching pods among the given topology. More info: | \[]corev1.TopologySpreadConstraint |
-| `resources ` | Resources requirements of every generated Pod. Please refer to for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#resourcerequirements-v1-core) |
-| `priorityClassName ` | Name of the priority class which will be used in every generated Pod, if the PriorityClass specified does not exist, the pod will not be able to schedule. Please refer to for more information | string |
-| `primaryUpdateStrategy ` | Deployment strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy |
-| `primaryUpdateMethod ` | Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be with a switchover (`switchover`) or in-place (`restart` - default) | PrimaryUpdateMethod |
-| `backup ` | The configuration to be used for backups | [\*BackupConfiguration](#BackupConfiguration) |
-| `nodeMaintenanceWindow ` | Define a maintenance window for the Kubernetes nodes | [\*NodeMaintenanceWindow](#NodeMaintenanceWindow) |
-| `licenseKey ` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string |
-| `licenseKeySecret `         | The reference to the license key. When this is set, it takes precedence over LicenseKey. | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-| `monitoring ` | The configuration of the monitoring infrastructure of this cluster | [\*MonitoringConfiguration](#MonitoringConfiguration) |
-| `externalClusters ` | The list of external clusters which are used in the configuration | [\[\]ExternalCluster](#ExternalCluster) |
-| `logLevel ` | The instances' log level, one of the following values: error, warning, info (default), debug, trace | string |
-| `projectedVolumeTemplate ` | Template to be used to define projected volumes, projected volumes will be mounted under `/projected` base folder | \*corev1.ProjectedVolumeSource |
-| `env ` | Env follows the Env format to pass environment variables to the pods created in the cluster | \[]corev1.EnvVar |
-| `envFrom ` | EnvFrom follows the EnvFrom format to pass environment variables sources to the pods to be used by Env | \[]corev1.EnvFromSource |
-| `managed ` | The configuration that is used by the portions of PostgreSQL that are managed by the instance manager | [\*ManagedConfiguration](#ManagedConfiguration) |
-| `seccompProfile ` | The SeccompProfile applied to every Pod and Container. Defaults to: `RuntimeDefault` | \*corev1.SeccompProfile |
-
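-A minimal sketch of a `Cluster` manifest using only the mandatory and most common fields (names and sizes are illustrative):
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
-  name: cluster-example
-spec:
-  instances: 3
-  primaryUpdateStrategy: unsupervised
-  storage:
-    size: 1Gi
-```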
-
-
-## ClusterStatus
-
-ClusterStatus defines the observed state of Cluster
-
-| Name | Description | Type |
-| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------- |
-| `instances ` | The total number of PVC Groups detected in the cluster. It may differ from the number of existing instance pods. | int |
-| `readyInstances ` | The total number of ready instances in the cluster. It is equal to the number of ready instance pods. | int |
-| `instancesStatus ` | InstancesStatus indicates in which status the instances are | map[utils.PodStatus][]string |
-| `instancesReportedState ` | The reported state of the instances during the last reconciliation loop | [map\[PodName\]InstanceReportedState](#InstanceReportedState) |
-| `managedRolesStatus ` | ManagedRolesStatus reports the state of the managed roles in the cluster | [ManagedRoles](#ManagedRoles) |
-| `timelineID ` | The timeline of the Postgres cluster | int |
-| `topology ` | Instances topology. | [Topology](#Topology) |
-| `latestGeneratedNode ` | ID of the latest generated node (used to avoid node name clashing) | int |
-| `currentPrimary ` | Current primary instance | string |
-| `targetPrimary ` | Target primary instance, this is different from the previous one during a switchover or a failover | string |
-| `pvcCount ` | How many PVCs have been created by this cluster | int32 |
-| `jobCount ` | How many Jobs have been created by this cluster | int32 |
-| `danglingPVC ` | List of all the PVCs created by this cluster and still available which are not attached to a Pod | \[]string |
-| `resizingPVC ` | List of all the PVCs that have ResizingPVC condition. | \[]string |
-| `initializingPVC ` | List of all the PVCs that are being initialized by this cluster | \[]string |
-| `healthyPVC ` | List of all the PVCs not dangling nor initializing | \[]string |
-| `unusablePVC ` | List of all the PVCs that are unusable because another PVC is missing | \[]string |
-| `licenseStatus ` | Status of the license | licensekey.Status |
-| `writeService ` | Current write pod | string |
-| `readService ` | Current list of read pods | string |
-| `phase ` | Current phase of the cluster | string |
-| `phaseReason ` | Reason for the current phase | string |
-| `secretsResourceVersion ` | The list of resource versions of the secrets managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the secret data | [SecretsResourceVersion](#SecretsResourceVersion) |
-| `configMapResourceVersion ` | The list of resource versions of the configmaps, managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the configmap data | [ConfigMapResourceVersion](#ConfigMapResourceVersion) |
-| `certificates ` | The configuration for the CA and related certificates, initialized with defaults. | [CertificatesStatus](#CertificatesStatus) |
-| `firstRecoverabilityPoint ` | The first recoverability point, stored as a date in RFC3339 format | string |
-| `lastSuccessfulBackup ` | Stored as a date in RFC3339 format | string |
-| `lastFailedBackup ` | Stored as a date in RFC3339 format | string |
-| `cloudNativePostgresqlCommitHash `    | The commit hash of the operator build that is running | string |
-| `currentPrimaryTimestamp ` | The timestamp when the last actual promotion to primary has occurred | string |
-| `currentPrimaryFailingSinceTimestamp` | The timestamp when the primary was detected to be unhealthy. This field is reported when spec.failoverDelay is populated or during online upgrades | string |
-| `targetPrimaryTimestamp ` | The timestamp when the last request for a new primary has occurred | string |
-| `poolerIntegrations ` | The integration needed by poolers referencing the cluster | [\*PoolerIntegrations](#PoolerIntegrations) |
-| `cloudNativePostgresqlOperatorHash ` | The hash of the binary of the operator | string |
-| `onlineUpdateEnabled ` | OnlineUpdateEnabled shows if the online upgrade is enabled inside the cluster | bool |
-| `azurePVCUpdateEnabled ` | AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster | bool |
-| `conditions ` | Conditions for cluster object | \[]metav1.Condition |
-| `instanceNames ` | List of instance names in the cluster | \[]string |
-
-
-
-## ConfigMapKeySelector
-
-ConfigMapKeySelector contains enough information to let you locate the key of a ConfigMap
-
-| Name | Description | Type |
-| ----- | ------------------------------- | ------ |
-| `key` | The key to select - *mandatory* | string |
-
-
-
-## ConfigMapResourceVersion
-
-ConfigMapResourceVersion is the resource versions of the config maps managed by the operator
-
-| Name | Description | Type |
-| --------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------- |
-| `metrics` | A map with the versions of all the config maps used to pass metrics. Map keys are the config map names, map values are the versions | map[string]string |
-
-
-
-## DataBackupConfiguration
-
-DataBackupConfiguration is the configuration of the backup of the data directory
-
-| Name | Description | Type |
-| --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
-| `compression ` | Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. | CompressionType |
-| `encryption `         | Whether to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType |
-| `immediateCheckpoint` | Control whether the I/O workload for the backup initial checkpoint will be limited, according to the `checkpoint_completion_target` setting on the PostgreSQL server. If set to true, an immediate checkpoint will be used, meaning PostgreSQL will complete the checkpoint as soon as possible. `false` by default. | bool |
-| `jobs ` | The number of parallel jobs to be used to upload the backup, defaults to 2 | \*int32 |
-
-
-
-## DataSource
-
-DataSource contains the configuration required to bootstrap a PostgreSQL cluster from existing storage
-
-| Name | Description | Type |
-| ------------ | ------------------------------------------------------------------------------- | ---------------------------------- |
-| `storage ` | Configuration of the storage of the instances - *mandatory* | corev1.TypedLocalObjectReference |
-| `walStorage` | Configuration of the storage for PostgreSQL WAL (Write-Ahead Log) | \*corev1.TypedLocalObjectReference |
-
-
-
-## EPASConfiguration
-
-EPASConfiguration contains EDB Postgres Advanced Server specific configurations
-
-| Name | Description | Type |
-| ------- | --------------------------------- | --------------------------------------- |
-| `audit` | If true, enables edb_audit logging | bool |
-| `tde ` | TDE configuration | [\*TDEConfiguration](#TDEConfiguration) |
-
-
-
-## EmbeddedObjectMetadata
-
-EmbeddedObjectMetadata contains metadata to be inherited by all resources related to a Cluster
-
-| Name | Description | Type |
-| ------------- | ----------- | ----------------- |
-| `labels ` | | map[string]string |
-| `annotations` | | map[string]string |
-
-
-
-## ExternalCluster
-
-ExternalCluster represents the connection parameters to an external cluster which is used in the other sections of the configuration
-
-| Name | Description | Type |
-| ---------------------- | ------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- |
-| `name ` | The server name, required - *mandatory* | string |
-| `connectionParameters` | The list of connection parameters, such as dbname, host, username, etc | map[string]string |
-| `sslCert ` | The reference to an SSL certificate to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-| `sslKey ` | The reference to an SSL private key to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-| `sslRootCert ` | The reference to an SSL CA public key to be used to connect to this instance | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-| `password ` | The reference to the password to be used to connect to the server | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-| `barmanObjectStore ` | The configuration for the barman-cloud tool suite | [\*BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration) |
-
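-A minimal sketch of an external cluster entry using password authentication (the host, secret name and key are illustrative):
-
-```yaml
-spec:
-  externalClusters:
-    - name: origin
-      connectionParameters:
-        host: pg.example.com
-        user: postgres
-        dbname: postgres
-      password:
-        name: origin-credentials
-        key: password
-```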
-
-
-## GoogleCredentials
-
-GoogleCredentials is the type for the Google Cloud Storage credentials. This needs to be specified even if we run inside a GKE environment.
-
-| Name | Description | Type |
-| ------------------------ | -------------------------------------------------------------------------------------------------------- | ----------------------------------------- |
-| `gkeEnvironment `        | If set to true, the operator will presume that it's running inside a GKE environment; defaults to false. - *mandatory* | bool |
-| `applicationCredentials` | The secret containing the Google Cloud Storage JSON file with the credentials | [\*SecretKeySelector](#SecretKeySelector) |
-
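-A minimal sketch of Google Cloud Storage credentials inside `barmanObjectStore` (the bucket, secret name and key are illustrative):
-
-```yaml
-barmanObjectStore:
-  destinationPath: gs://my-bucket/
-  googleCredentials:
-    gkeEnvironment: false
-    applicationCredentials:
-      name: backup-creds
-      key: gcsCredentials
-```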
-
-
-## Import
-
-Import contains the configuration to initialize a database from a logical snapshot of an externalCluster
-
-| Name | Description | Type |
-| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------- |
-| `source ` | The source of the import - *mandatory* | [ImportSource](#ImportSource) |
-| `type ` | The import type. Can be `microservice` or `monolith`. - *mandatory* | SnapshotType |
-| `databases ` | The databases to import - *mandatory* | \[]string |
-| `roles ` | The roles to import | \[]string |
-| `postImportApplicationSQL` | List of SQL queries to be executed as a superuser in the application database right after it is imported - to be used with extreme care (by default empty). Only available in microservice type. | \[]string |
-| `schemaOnly ` | When set to true, only the `pre-data` and `post-data` sections of `pg_restore` are invoked, avoiding data import. Default: `false`. | bool |
-
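-A minimal sketch of a microservice-type import inside an `initdb` bootstrap (the database and external cluster names are illustrative):
-
-```yaml
-spec:
-  bootstrap:
-    initdb:
-      database: app
-      owner: app
-      import:
-        type: microservice
-        databases:
-          - app
-        source:
-          externalCluster: source-db
-```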
-
-
-## ImportSource
-
-ImportSource describes the source for the logical snapshot
-
-| Name | Description | Type |
-| ----------------- | ------------------------------------------------------------- | ------ |
-| `externalCluster` | The name of the externalCluster used for import - *mandatory* | string |
-
-
-
-## InstanceID
-
-InstanceID contains the information to identify an instance
-
-| Name | Description | Type |
-| ------------- | ---------------- | ------ |
-| `podName ` | The pod name | string |
-| `ContainerID` | The container ID | string |
-
-
-
-## InstanceReportedState
-
-InstanceReportedState describes the last reported state of an instance during a reconciliation loop
-
-| Name | Description | Type |
-| ------------ | ----------------------------------------------------------- | ---- |
-| `isPrimary ` | indicates if an instance is the primary one - *mandatory* | bool |
-| `timeLineID` | indicates which TimelineID the instance is on | int |
-
-
-
-## LDAPBindAsAuth
-
-LDAPBindAsAuth provides the required fields to use the bind authentication for LDAP
-
-| Name | Description | Type |
-| -------- | ----------------------------------------- | ------ |
-| `prefix` | Prefix for the bind authentication option | string |
-| `suffix` | Suffix for the bind authentication option | string |
-
-
-
-## LDAPBindSearchAuth
-
-LDAPBindSearchAuth provides the required fields to use the bind+search LDAP authentication process
-
-| Name | Description | Type |
-| ----------------- | -------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
-| `baseDN ` | Root DN to begin the user search | string |
-| `bindDN ` | DN of the user to bind to the directory | string |
-| `bindPassword ` | Secret with the password for the user to bind to the directory | [\*corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-| `searchAttribute` | Attribute to match against the username | string |
-| `searchFilter ` | Search filter to use when doing the search+bind authentication | string |
-
-
-
-## LDAPConfig
-
-LDAPConfig contains the parameters needed for LDAP authentication
-
-| Name | Description | Type |
-| ---------------- | --------------------------------------------------------------- | ------------------------------------------- |
-| `server ` | LDAP hostname or IP address | string |
-| `port ` | LDAP server port | int |
-| `scheme `         | LDAP scheme to be used, possible options are `ldap` and `ldaps` | LDAPScheme |
-| `tls ` | Set to 'true' to enable LDAP over TLS. 'false' is default | bool |
-| `bindAsAuth ` | Bind as authentication configuration | [\*LDAPBindAsAuth](#LDAPBindAsAuth) |
-| `bindSearchAuth` | Bind+Search authentication configuration | [\*LDAPBindSearchAuth](#LDAPBindSearchAuth) |
-
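-A minimal sketch of a bind+search LDAP configuration under `spec.postgresql` (the server, DNs and secret references are illustrative):
-
-```yaml
-spec:
-  postgresql:
-    ldap:
-      server: ldap.example.com
-      port: 389
-      scheme: ldap
-      bindSearchAuth:
-        baseDN: ou=people,dc=example,dc=com
-        bindDN: cn=admin,dc=example,dc=com
-        bindPassword:
-          name: ldap-secret
-          key: bindPassword
-        searchAttribute: uid
-```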
-
-
-## LocalObjectReference
-
-LocalObjectReference contains enough information to let you locate a local object with a known type inside the same namespace
-
-| Name | Description | Type |
-| ------ | ----------------------------------- | ------ |
-| `name` | Name of the referent. - *mandatory* | string |
-
-
-
-## ManagedConfiguration
-
-ManagedConfiguration represents the portions of PostgreSQL that are managed by the instance manager
-
-| Name | Description | Type |
-| ------- | --------------------------------------- | ------------------------------------------- |
-| `roles` | Database roles managed by the `Cluster` | [\[\]RoleConfiguration](#RoleConfiguration) |
-
-
-
-## ManagedRoles
-
-ManagedRoles tracks the status of a cluster's managed roles
-
-| Name | Description | Type |
-| ----------------- | ----------------------------------------------------------------------------------------------------- | -------------------------------------------- |
-| `byStatus ` | ByStatus gives the list of roles in each state | map[RoleStatus][]string |
-| `cannotReconcile` | CannotReconcile lists roles that cannot be reconciled in PostgreSQL, with an explanation of the cause | map[string][]string |
-| `passwordStatus ` | PasswordStatus gives the last transaction id and password secret version for each managed role | [map\[string\]PasswordState](#PasswordState) |
-
-
-
-## Metadata
-
-Metadata is a structure similar to the metav1.ObjectMeta, but still parseable by controller-gen to create a suitable CRD for the user. The comment of PodTemplateSpec has an explanation of why we are not using the core data types.
-
-| Name | Description | Type |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------- |
-| `labels ` | Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: | map[string]string |
-| `annotations` | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: | map[string]string |
-
-
-
-## MonitoringConfiguration
-
-MonitoringConfiguration is the type containing all the monitoring configuration for a certain cluster
-
-| Name | Description | Type |
-| ------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- |
-| `disableDefaultQueries ` | Whether the default queries should be injected. Set it to `true` if you don't want to inject default queries into the cluster. Default: false. | \*bool |
-| `customQueriesConfigMap` | The list of config maps containing the custom queries | [\[\]ConfigMapKeySelector](#ConfigMapKeySelector) |
-| `customQueriesSecret ` | The list of secrets containing the custom queries | [\[\]SecretKeySelector](#SecretKeySelector) |
-| `enablePodMonitor ` | Enable or disable the `PodMonitor` | bool |
-
-
-
-## NodeMaintenanceWindow
-
-NodeMaintenanceWindow contains information that the operator will use while upgrading the underlying node.
-
-This option is only useful when the chosen storage prevents the Pods from being freely moved across nodes.
-
-| Name | Description | Type |
-| ------------ | ------------------------------------------------------------------------------------------------------------------------------ | ------ |
-| `inProgress` | Is there a node maintenance activity in progress? - *mandatory* | bool |
-| `reusePVC ` | Reuse the existing PVC (wait for the node to come up again) or not (recreate it elsewhere - when `instances` >1) - *mandatory* | \*bool |
-
-
-
-## PasswordState
-
-PasswordState represents the state of the password of a managed RoleConfiguration
-
-| Name | Description | Type |
-| ----------------- | ------------------------------------------------------------------- | ------ |
-| `transactionID ` | the last transaction ID to affect the role definition in PostgreSQL | int64 |
-| `resourceVersion` | the resource version of the password secret | string |
-
-
-
-## PgBouncerIntegrationStatus
-
-PgBouncerIntegrationStatus encapsulates the needed integration for the pgbouncer poolers referencing the cluster
-
-| Name | Description | Type |
-| --------- | ----------- | --------- |
-| `secrets` | | \[]string |
-
-
-
-## PgBouncerSecrets
-
-PgBouncerSecrets contains the versions of the secrets used by pgbouncer
-
-| Name | Description | Type |
-| ----------- | ----------------------------- | ------------------------------- |
-| `authQuery` | The auth query secret version | [SecretVersion](#SecretVersion) |
-
-
-
-## PgBouncerSpec
-
-PgBouncerSpec defines how to configure PgBouncer
-
-| Name | Description | Type |
-| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------- |
-| `poolMode ` | The pool mode - *mandatory* | PgBouncerPoolMode |
-| `authQuerySecret` | The credentials of the user that need to be used for the authentication query. In case it is specified, also an AuthQuery (e.g. "SELECT usename, passwd FROM pg_shadow WHERE usename=$1") has to be specified and no automatic CNP Cluster integration will be triggered. | [\*LocalObjectReference](#LocalObjectReference) |
-| `authQuery ` | The query that will be used to download the hash of the password of a certain user. Default: "SELECT usename, passwd FROM user_search($1)". In case it is specified, also an AuthQuerySecret has to be specified and no automatic CNP Cluster integration will be triggered. | string |
-| `parameters ` | Additional parameters to be passed to PgBouncer - please check the CNP documentation for a list of options you can configure | map[string]string |
-| `pg_hba ` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | \[]string |
-| `paused ` | When set to `true`, PgBouncer will disconnect from the PostgreSQL server, first waiting for all queries to complete, and pause all new client connections until this value is set to `false` (default). Internally, the operator calls PgBouncer's `PAUSE` and `RESUME` commands. | \*bool |
-
-
-
-## PodTemplateSpec
-
-PodTemplateSpec is a structure allowing the user to set a template for Pod generation.
-
-Unfortunately we can't use the corev1.PodTemplateSpec type because the generated CRD won't have the field for the metadata section.
-
-References:
-
-| Name | Description | Type |
-| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------- |
-| `metadata` | Standard object's metadata. More info: | [Metadata](#Metadata) |
-| `spec ` | Specification of the desired behavior of the pod. More info: | corev1.PodSpec |
-
-
-
-## Pooler
-
-Pooler is the Schema for the poolers API
-
-| Name | Description | Type |
-| ---------- | ----------- | ------------------------------------------------------------------------------------------------------------ |
-| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) |
-| `spec ` | | [PoolerSpec](#PoolerSpec) |
-| `status ` | | [PoolerStatus](#PoolerStatus) |
-
-
-
-## PoolerIntegrations
-
-PoolerIntegrations encapsulates the needed integration for the poolers referencing the cluster
-
-| Name | Description | Type |
-| ---------------------- | ----------- | --------------------------------------------------------- |
-| `pgBouncerIntegration` | | [PgBouncerIntegrationStatus](#PgBouncerIntegrationStatus) |
-
-
-
-## PoolerList
-
-PoolerList contains a list of Pooler
-
-| Name | Description | Type |
-| ---------- | ------------- | -------------------------------------------------------------------------------------------------------- |
-| `metadata` | | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#listmeta-v1-meta) |
-| `items ` | - *mandatory* | [\[\]Pooler](#Pooler) |
-
-
-
-## PoolerMonitoringConfiguration
-
-PoolerMonitoringConfiguration is the type containing all the monitoring configuration for a certain Pooler.
-
-Mirrors the Cluster's MonitoringConfiguration but without the custom queries part for now.
-
-| Name | Description | Type |
-| ------------------ | ---------------------------------- | ---- |
-| `enablePodMonitor` | Enable or disable the `PodMonitor` | bool |
-
-
-
-## PoolerSecrets
-
-PoolerSecrets contains the versions of all the secrets used
-
-| Name | Description | Type |
-| ------------------ | -------------------------------------------- | --------------------------------------- |
-| `serverTLS ` | The server TLS secret version | [SecretVersion](#SecretVersion) |
-| `serverCA ` | The server CA secret version | [SecretVersion](#SecretVersion) |
-| `clientCA ` | The client CA secret version | [SecretVersion](#SecretVersion) |
-| `pgBouncerSecrets` | The version of the secrets used by PgBouncer | [\*PgBouncerSecrets](#PgBouncerSecrets) |
-
-
-
-## PoolerSpec
-
-PoolerSpec defines the desired state of Pooler
-
-| Name | Description | Type |
-| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
-| `cluster ` | This is the cluster reference on which the Pooler will work. Pooler name should never match with any cluster name within the same namespace. - *mandatory* | [LocalObjectReference](#LocalObjectReference) |
-| `type               ` | The type of instances to forward traffic to - *mandatory*                                                                                                    | PoolerType                                                        |
-| `instances ` | The number of replicas we want - *mandatory* | int32 |
-| `template ` | The template of the Pod to be created | [\*PodTemplateSpec](#PodTemplateSpec) |
-| `pgbouncer ` | The PgBouncer configuration - *mandatory* | [\*PgBouncerSpec](#PgBouncerSpec) |
-| `deploymentStrategy` | The deployment strategy to use for pgbouncer to replace existing pods with new ones | \*appsv1.DeploymentStrategy |
-| `monitoring ` | The configuration of the monitoring infrastructure of this pooler. | [\*PoolerMonitoringConfiguration](#PoolerMonitoringConfiguration) |
-
-
-
-## PoolerStatus
-
-PoolerStatus defines the observed state of Pooler
-
-| Name | Description | Type |
-| ----------- | ----------------------------------------- | --------------------------------- |
-| `secrets ` | The resource version of the config object | [\*PoolerSecrets](#PoolerSecrets) |
-| `instances` | The number of pods trying to be scheduled | int32 |
-
-
-
-## PostInitApplicationSQLRefs
-
-PostInitApplicationSQLRefs holds references to ConfigMaps or Secrets containing SQL files. The references are processed in the following order: all Secrets first, then all ConfigMaps. Within each Secret or ConfigMap list, entries are processed in the order of the array.
-
-| Name | Description | Type |
-| --------------- | ------------------------------------------------------ | ------------------------------------------------- |
-| `secretRefs ` | SecretRefs holds a list of references to Secrets | [\[\]SecretKeySelector](#SecretKeySelector) |
-| `configMapRefs` | ConfigMapRefs holds a list of references to ConfigMaps | [\[\]ConfigMapKeySelector](#ConfigMapKeySelector) |
-
-
-
-## PostgresConfiguration
-
-PostgresConfiguration defines the PostgreSQL configuration
-
-| Name | Description | Type |
-| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
-| `parameters ` | PostgreSQL configuration options (postgresql.conf) | map[string]string |
-| `pg_hba ` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | \[]string |
-| `epas ` | EDB Postgres Advanced Server specific configurations | [\*EPASConfiguration](#EPASConfiguration) |
-| `syncReplicaElectionConstraint` | Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be set up. | [SyncReplicaElectionConstraints](#SyncReplicaElectionConstraints) |
-| `promotionTimeout ` | Specifies the maximum number of seconds to wait when promoting an instance to primary. Default value is 40000000, greater than one year in seconds, big enough to simulate an infinite timeout | int32 |
-| `shared_preload_libraries ` | Lists of shared preload libraries to add to the default ones | \[]string |
-| `ldap ` | Options to specify LDAP configuration | [\*LDAPConfig](#LDAPConfig) |
-
-
-
-## RecoveryTarget
-
-RecoveryTarget allows you to configure the point at which the recovery process stops. All the target options except TargetTLI are mutually exclusive.
-
-| Name | Description | Type |
-| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ |
-| `backupID ` | The ID of the backup from which to start the recovery process. If empty (default) the operator will automatically detect the backup based on targetTime or targetLSN if specified. Otherwise use the latest available backup in chronological order. | string |
-| `targetTLI ` | The target timeline ("latest" or a positive integer) | string |
-| `targetXID ` | The target transaction ID | string |
-| `targetName ` | The target name (to be previously created with `pg_create_restore_point`) | string |
-| `targetLSN ` | The target LSN (Log Sequence Number) | string |
-| `targetTime ` | The target time as a timestamp in the RFC3339 standard | string |
-| `targetImmediate` | End recovery as soon as a consistent state is reached | \*bool |
-| `exclusive ` | Set the target to be exclusive. If omitted, defaults to false, so that in Postgres, `recovery_target_inclusive` will be true | \*bool |
-
-
-
-## ReplicaClusterConfiguration
-
-ReplicaClusterConfiguration encapsulates the configuration of a replica cluster
-
-| Name | Description | Type |
-| --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ |
-| `enabled` | If replica mode is enabled, this cluster will be a replica of an existing cluster. Replica cluster can be created from a recovery object store or via streaming through pg_basebackup. Refer to the Replication page of the documentation for more information. - *mandatory* | bool |
-| `source ` | The name of the external cluster which is the replication origin - *mandatory* | string |
-
-
-
-## ReplicationSlotsConfiguration
-
-ReplicationSlotsConfiguration encapsulates the configuration of replication slots
-
-| Name | Description | Type |
-| ------------------ | ---------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- |
-| `highAvailability` | Replication slots for high availability configuration | [\*ReplicationSlotsHAConfiguration](#ReplicationSlotsHAConfiguration) |
-| `updateInterval ` | Standby will update the status of the local replication slots every `updateInterval` seconds (default 30). | int |
-
-
-
-## ReplicationSlotsHAConfiguration
-
-ReplicationSlotsHAConfiguration encapsulates the configuration of the replication slots that are automatically managed by the operator to control the streaming replication connections with the standby instances for high availability (HA) purposes. Replication slots are a PostgreSQL feature that makes sure that PostgreSQL automatically keeps WAL files in the primary when a streaming client (in this specific case a replica that is part of the HA cluster) gets disconnected.
-
-| Name | Description | Type |
-| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------ |
-| `enabled ` | If enabled, the operator will automatically manage replication slots on the primary instance and use them in streaming replication connections with all the standby instances that are part of the HA cluster. If disabled (default), the operator will not take advantage of replication slots in streaming connections with the replicas. This feature also controls replication slots in replica cluster, from the designated primary to its cascading replicas. This can only be set at creation time. - *mandatory* | \*bool |
-| `slotPrefix` | Prefix for replication slots managed by the operator for HA. It may only contain lower case letters, numbers, and the underscore character. This can only be set at creation time. By default set to `_cnp_`. | string |
-
-
-
-## RoleConfiguration
-
-RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role with the additional field Ensure specifying whether to ensure the presence or absence of the role in the database
-
-The defaults of the `CREATE ROLE` command are applied. Reference:
-
-| Name | Description | Type |
-| ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- |
-| `name ` | Name of the role - *mandatory* | string |
-| `comment ` | Description of the role | string |
-| `ensure ` | Ensure the role is `present` or `absent` - defaults to "present" | EnsureOption |
-| `passwordSecret ` | Secret containing the password of the role (if present) If null, the password will be ignored unless DisablePassword is set | [\*LocalObjectReference](#LocalObjectReference) |
-| `disablePassword` | DisablePassword indicates that a role's password should be set to NULL in Postgres | bool |
-| `superuser      ` | Whether the role is a `superuser` who can override all access restrictions within the database - superuser status is dangerous and should be used only when really needed. You must yourself be a superuser to create a new superuser. Default is `false`.                                                                                                                                                                                 | bool                                                                                                |
-| `createdb ` | When set to `true`, the role being defined will be allowed to create new databases. Specifying `false` (default) will deny a role the ability to create databases. | bool |
-| `createrole ` | Whether the role will be permitted to create, alter, drop, comment on, change the security label for, and grant or revoke membership in other roles. Default is `false`. | bool |
-| `inherit        ` | Whether a role "inherits" the privileges of roles it is a member of. Default is `true`.                                                                                                                                                                                                                                                                                                                                                    | \*bool                                                                                              |
-| `login ` | Whether the role is allowed to log in. A role having the `login` attribute can be thought of as a user. Roles without this attribute are useful for managing database privileges, but are not users in the usual sense of the word. Default is `false`. | bool |
-| `replication ` | Whether a role is a replication role. A role must have this attribute (or be a superuser) in order to be able to connect to the server in replication mode (physical or logical replication) and in order to be able to create or drop replication slots. A role having the `replication` attribute is a very highly privileged role, and should only be used on roles actually used for replication. Default is `false`. | bool |
-| `bypassrls ` | Whether a role bypasses every row-level security (RLS) policy. Default is `false`. | bool |
-| `connectionLimit` | If the role can log in, this specifies how many concurrent connections the role can make. `-1` (the default) means no limit. | int64 |
-| `validUntil ` | Date and time after which the role's password is no longer valid. When omitted, the password will never expire (default). | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) |
-| `inRoles ` | List of one or more existing roles to which this role will be immediately added as a new member. Default empty. | \[]string |
-
-
-
-## RollingUpdateStatus
-
-RollingUpdateStatus contains the information about an instance which is being updated
-
-| Name | Description | Type |
-| ----------- | ------------------------------------------------- | ------------------------------------------------------------------------------------------------ |
-| `imageName` | The image which we put into the Pod - *mandatory* | string |
-| `startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) |
-
-
-
-## S3Credentials
-
-S3Credentials is the type for the credentials to be used to upload files to S3. It can be provided in two alternative ways:
-
-- explicitly passing accessKeyId and secretAccessKey
-
-- inheriting the role from the pod environment by setting inheritFromIAMRole to true
-
-| Name | Description | Type |
-| -------------------- | -------------------------------------------------------------------------------------- | ----------------------------------------- |
-| `accessKeyId ` | The reference to the access key id | [\*SecretKeySelector](#SecretKeySelector) |
-| `secretAccessKey ` | The reference to the secret access key | [\*SecretKeySelector](#SecretKeySelector) |
-| `region ` | The reference to the secret containing the region name | [\*SecretKeySelector](#SecretKeySelector) |
-| `sessionToken       ` | The reference to the session key                                                        | [\*SecretKeySelector](#SecretKeySelector) |
-| `inheritFromIAMRole` | Use role-based authentication without explicitly providing the keys. - *mandatory*      | bool                                      |
-
-
-
-## ScheduledBackup
-
-ScheduledBackup is the Schema for the scheduledbackups API
-
-| Name | Description | Type |
-| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ |
-| `metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta) |
-| `spec ` | Specification of the desired behavior of the ScheduledBackup. More info: | [ScheduledBackupSpec](#ScheduledBackupSpec) |
-| `status ` | Most recently observed status of the ScheduledBackup. This data may not be up to date. Populated by the system. Read-only. More info: | [ScheduledBackupStatus](#ScheduledBackupStatus) |
-
-
-
-## ScheduledBackupList
-
-ScheduledBackupList contains a list of ScheduledBackup
-
-| Name | Description | Type |
-| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------- |
-| `metadata` | Standard list metadata. More info: | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#listmeta-v1-meta) |
-| `items   ` | List of scheduled backups - *mandatory*                                                                                                            | [\[\]ScheduledBackup](#ScheduledBackup)                                                                    |
-
-
-
-## ScheduledBackupSpec
-
-ScheduledBackupSpec defines the desired state of ScheduledBackup
-
-| Name | Description | Type |
-| ---------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------- |
-| `suspend             ` | Whether this backup is suspended                                                                                                                                                                                                                                                                                                                      | \*bool                                        |
-| `immediate           ` | Whether the first backup has to start immediately after creation                                                                                                                                                                                                                                                                                     | \*bool                                        |
-| `schedule ` | The schedule does not follow the same format used in Kubernetes CronJobs as it includes an additional seconds specifier, see - *mandatory* | string |
-| `cluster ` | The cluster to backup | [LocalObjectReference](#LocalObjectReference) |
-| `backupOwnerReference` | Indicates which ownerReference should be put inside the created backup resources. `none`: no owner reference for created backup objects (same behavior as before the field was introduced); `self`: sets the ScheduledBackup object as owner of the backup; `cluster`: sets the cluster as owner of the backup | string |
-| `target ` | The policy to decide which instance should perform this backup. If empty, it defaults to `cluster.spec.backup.target`. Available options are empty string, `primary` and `prefer-standby`. `primary` to have backups run always on primary instances, `prefer-standby` to have backups run preferably on the most updated standby, if available. | BackupTarget |
-
-
-
-## ScheduledBackupStatus
-
-ScheduledBackupStatus defines the observed state of ScheduledBackup
-
-| Name | Description | Type |
-| ------------------ | -------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- |
-| `lastCheckTime    ` | The latest time the schedule was checked                                    | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) |
-| `lastScheduleTime` | Information when was the last time that backup was successfully scheduled. | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) |
-| `nextScheduleTime` | Next time we will run a backup | [\*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#time-v1-meta) |
-
-
-
-## SecretKeySelector
-
-SecretKeySelector contains enough information to let you locate the key of a Secret
-
-| Name | Description | Type |
-| ----- | ------------------------------- | ------ |
-| `key` | The key to select - *mandatory* | string |
-
-
-
-## SecretVersion
-
-SecretVersion contains a secret name and its ResourceVersion
-
-| Name | Description | Type |
-| --------- | --------------------------------- | ------ |
-| `name ` | The name of the secret | string |
-| `version` | The ResourceVersion of the secret | string |
-
-
-
-## SecretsResourceVersion
-
-SecretsResourceVersion is the resource versions of the secrets managed by the operator
-
-| Name | Description | Type |
-| -------------------------- | --------------------------------------------------------------------------------------------------------------------------- | ----------------- |
-| `superuserSecretVersion ` | The resource version of the "postgres" user secret | string |
-| `replicationSecretVersion` | The resource version of the "streaming_replica" user secret | string |
-| `applicationSecretVersion` | The resource version of the "app" user secret | string |
-| `managedRoleSecretVersion` | The resource versions of the managed roles secrets | map[string]string |
-| `caSecretVersion ` | Unused. Retained for compatibility with old versions. | string |
-| `clientCaSecretVersion ` | The resource version of the PostgreSQL client-side CA secret version | string |
-| `serverCaSecretVersion ` | The resource version of the PostgreSQL server-side CA secret version | string |
-| `serverSecretVersion ` | The resource version of the PostgreSQL server-side secret version | string |
-| `barmanEndpointCA ` | The resource version of the Barman Endpoint CA if provided | string |
-| `metrics ` | A map with the versions of all the secrets used to pass metrics. Map keys are the secret names, map values are the versions | map[string]string |
-
-
-
-## ServiceAccountTemplate
-
-ServiceAccountTemplate contains the template needed to generate the service accounts
-
-| Name | Description | Type |
-| ---------- | ------------------------------------------------------------------------------------ | --------------------- |
-| `metadata` | Metadata are the metadata to be used for the generated service account - *mandatory* | [Metadata](#Metadata) |
-
-
-
-## StorageConfiguration
-
-StorageConfiguration is the configuration of the storage of the PostgreSQL instances
-
-| Name | Description | Type |
-| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------- |
-| `storageClass ` | StorageClass to use for database data (`PGDATA`). Applied after evaluating the PVC template, if available. If not specified, generated PVCs will be satisfied by the default storage class | \*string |
-| `size ` | Size of the storage. Required if not already specified in the PVC template. Changes to this field are automatically reapplied to the created PVCs. Size cannot be decreased. | string |
-| `resizeInUseVolumes` | Resize existing PVCs, defaults to true                                                                                                                                                       | \*bool                                                                                                                                     |
-| `pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [\*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#persistentvolumeclaim-v1-core) |
-
-
-
-## SyncReplicaElectionConstraints
-
-SyncReplicaElectionConstraints contains the constraints for sync replicas election.
-
-For anti-affinity parameters, two instances are considered in the same location if all the label values match.
-
-In the future, synchronous replica election restriction by name will be supported.
-
-| Name | Description | Type |
-| ------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | --------- |
-| `enabled ` | This flag enables the constraints for sync replicas - *mandatory* | bool |
-| `nodeLabelsAntiAffinity` | A list of node labels values to extract and compare to evaluate if the pods reside in the same topology or not | \[]string |
-
-
-
-## TDEConfiguration
-
-TDEConfiguration contains the Transparent Data Encryption configuration
-
-| Name | Description | Type |
-| ------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ |
-| `enabled ` | True if we want to have TDE enabled | bool |
-| `secretKeyRef ` | Reference to the secret that contains the encryption key | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-| `wrapCommand ` | WrapCommand is the encrypt command provided by the user | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-| `unwrapCommand ` | UnwrapCommand is the decryption command provided by the user | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-| `passphraseCommand` | PassphraseCommand is the command executed to get the passphrase that will be passed to the OpenSSL command to encrypt and decrypt | [\*v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#secretkeyselector-v1-core) |
-
-
-
-## Topology
-
-Topology contains the cluster topology
-
-| Name | Description | Type |
-| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------- |
-| `successfullyExtracted` | SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors in synchronous replica election in case of failures                                                                                                                                                                                                 | bool                          |
-| `instances ` | Instances contains the pod topology of the instances | map[PodName]PodTopologyLabels |
-| `nodesUsed ` | NodesUsed represents the count of distinct nodes accommodating the instances. A value of '1' suggests that all instances are hosted on a single node, implying the absence of High Availability (HA). Ideally, this value should be the same as the number of instances in the Postgres HA cluster, implying shared nothing architecture on the compute side. | int32 |
-
-
-
-## WalBackupConfiguration
-
-WalBackupConfiguration is the configuration of the backup of the WAL stream
-
-| Name | Description | Type |
-| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
-| `compression` | Compress a WAL file before sending it to the object store. Available options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. | CompressionType |
-| `encryption ` | Whether to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms`                                                                                                                                                                                  | EncryptionType  |
-| `maxParallel` | Number of WAL files to be either archived in parallel (when the PostgreSQL instance is archiving to a backup object store) or restored in parallel (when a PostgreSQL standby is fetching WAL files from a recovery object store). If not specified, WAL files will be processed one at a time. It accepts a positive integer as a value - with 1 being the minimum accepted value. | int |
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/appendixes/object_stores.mdx b/product_docs/docs/postgres_for_kubernetes/1/appendixes/object_stores.mdx
new file mode 100644
index 00000000000..6f473de7c0d
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/appendixes/object_stores.mdx
@@ -0,0 +1,447 @@
+---
+title: 'Appendix A - Common object stores for backups'
+originalFilePath: 'src/appendixes/object_stores.md'
+---
+
+You can store the [backup](../backup.md) files in any service that is supported
+by the Barman Cloud infrastructure. That is:
+
+- [Amazon S3](#aws-s3)
+- [Microsoft Azure Blob Storage](#azure-blob-storage)
+- [Google Cloud Storage](#google-cloud-storage)
+
+You can also use any compatible implementation of the supported services.
+
+The required setup depends on the chosen storage provider and is
+discussed in the following sections.
+
+## AWS S3
+
+[AWS Simple Storage Service (S3)](https://aws.amazon.com/s3/) is
+a very popular object storage service offered by Amazon.
+
+As far as EDB Postgres for Kubernetes backup is concerned, you can define the permissions to
+store backups in S3 buckets in two ways:
+
+- If EDB Postgres for Kubernetes is running in EKS, you may want to use the
+ [IRSA authentication method](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html)
+- Alternatively, you can use the `ACCESS_KEY_ID` and `ACCESS_SECRET_KEY` credentials
+
+### AWS Access key
+
+You will need the following information about your environment:
+
+- `ACCESS_KEY_ID`: the ID of the access key that will be used
+ to upload files into S3
+
+- `ACCESS_SECRET_KEY`: the secret part of the access key mentioned above
+
+- `ACCESS_SESSION_TOKEN`: the optional session token, in case it is required
+
+The access key used must have permission to upload files into
+the bucket. Given that, you must create a Kubernetes secret with the
+credentials, and you can do that with the following command:
+
+```sh
+kubectl create secret generic aws-creds \
+  --from-literal=ACCESS_KEY_ID=<access key here> \
+  --from-literal=ACCESS_SECRET_KEY=<secret key here>
+# --from-literal=ACCESS_SESSION_TOKEN=<session token here> # if required
+```
+
+The credentials will be stored inside Kubernetes and will be encrypted
+if encryption at rest is configured in your installation.
+
+Once that secret has been created, you can configure your cluster like in
+the following example:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ destinationPath: ""
+ s3Credentials:
+ accessKeyId:
+ name: aws-creds
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: aws-creds
+ key: ACCESS_SECRET_KEY
+```
+
+The destination path can be any URL pointing to a folder where
+the instance can upload the WAL files, e.g.
+`s3://BUCKET_NAME/path/to/folder`.
+
+### IAM Role for Service Account (IRSA)
+
+In order to use IRSA you need to set an `annotation` in the `ServiceAccount` of
+the Postgres cluster.
+
+We can configure EDB Postgres for Kubernetes to inject them using the `serviceAccountTemplate`
+stanza:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+[...]
+spec:
+ serviceAccountTemplate:
+ metadata:
+ annotations:
+ eks.amazonaws.com/role-arn: arn:[...]
+ [...]
+```
+
+### S3 lifecycle policy
+
+Barman Cloud writes objects to S3, then does not update them until they are
+deleted by the Barman Cloud retention policy. A recommended approach for an S3
+lifecycle policy is to expire the current version of objects a few days longer
+than the Barman retention policy, enable object versioning, and expire
+non-current versions after a number of days. Such a policy protects against
+accidental deletion, and also allows for restricting permissions to the
+EDB Postgres for Kubernetes workload so that it may delete objects from S3 without granting
+permissions to permanently delete objects.
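+
+As a minimal sketch - assuming a 30-day Barman retention policy, and with the
+bucket name and day counts as placeholders to adapt - the lifecycle
+configuration could look like this:
+
+```json
+{
+  "Rules": [
+    {
+      "ID": "expire-barman-objects",
+      "Status": "Enabled",
+      "Filter": {"Prefix": ""},
+      "Expiration": {"Days": 35},
+      "NoncurrentVersionExpiration": {"NoncurrentDays": 14}
+    }
+  ]
+}
+```
+
+You can then enable versioning and apply the policy with the AWS CLI:
+
+```sh
+aws s3api put-bucket-versioning \
+  --bucket BUCKET_NAME --versioning-configuration Status=Enabled
+aws s3api put-bucket-lifecycle-configuration \
+  --bucket BUCKET_NAME --lifecycle-configuration file://lifecycle.json
+```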
+
+### Other S3-compatible object storage providers
+
+In case you're using S3-compatible object storage, like **MinIO** or
+**Linode Object Storage**, you can specify an endpoint instead of using the
+default S3 one.
+
+The following example uses the bucket named `bucket` on **Linode**, in the region
+`us-east1`:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ destinationPath: "s3://bucket/"
+ endpointURL: "https://us-east1.linodeobjects.com"
+ s3Credentials:
+ [...]
+```
+
+In case you're using **Digital Ocean Spaces**, you will have to use the path-style syntax.
+The following example uses the bucket named `bucket` on **Digital Ocean Spaces**, in the region `SFO3`:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ destinationPath: "s3://[your-bucket-name]/[your-backup-folder]/"
+ endpointURL: "https://sfo3.digitaloceanspaces.com"
+ s3Credentials:
+ [...]
+```
+
+!!! Important
+ Suppose you configure an Object Storage provider which uses a certificate signed with a private CA,
+ like when using OpenShift or MinIO via HTTPS. In that case, you need to set the option `endpointCA`
+ referring to a secret containing the CA bundle so that Barman can verify the certificate correctly.
+
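+As a hypothetical sketch - the secret name `minio-ca-bundle`, the key `ca.crt`,
+and the endpoint URL are placeholders - you could store the CA bundle in a
+secret and reference it through `endpointCA`:
+
+```sh
+kubectl create secret generic minio-ca-bundle --from-file=ca.crt=./ca.crt
+```
+
+```yaml
+spec:
+  backup:
+    barmanObjectStore:
+      endpointURL: "https://minio.internal.example:9000"
+      endpointCA:
+        name: minio-ca-bundle
+        key: ca.crt
+      [...]
+```
+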
+!!! Note
+ If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can
+ add a label with key `k8s.enterprisedb.io/reload` to the Secrets/ConfigMaps. Otherwise, you will have to reload
+ the instances using the `kubectl cnp reload` subcommand.
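+
+For example - a sketch that assumes the `aws-creds` secret created earlier;
+the label value is arbitrary, as only the key is significant:
+
+```sh
+kubectl label secret aws-creds k8s.enterprisedb.io/reload=true
+```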
+
+## Azure Blob Storage
+
+[Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) is the
+object storage service provided by Microsoft.
+
+In order to access your storage account for backup and recovery of
+EDB Postgres for Kubernetes managed databases, you will need one of the following
+combinations of credentials:
+
+- [Connection String](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#configure-a-connection-string-for-an-azure-storage-account)
+- Storage account name and [Storage account access key](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
+- Storage account name and [Storage account SAS Token](https://docs.microsoft.com/en-us/azure/storage/blobs/sas-service-create)
+- Storage account name and [Azure AD Workload Identity](https://azure.github.io/azure-workload-identity/docs/introduction.html)
+ properly configured.
+
+Using **Azure AD Workload Identity**, you can avoid saving the credentials into a Kubernetes Secret,
+and have a Cluster configuration that adds the `inheritFromAzureAD` option, as follows:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ destinationPath: ""
+ azureCredentials:
+ inheritFromAzureAD: true
+```
+
+Otherwise, when using either a **Storage account access key** or a **Storage account SAS Token**,
+the credentials need to be stored inside a Kubernetes Secret, adding data entries only when
+needed. The following command creates such a secret:
+
+```sh
+kubectl create secret generic azure-creds \
+  --from-literal=AZURE_STORAGE_ACCOUNT=<storage account name> \
+  --from-literal=AZURE_STORAGE_KEY=<storage account key> \
+  --from-literal=AZURE_STORAGE_SAS_TOKEN=<SAS token> \
+  --from-literal=AZURE_STORAGE_CONNECTION_STRING=<connection string>
+```
+
+The credentials will be encrypted at rest, if this feature is enabled in the
+Kubernetes cluster in use.
+
+Given the previous secret, the provided credentials can be injected inside the cluster
+configuration:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ destinationPath: ""
+ azureCredentials:
+ connectionString:
+ name: azure-creds
+          key: AZURE_STORAGE_CONNECTION_STRING
+ storageAccount:
+ name: azure-creds
+ key: AZURE_STORAGE_ACCOUNT
+ storageKey:
+ name: azure-creds
+ key: AZURE_STORAGE_KEY
+ storageSasToken:
+ name: azure-creds
+ key: AZURE_STORAGE_SAS_TOKEN
+```
+
+When using the Azure Blob Storage, the `destinationPath` fulfills the following
+structure:
+
+```
+<http|https>://<account-name>.<service-name>.core.windows.net/<resource-path>
+```
+
+where `<resource-path>` is `<container>/<blob>`. The **account name**,
+which is also called **storage account name**, is included in the used host name.
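+
+For example - a hypothetical destination, with `mystorageaccount` as the
+storage account name, `blob` as the service, and `backups/cluster-example`
+as the resource path:
+
+```yaml
+destinationPath: "https://mystorageaccount.blob.core.windows.net/backups/cluster-example"
+```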
+
+### Other Azure Blob Storage compatible providers
+
+If you are using a different implementation of the Azure Blob Storage APIs,
+the `destinationPath` will have the following structure:
+
+```
+<http|https>://<local-machine-address>:<port>/<account-name>/<resource-path>
+```
+
+In that case, `<account-name>` is the first component of the path.
+
+This is required if you are testing the Azure support via the Azure Storage
+Emulator or [Azurite](https://github.com/Azure/Azurite).
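+
+For example - a sketch that assumes Azurite is reachable through a
+hypothetical `azurite` service on its default blob port, using the default
+development storage account:
+
+```yaml
+destinationPath: "http://azurite:10000/devstoreaccount1/backups"
+```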
+
+## Google Cloud Storage
+
+Currently, the EDB Postgres for Kubernetes operator supports two authentication methods for
+[Google Cloud Storage](https://cloud.google.com/storage/):
+
+- the first one assumes that the pod is running inside a Google Kubernetes Engine cluster
+- the second one leverages the environment variable `GOOGLE_APPLICATION_CREDENTIALS`
+
+### Running inside Google Kubernetes Engine
+
+When running inside Google Kubernetes Engine you can configure your backups to
+simply rely on [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity),
+without having to set any credentials. In particular, you need to:
+
+- set `.spec.backup.barmanObjectStore.googleCredentials.gkeEnvironment` to `true`
+- set the `iam.gke.io/gcp-service-account` annotation in the `serviceAccountTemplate` stanza
+
+Please use the following example as a reference:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ [...]
+ backup:
+ barmanObjectStore:
+ destinationPath: "gs://"
+ googleCredentials:
+ gkeEnvironment: true
+
+ serviceAccountTemplate:
+ metadata:
+ annotations:
+ iam.gke.io/gcp-service-account: [...].iam.gserviceaccount.com
+ [...]
+```
+
+### Using authentication
+
+Following the [instructions from Google](https://cloud.google.com/docs/authentication/getting-started),
+you will get a JSON file that contains all the required information to authenticate.
+
+The content of the JSON file must be provided using a `Secret` that can be created
+with the following command:
+
+```shell
+kubectl create secret generic backup-creds --from-file=gcsCredentials=gcs_credentials_file.json
+```
+
+This will create the `Secret` with the name `backup-creds`, to be referenced in the YAML file like this:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ destinationPath: "gs://"
+ googleCredentials:
+ applicationCredentials:
+ name: backup-creds
+ key: gcsCredentials
+```
+
+Now the operator will use the credentials to authenticate against Google Cloud Storage.
+
+!!! Important
+    This authentication method creates a JSON file inside the container with all the
+    information needed to access your Google Cloud Storage bucket. As a consequence,
+    anyone who gains access to the pod will also have write permissions to the bucket.
+
+## MinIO Gateway
+
+Optionally, you can use MinIO Gateway as a common interface which
+relays backup objects to other cloud storage solutions, like S3 or GCS.
+For more information, please refer to [MinIO official documentation](https://docs.min.io/).
+
+Specifically, the EDB Postgres for Kubernetes cluster can directly point to a local
+MinIO Gateway as an endpoint, using previously created credentials and service.
+
+MinIO secrets will be used by both the PostgreSQL cluster and the MinIO instance.
+Therefore, you must create them in the same namespace:
+
+```sh
+kubectl create secret generic minio-creds \
+  --from-literal=MINIO_ACCESS_KEY=<minio access key here> \
+  --from-literal=MINIO_SECRET_KEY=<minio secret key here>
+```
+
+!!! Note
+ Cloud Object Storage credentials will be used only by MinIO Gateway in this case.
+
+!!! Important
+ In order to allow PostgreSQL to reach MinIO Gateway, it is necessary to create a
+ `ClusterIP` service on port `9000` bound to the MinIO Gateway instance.
+
+For example:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: minio-gateway-service
+spec:
+ type: ClusterIP
+ ports:
+ - port: 9000
+ targetPort: 9000
+ protocol: TCP
+ selector:
+ app: minio
+```
+
+!!! Warning
+ At the time of writing this documentation, the official
+ [MinIO Operator](https://github.com/minio/minio-operator/issues/71)
+ for Kubernetes does not support the gateway feature. As such, we will use a
+ `deployment` instead.
+
+The MinIO deployment will use cloud storage credentials to upload objects to the
+remote bucket and relay backup files to different locations.
+
+Here is an example using AWS S3 as Cloud Object Storage:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+[...]
+spec:
+ containers:
+ - name: minio
+ image: minio/minio:RELEASE.2020-06-03T22-13-49Z
+ args:
+ - gateway
+ - s3
+ env:
+ # MinIO access key and secret key
+ - name: MINIO_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: minio-creds
+ key: MINIO_ACCESS_KEY
+ - name: MINIO_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: minio-creds
+ key: MINIO_SECRET_KEY
+ # AWS credentials
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: aws-creds
+ key: ACCESS_KEY_ID
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: aws-creds
+ key: ACCESS_SECRET_KEY
+# Uncomment the below section if session token is required
+# - name: AWS_SESSION_TOKEN
+# valueFrom:
+# secretKeyRef:
+# name: aws-creds
+# key: ACCESS_SESSION_TOKEN
+ ports:
+ - containerPort: 9000
+```
+
+Proceed by configuring MinIO Gateway service as the `endpointURL` in the `Cluster`
+definition, then choose a bucket name to replace `BUCKET_NAME`:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ destinationPath: s3://BUCKET_NAME/
+ endpointURL: http://minio-gateway-service:9000
+ s3Credentials:
+ accessKeyId:
+ name: minio-creds
+ key: MINIO_ACCESS_KEY
+ secretAccessKey:
+ name: minio-creds
+ key: MINIO_SECRET_KEY
+ [...]
+```
+
+Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before
+proceeding with a backup.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/applications.mdx b/product_docs/docs/postgres_for_kubernetes/1/applications.mdx
index afd60986a0d..7fa1fb7615e 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/applications.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/applications.mdx
@@ -68,11 +68,20 @@ PostgreSQL cluster it deploys:
- `[cluster name]-superuser`
- `[cluster name]-app`
-The secrets contain the username, password, and a working
-[`.pgpass file`](https://www.postgresql.org/docs/current/libpq-pgpass.html)
-respectively for the `postgres` user and the *owner* of the database.
+Each secret contains the following (see the usage example after this list):
+
+- username
+- password
+- hostname of the RW service
+- port number
+- database name
+- a working [`.pgpass file`](https://www.postgresql.org/docs/current/libpq-pgpass.html)
+- [uri](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING)
+- [jdbc-uri](https://jdbc.postgresql.org/documentation/use/#connecting-to-the-database)
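+
+For example - a sketch that assumes a cluster named `cluster-example`; the
+data keys reflect the list above - you can read the connection URI for the
+application user directly from the `-app` secret:
+
+```sh
+kubectl get secret cluster-example-app \
+  -o jsonpath='{.data.uri}' | base64 -d
+```
+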
The `-app` credentials are the ones that should be used by applications
-connecting to the PostgreSQL cluster.
+connecting to the PostgreSQL cluster, and correspond to the user *owning* the
+database.
-The `-superuser` ones are supposed to be used only for administrative purposes.
\ No newline at end of file
+The `-superuser` ones are supposed to be used only for administrative purposes,
+and correspond to the `postgres` user.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx
index 25d13096d61..38e694fe184 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx
@@ -6,6 +6,10 @@ originalFilePath: 'src/architecture.md'
This section covers the main architectural aspects you need to consider
when deploying PostgreSQL in Kubernetes.
+!!! Important
+ We encourage you to read an article that we've written for the CNCF blog
+ with title ["Recommended Architectures for PostgreSQL in Kubernetes"](https://www.cncf.io/blog/2023/09/29/recommended-architectures-for-postgresql-in-kubernetes/).
+
!!! Important
If you are deploying PostgreSQL in a self-managed Kubernetes environment,
please make sure you read the ["Kubernetes architecture"](#kubernetes-architecture)
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
new file mode 100644
index 00000000000..0df10cb2fe3
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
@@ -0,0 +1,382 @@
+---
+title: 'Backup'
+originalFilePath: 'src/backup.md'
+---
+
+!!! Important
+ With version 1.21, backup and recovery capabilities in EDB Postgres for Kubernetes
+    have changed significantly due to the introduction of native support for
+ [Kubernetes Volume Snapshots](backup_volumesnapshot.md).
+ Up to that point, backup and recovery were available only for object
+ stores. Please carefully read this section and the [recovery](recovery.md)
+ one if you have been a user of EDB Postgres for Kubernetes 1.15 through 1.20.
+
+PostgreSQL natively provides first class backup and recovery capabilities based
+on file system level (physical) copy. These have been successfully used for
+more than 15 years in mission critical production databases, helping
+organizations all over the world achieve their disaster recovery goals with
+Postgres.
+
+!!! Note
+    There's another way to back up databases in PostgreSQL, through the
+    `pg_dump` utility - which relies on logical backups instead of physical ones.
+    However, logical backups are not suitable for business continuity use cases
+    and as such are not covered by EDB Postgres for Kubernetes (yet, at least).
+    If you want to use the `pg_dump` utility, see the
+    ["Troubleshooting / Emergency backup" section](troubleshooting.md#emergency-backup) for inspiration.
+
+In EDB Postgres for Kubernetes, the backup infrastructure for each PostgreSQL cluster is made
+up of the following resources:
+
+- **WAL archive**: a location containing the WAL files (transactional logs)
+ that are continuously written by Postgres and archived for data durability
+- **Physical base backups**: a copy of all the files that PostgreSQL uses to
+ store the data in the database (primarily the `PGDATA` and any tablespace)
+
+The WAL archive can only be stored on object stores at the moment.
+
+On the other hand, EDB Postgres for Kubernetes supports two ways to store physical base backups:
+
+- on [object stores](backup_barmanobjectstore.md), as tarballs - optionally
+ compressed
+- on [Kubernetes Volume Snapshots](backup_volumesnapshot.md), if supported by
+ the underlying storage class
+
+!!! Important
+ Before choosing your backup strategy with EDB Postgres for Kubernetes, it is important that
+    you take some time to familiarize yourself with basic concepts, like the WAL
+    archive, and hot and cold backups.
+
+## WAL archive
+
+The WAL archive in PostgreSQL is at the heart of **continuous backup**, and it
+is fundamental for the following reasons:
+
+- **Hot backups**: the possibility to take physical base backups from any
+ instance in the Postgres cluster (either primary or standby) without shutting
+ down the server; they are also known as online backups
+- **Point in Time recovery** (PITR): the possibility to recover at any point in
+ time from the first available base backup in your system
+
+!!! Warning
+ WAL archive alone is useless. Without a physical base backup, you cannot
+ restore a PostgreSQL cluster.
+
+In general, the presence of a WAL archive enhances the resilience of a
+PostgreSQL cluster, allowing each instance to fetch any required WAL file from
+the archive if needed (the WAL archive normally has longer retention periods
+than the Postgres instances, which recycle those files).
+
+This use case can also be extended to [replica clusters](replica_cluster.md),
+as they can simply rely on the WAL archive to synchronize across long
+distances, extending disaster recovery goals across different regions.
+
+When you [configure a WAL archive](wal_archiving.md), EDB Postgres for Kubernetes provides
+out-of-the-box an RPO <= 5 minutes for disaster recovery, even across regions.
+
+!!! Important
+    Our recommendation is to always set up the WAL archive in production.
+ There are known use cases - normally involving staging and development
+ environments - where none of the above benefits are needed and the WAL
+ archive is not necessary. RPO in this case can be any value, such as
+ 24 hours (daily backups) or infinite (no backup at all).
+
+## Cold and Hot backups
+
+Hot backups have already been defined in the previous section. They require the
+presence of a WAL archive and they are the norm in any modern database management
+system.
+
+**Cold backups**, also known as offline backups, are instead physical base backups
+taken when the PostgreSQL instance (standby or primary) is shut down. They are
+consistent by definition and they represent a snapshot of the database at the
+time it was shut down.
+
+As a result, PostgreSQL instances can be restarted from a cold backup without
+the need for a WAL archive, even though they can take advantage of it, if
+available (with all the benefits on the recovery side highlighted in the
+previous section).
+
+In situations with a higher RPO (for example, 1 hour or 24 hours) and
+shorter retention periods, cold backups are a viable option to consider
+for your disaster recovery plans.
+
+## Object stores or volume snapshots: which one to use?
+
+In EDB Postgres for Kubernetes, object store based backups:
+
+- always require the WAL archive
+- support hot backup only
+- don't support incremental copy
+- don't support differential copy
+
+VolumeSnapshots instead:
+
+- don't require the WAL archive, although in production it is always recommended
+- support cold backup only (currently)
+- support incremental copy, depending on the underlying storage classes
+- support differential copy, depending on the underlying storage classes
+
+Which one to use depends on your specific requirements and environment,
+including:
+
+- availability of a viable object store solution in your Kubernetes cluster
+- availability of a trusted storage class that supports volume snapshots
+- size of the database: with object stores, the larger your database, the
+ longer backup and, most importantly, recovery procedures take (the latter
+  impacts RTO); in the presence of Very Large Databases (VLDBs), the general
+  advice is to rely on Volume Snapshots as, thanks to copy-on-write, they
+  provide faster recovery
+- data mobility and the possibility to store or relay backup files to a
+  secondary location in a different region, or to any subsequent one
+- other factors, mostly based on the confidence and familiarity with the
+ underlying storage solutions
+
+The summary table below highlights some of the main differences between the two
+available methods for storing physical base backups.
+
+| | Object store | Volume Snapshots |
+| --------------------------------- | :----------: | :------------------: |
+| **WAL archiving** | Required | Recommended (1) |
+| **Cold backup** | 𐄂 | ✓ |
+| **Hot backup** | ✓ | 𐄂 (2) |
+| **Incremental copy** | 𐄂 | ✓ (3) |
+| **Differential copy** | 𐄂 | ✓ (3) |
+| **Backup from a standby** | ✓ | ✓ |
+| **Snapshot recovery** | 𐄂 (4) | ✓ |
+| **Point In Time Recovery (PITR)** | ✓ | Requires WAL archive |
+| **Underlying technology** | Barman Cloud | Kubernetes API |
+
+> See the explanation below for the notes in the above table:
+>
+> 1. WAL archive must be on an object store
+> 2. Hot backup is not available yet for volume snapshots, and it is planned
+>    for version 1.22; however, considering that a cold backup is taken by
+>    temporarily fencing a standby, the operation does not cause any downtime
+>    for your write applications
+> 3. If supported by the underlying storage classes of the PostgreSQL volumes
+> 4. Snapshot recovery can be emulated using the `bootstrap.recovery.recoveryTarget.targetImmediate` option
+
+## Scheduled backups
+
+Scheduled backups are the recommended way to configure your backup strategy in
+EDB Postgres for Kubernetes. They are managed by the `ScheduledBackup` resource.
+
+!!! Info
+ Please refer to [`ScheduledBackupSpec`](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec)
+ in the API reference for a full list of options.
+
+The `schedule` field allows you to define a *six-term cron schedule* specification,
+which includes seconds, as expressed in
+the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
+
+!!! Warning
+    Beware that this format also includes the `seconds` field, making it
+    different from the `crontab` format used in Unix/Linux systems.
+
+This is an example of a scheduled backup:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ScheduledBackup
+metadata:
+ name: backup-example
+spec:
+ schedule: "0 0 0 * * *"
+ backupOwnerReference: self
+ cluster:
+ name: pg-backup
+```
+
+The above example schedules a backup every day at midnight: it specifies zero
+for the second, minute, and hour, and a wildcard (meaning all) for the day of
+the month, month, and day of the week.
+
+In Kubernetes CronJobs, the equivalent expression is `0 0 * * *` because seconds
+are not included.
+
+!!! Hint
+    Backup frequency might impact your recovery time objective (RTO) after a
+ disaster which requires a full or Point-In-Time recovery operation. Our
+ advice is that you regularly test your backups by recovering them, and then
+ measuring the time it takes to recover from scratch so that you can refine
+ your RTO predictability. Recovery time is influenced by the size of the
+ base backup and the amount of WAL files that need to be fetched from the archive
+ and replayed during recovery (remember that WAL archiving is what enables
+ continuous backup in PostgreSQL!).
+ Based on our experience, a weekly base backup is more than enough for most
+ cases - while it is extremely rare to schedule backups more frequently than once
+ a day.
+
+You can choose whether to schedule a backup on a defined object store or a
+volume snapshot via the `.spec.method` attribute, by default set to
+`barmanObjectStore`. If you have properly defined
+[volume snapshots](backup_volumesnapshot.md#how-to-configure-volume-snapshot-backups)
+in the `backup` stanza of the cluster, you can set `method: volumeSnapshot`
+to start scheduling base backups on volume snapshots.
+
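+For example, assuming the cluster defines a `volumeSnapshot` section in its
+`backup` stanza, a sketch of a scheduled backup on volume snapshots (resource
+and cluster names are illustrative) is:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ScheduledBackup
+metadata:
+  name: backup-example-snapshot
+spec:
+  schedule: "0 0 0 * * *"
+  method: volumeSnapshot
+  cluster:
+    name: pg-backup
+```
+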
+ScheduledBackups can be suspended, if needed, by setting `.spec.suspend: true`.
+This will stop any new backup from being scheduled until the option is removed
+or set back to `false`.
+
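+For example, a sketch of suspending an existing `ScheduledBackup` in place
+(the resource name is an assumption):
+
+```sh
+# Stop scheduling new backups until suspend is removed or set back to false
+kubectl patch scheduledbackup backup-example \
+  --type merge --patch '{"spec":{"suspend":true}}'
+```
+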
+If you want to issue a backup as soon as the ScheduledBackup resource is
+created, you can set `.spec.immediate: true`.
+
+!!! Note
+ `.spec.backupOwnerReference` indicates which ownerReference should be put inside
+ the created backup resources.
+
+ - *none:* no owner reference for created backup objects (same behavior as before the field was introduced)
+ - *self:* sets the Scheduled backup object as owner of the backup
+    - *cluster:* sets the cluster as owner of the backup
+
+## On-demand backups
+
+!!! Info
+ Please refer to [`BackupSpec`](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-BackupSpec)
+ in the API reference for a full list of options.
+
+To request a new backup, you need to create a new `Backup` resource
+like the following one:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Backup
+metadata:
+ name: backup-example
+spec:
+ method: barmanObjectStore
+ cluster:
+ name: pg-backup
+```
+
+In this case, the operator will start to orchestrate the cluster to take the
+required backup on an object store, using `barman-cloud-backup`. You can check
+the backup status using the plain `kubectl describe backup <backup-name>` command:
+
+```text
+Name: backup-example
+Namespace: default
+Labels: <none>
+Annotations: <none>
+API Version: postgresql.k8s.enterprisedb.io/v1
+Kind: Backup
+Metadata:
+ Creation Timestamp: 2020-10-26T13:57:40Z
+ Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example
+ UID: ad5f855c-2ffd-454a-a157-900d5f1f6584
+Spec:
+ Cluster:
+ Name: pg-backup
+Status:
+ Phase: running
+ Started At: 2020-10-26T13:57:40Z
+Events:
+```
+
+When the backup has been completed, the phase will be `completed`
+like in the following example:
+
+```text
+Name: backup-example
+Namespace: default
+Labels: <none>
+Annotations: <none>
+API Version: postgresql.k8s.enterprisedb.io/v1
+Kind: Backup
+Metadata:
+ Creation Timestamp: 2020-10-26T13:57:40Z
+ Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example
+ UID: ad5f855c-2ffd-454a-a157-900d5f1f6584
+Spec:
+ Cluster:
+ Name: pg-backup
+Status:
+ Backup Id: 20201026T135740
+ Destination Path: s3://backups/
+ Endpoint URL: http://minio:9000
+ Phase: completed
+ s3Credentials:
+ Access Key Id:
+ Key: ACCESS_KEY_ID
+ Name: minio
+ Secret Access Key:
+ Key: ACCESS_SECRET_KEY
+ Name: minio
+ Server Name: pg-backup
+ Started At: 2020-10-26T13:57:40Z
+ Stopped At: 2020-10-26T13:57:44Z
+Events:
+```
+
+!!! Important
+    This feature will not back up the secrets for the superuser and the
+    application user. The secrets are supposed to be backed up as part of
+    the standard backup procedures for the Kubernetes cluster.
+
+## Backup from a standby
+
+Taking a base backup requires scraping the whole data content of the
+PostgreSQL instance on disk, possibly resulting in I/O contention with the
+actual workload of the database.
+
+For this reason, EDB Postgres for Kubernetes allows you to take advantage of a
+feature which is directly available in PostgreSQL: **backup from a standby**.
+
+By default, backups will run on the most aligned replica of a `Cluster`. If
+no replicas are available, backups will run on the primary instance.
+
+!!! Info
+ Although the standby might not always be up to date with the primary,
+ in the time continuum from the first available backup to the last
+ archived WAL this is normally irrelevant. The base backup indeed
+ represents the starting point from which to begin a recovery operation,
+ including PITR. Similarly to what happens with
+ [`pg_basebackup`](https://www.postgresql.org/docs/current/app-pgbasebackup.html),
+ when backing up from an online standby we do not force a switch of the WAL on the
+ primary. This might produce unexpected results in the short term (before
+ `archive_timeout` kicks in) in deployments with low write activity.
+
+If you prefer to always run backups on the primary, you can set the backup
+target to `primary` as outlined in the example below:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ [...]
+spec:
+ backup:
+ target: "primary"
+```
+
+!!! Warning
+ Beware of setting the target to primary when performing a cold backup
+ with volume snapshots, as this will shut down the primary for
+ the time needed to take the snapshot, impacting write operations.
+ This also applies to taking a cold backup in a single-instance cluster, even
+ if you did not explicitly set the primary as the target.
+
+When the backup target is set to `prefer-standby`, this policy ensures that
+backups run on the most up-to-date available secondary instance or, if no
+other instance is available, on the primary instance.
+
+By default, when not otherwise specified, the target is automatically set to
+take backups from a standby.
+
+The backup target specified in the `Cluster` can be overridden in the `Backup`
+and `ScheduledBackup` types, like in the following example:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Backup
+metadata:
+ [...]
+spec:
+ cluster:
+ name: [...]
+ target: "primary"
+```
+
+In the previous example, EDB Postgres for Kubernetes will invariably choose the primary
+instance even if the `Cluster` is set to prefer replicas.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
new file mode 100644
index 00000000000..db1a990242a
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
@@ -0,0 +1,152 @@
+---
+title: 'Backup on object stores'
+originalFilePath: 'src/backup_barmanobjectstore.md'
+---
+
+EDB Postgres for Kubernetes natively supports **online/hot backup** of PostgreSQL
+clusters through continuous physical backup and WAL archiving on an object
+store. This means that the database is always up (no downtime required)
+and that Point In Time Recovery is available.
+
+The operator can orchestrate a continuous backup infrastructure
+that is based on the [Barman Cloud](https://pgbarman.org) tool. Instead
+of using the classical architecture with a Barman server, which
+backs up many PostgreSQL instances, the operator relies on the
+`barman-cloud-wal-archive`, `barman-cloud-check-wal-archive`,
+`barman-cloud-backup`, `barman-cloud-backup-list`, and
+`barman-cloud-backup-delete` tools. As a result, base backups will
+be *tarballs*. Both base backups and WAL files can be compressed
+and encrypted.
+
+This requires an image that includes `barman-cli-cloud`. You can use the
+`quay.io/enterprisedb/postgresql` image for this purpose, as it is composed
+of a community PostgreSQL image and the latest `barman-cli-cloud` package.
+
+!!! Important
+ Always ensure that you are running the latest version of the operands
+ in your system to take advantage of the improvements introduced in
+ Barman cloud (as well as improve the security aspects of your cluster).
+
+A backup is performed from a primary or a designated primary instance in a
+`Cluster` (please refer to
+[replica clusters](replica_cluster.md)
+for more information about designated primary instances), or alternatively
+on a [standby](backup.md#backup-from-a-standby).
+
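+The following sketch shows a minimal object store backup configuration,
+assuming a MinIO-style endpoint and a `minio` secret holding the credentials
+(bucket name and endpoint are placeholders):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: pg-backup
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  backup:
+    barmanObjectStore:
+      destinationPath: "s3://BUCKET_NAME/"
+      endpointURL: "http://minio:9000"
+      s3Credentials:
+        accessKeyId:
+          name: minio
+          key: ACCESS_KEY_ID
+        secretAccessKey:
+          name: minio
+          key: ACCESS_SECRET_KEY
+```
+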
+## Common object stores
+
+If you are looking for a specific object store such as
+[AWS S3](appendixes/object_stores.md#aws-s3),
+[Microsoft Azure Blob Storage](appendixes/object_stores.md#azure-blob-storage),
+[Google Cloud Storage](appendixes/object_stores.md#google-cloud-storage), or
+[MinIO Gateway](appendixes/object_stores.md#minio-gateway), or a compatible
+provider, please refer to [Appendix A - Common object stores](appendixes/object_stores.md).
+
+## Retention policies
+
+!!! Important
+ Retention policies are not currently available on volume snapshots.
+
+EDB Postgres for Kubernetes can manage the automated deletion of backup files from
+the backup object store, using **retention policies** based on the recovery
+window.
+
+Internally, the retention policy feature uses `barman-cloud-backup-delete`
+with `--retention-policy "RECOVERY WINDOW OF {{ retention policy value }} {{ retention policy unit }}"`.
+
+For example, you can define your backups with a retention policy of 30 days as
+follows:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ destinationPath: ""
+ s3Credentials:
+ accessKeyId:
+ name: aws-creds
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: aws-creds
+ key: ACCESS_SECRET_KEY
+ retentionPolicy: "30d"
+```
+
+!!! Note "There's more ..."
+ The **recovery window retention policy** is focused on the concept of
+ *Point of Recoverability* (`PoR`), a moving point in time determined by
+ `current time - recovery window`. The *first valid backup* is the first
+ available backup before `PoR` (in reverse chronological order).
+ EDB Postgres for Kubernetes must ensure that we can recover the cluster at
+ any point in time between `PoR` and the latest successfully archived WAL
+ file, starting from the first valid backup. Base backups that are older
+ than the first valid backup will be marked as *obsolete* and permanently
+ removed after the next backup is completed.
+
+## Compression algorithms
+
+EDB Postgres for Kubernetes by default archives backups and WAL files in an
+uncompressed fashion. However, it also supports the following compression
+algorithms via `barman-cloud-backup` (for backups) and
+`barman-cloud-wal-archive` (for WAL files):
+
+- bzip2
+- gzip
+- snappy
+
+The compression settings for backups and WALs are independent. See the
+[DataBackupConfiguration](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-DataBackupConfiguration) and
+[WALBackupConfiguration](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-WalBackupConfiguration) sections in
+the API reference.
+
+Note that archival time, restore time, and size vary between the algorithms,
+so choose the compression algorithm according to your use case.
+
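+For example, the excerpt below is a sketch that enables compression
+independently for base backups (the `data` section) and WAL files (the `wal`
+section):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+  backup:
+    barmanObjectStore:
+      [...]
+      data:
+        compression: gzip
+      wal:
+        compression: snappy
+```
+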
+The Barman team has performed an evaluation of the performance of the supported
+algorithms for Barman Cloud. The following table summarizes a scenario where a
+backup is taken on a local MinIO deployment. The Barman GitHub project includes
+a [deeper analysis](https://github.com/EnterpriseDB/barman/issues/344#issuecomment-992547396).
+
+| Compression | Backup Time (ms) | Restore Time (ms) | Uncompressed size (MB) | Compressed size (MB) | Approx ratio |
+| ----------- | ---------------- | ----------------- | ---------------------- | -------------------- | ------------ |
+| None | 10927 | 7553 | 395 | 395 | 1:1 |
+| bzip2 | 25404 | 13886 | 395 | 67 | 5.9:1 |
+| gzip | 116281 | 3077 | 395 | 91 | 4.3:1 |
+| snappy | 8134 | 8341 | 395 | 166 | 2.4:1 |
+
+## Tagging of backup objects
+
+Barman 2.18 introduces support for tagging backup resources when saving them in
+object stores via `barman-cloud-backup` and `barman-cloud-wal-archive`. As a
+result, if your PostgreSQL container image includes Barman with version 2.18 or
+higher, EDB Postgres for Kubernetes enables you to specify tags as key-value pairs
+for backup objects, namely base backups, WAL files and history files.
+
+You can use two properties in the `.spec.backup.barmanObjectStore` definition:
+
+- `tags`: key-value pair tags to be added to backup objects and archived WAL
+  files in the backup object store
+- `historyTags`: key-value pair tags to be added to archived history files in
+ the backup object store
+
+The following excerpt of a YAML manifest shows an example of this
+feature in use:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ [...]
+ tags:
+ backupRetentionPolicy: "expire"
+ historyTags:
+ backupRetentionPolicy: "keep"
+```
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx
index c891acbeded..42d7a0d33fb 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx
@@ -3,915 +3,12 @@ title: 'Backup and Recovery'
originalFilePath: 'src/backup_recovery.md'
---
-EDB Postgres for Kubernetes natively supports **online/hot backup** of PostgreSQL
-clusters through continuous physical backup and WAL archiving.
-This means that the database is always up (no downtime required)
-and that you can recover at any point in time from the first
-available base backup in your system. The latter is normally
-referred to as "Point In Time Recovery" (PITR).
+Until EDB Postgres for Kubernetes 1.20, this page used to contain both the backup and
+recovery phases of a PostgreSQL cluster. The reason was that EDB Postgres for Kubernetes
+supported only backup and recovery object stores.
-The operator can orchestrate a continuous backup infrastructure
-that is based on the [Barman](https://pgbarman.org) tool. Instead
-of using the classical architecture with a Barman server, which
-backs up many PostgreSQL instances, the operator relies on the
-`barman-cloud-wal-archive`, `barman-cloud-check-wal-archive`,
-`barman-cloud-backup`, `barman-cloud-backup-list`, and
-`barman-cloud-backup-delete` tools. As a result, base backups will
-be *tarballs*. Both base backups and WAL files can be compressed
-and encrypted.
+Version 1.21 introduces support for the Kubernetes `VolumeSnapshot` API,
+providing more possibilities for the end user.
-For this, it is required to use an image with `barman-cli-cloud` included.
-You can use the image `quay.io/enterprisedb/postgresql` for this scope,
-as it is composed of a community PostgreSQL image and the latest
-`barman-cli-cloud` package.
-
-!!! Important
- Always ensure that you are running the latest version of the operands
- in your system to take advantage of the improvements introduced in
- Barman cloud (as well as improve the security aspects of your cluster).
-
-A backup is performed from a primary or a designated primary instance in a
-`Cluster` (please refer to
-[replica clusters](replica_cluster.md)
-for more information about designated primary instances), or alternatively
-on a [standby](#backup-from-a-standby).
-
-## Cloud provider support
-
-You can archive the backup files in any service that is supported
-by the Barman Cloud infrastructure. That is:
-
-- [AWS S3](https://aws.amazon.com/s3/)
-- [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/)
-- [Google Cloud Storage](https://cloud.google.com/storage/)
-
-You can also use any compatible implementation of the
-supported services.
-
-The required setup depends on the chosen storage provider and is
-discussed in the following sections.
-
-### S3
-
-You can define the permissions to store backups in S3 buckets in two ways:
-
-- If EDB Postgres for Kubernetes is running in EKS. you may want to use the
- [IRSA authentication method](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html)
-- Alternatively, you can use the `ACCESS_KEY_ID` and `ACCESS_SECRET_KEY` credentials
-
-#### AWS Access key
-
-You will need the following information about your environment:
-
-- `ACCESS_KEY_ID`: the ID of the access key that will be used
- to upload files into S3
-
-- `ACCESS_SECRET_KEY`: the secret part of the access key mentioned above
-
-- `ACCESS_SESSION_TOKEN`: the optional session token, in case it is required
-
-The access key used must have permission to upload files into
-the bucket. Given that, you must create a Kubernetes secret with the
-credentials, and you can do that with the following command:
-
-```sh
-kubectl create secret generic aws-creds \
- --from-literal=ACCESS_KEY_ID= \
- --from-literal=ACCESS_SECRET_KEY=
-# --from-literal=ACCESS_SESSION_TOKEN= # if required
-```
-
-The credentials will be stored inside Kubernetes and will be encrypted
-if encryption at rest is configured in your installation.
-
-Once that secret has been created, you can configure your cluster like in
-the following example:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- destinationPath: ""
- s3Credentials:
- accessKeyId:
- name: aws-creds
- key: ACCESS_KEY_ID
- secretAccessKey:
- name: aws-creds
- key: ACCESS_SECRET_KEY
-```
-
-The destination path can be any URL pointing to a folder where
-the instance can upload the WAL files, e.g.
-`s3://BUCKET_NAME/path/to/folder`.
-
-#### IAM Role for Service Account (IRSA)
-
-In order to use IRSA you need to set an `annotation` in the `ServiceAccount` of
-the Postgres cluster.
-
-We can configure EDB Postgres for Kubernetes to inject them using the `serviceAccountTemplate`
-stanza:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
-[...]
-spec:
- serviceAccountTemplate:
- metadata:
- annotations:
- eks.amazonaws.com/role-arn: arn:[...]
- [...]
-```
-
-### Other S3-compatible Object Storages providers
-
-In case you're using S3-compatible object storage, like **MinIO** or
-**Linode Object Storage**, you can specify an endpoint instead of using the
-default S3 one.
-
-In this example, it will use the `bucket` of **Linode** in the region
-`us-east1`.
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- destinationPath: ""
- endpointURL: "https://bucket.us-east1.linodeobjects.com"
- s3Credentials:
- [...]
-```
-
-In case you're using **Digital Ocean Spaces**, you will have to use the Path-style syntax.
-In this example, it will use the `bucket` from **Digital Ocean Spaces** in the region `SFO3`.
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- destinationPath: "s3://[your-bucket-name]/[your-backup-folder]/"
- endpointURL: "https://sfo3.digitaloceanspaces.com"
- s3Credentials:
- [...]
-```
-
-!!! Important
- Suppose you configure an Object Storage provider which uses a certificate signed with a private CA,
- like when using OpenShift or MinIO via HTTPS. In that case, you need to set the option `endpointCA`
- referring to a secret containing the CA bundle so that Barman can verify the certificate correctly.
-
-!!! Note
- If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can
- add a label with key `k8s.enterprisedb.io/reload` to the Secrets/ConfigMaps. Otherwise, you will have to reload
- the instances using the `kubectl cnp reload` subcommand.
-
-### MinIO Gateway
-
-Optionally, you can use MinIO Gateway as a common interface which
-relays backup objects to other cloud storage solutions, like S3 or GCS.
-For more information, please refer to [MinIO official documentation](https://docs.min.io/).
-
-Specifically, the EDB Postgres for Kubernetes cluster can directly point to a local
-MinIO Gateway as an endpoint, using previously created credentials and service.
-
-MinIO secrets will be used by both the PostgreSQL cluster and the MinIO instance.
-Therefore, you must create them in the same namespace:
-
-```sh
-kubectl create secret generic minio-creds \
- --from-literal=MINIO_ACCESS_KEY= \
- --from-literal=MINIO_SECRET_KEY=
-```
-
-!!! Note
- Cloud Object Storage credentials will be used only by MinIO Gateway in this case.
-
-!!! Important
- In order to allow PostgreSQL to reach MinIO Gateway, it is necessary to create a
- `ClusterIP` service on port `9000` bound to the MinIO Gateway instance.
-
-For example:
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
- name: minio-gateway-service
-spec:
- type: ClusterIP
- ports:
- - port: 9000
- targetPort: 9000
- protocol: TCP
- selector:
- app: minio
-```
-
-!!! Warning
- At the time of writing this documentation, the official
- [MinIO Operator](https://github.com/minio/minio-operator/issues/71)
- for Kubernetes does not support the gateway feature. As such, we will use a
- `deployment` instead.
-
-The MinIO deployment will use cloud storage credentials to upload objects to the
-remote bucket and relay backup files to different locations.
-
-Here is an example using AWS S3 as Cloud Object Storage:
-
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-[...]
-spec:
- containers:
- - name: minio
- image: minio/minio:RELEASE.2020-06-03T22-13-49Z
- args:
- - gateway
- - s3
- env:
- # MinIO access key and secret key
- - name: MINIO_ACCESS_KEY
- valueFrom:
- secretKeyRef:
- name: minio-creds
- key: MINIO_ACCESS_KEY
- - name: MINIO_SECRET_KEY
- valueFrom:
- secretKeyRef:
- name: minio-creds
- key: MINIO_SECRET_KEY
- # AWS credentials
- - name: AWS_ACCESS_KEY_ID
- valueFrom:
- secretKeyRef:
- name: aws-creds
- key: ACCESS_KEY_ID
- - name: AWS_SECRET_ACCESS_KEY
- valueFrom:
- secretKeyRef:
- name: aws-creds
- key: ACCESS_SECRET_KEY
-# Uncomment the below section if session token is required
-# - name: AWS_SESSION_TOKEN
-# valueFrom:
-# secretKeyRef:
-# name: aws-creds
-# key: ACCESS_SESSION_TOKEN
- ports:
- - containerPort: 9000
-```
-
-Proceed by configuring MinIO Gateway service as the `endpointURL` in the `Cluster`
-definition, then choose a bucket name to replace `BUCKET_NAME`:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- destinationPath: s3://BUCKET_NAME/
- endpointURL: http://minio-gateway-service:9000
- s3Credentials:
- accessKeyId:
- name: minio-creds
- key: MINIO_ACCESS_KEY
- secretAccessKey:
- name: minio-creds
- key: MINIO_SECRET_KEY
- [...]
-```
-
-Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before
-proceeding with a backup.
-
-### Azure Blob Storage
-
-In order to access your storage account, you will need one of the following combinations
-of credentials:
-
-- [**Connection String**](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#configure-a-connection-string-for-an-azure-storage-account)
-- **Storage account name** and [**Storage account access key**](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)
-- **Storage account name** and [**Storage account SAS Token**](https://docs.microsoft.com/en-us/azure/storage/blobs/sas-service-create)
-- **Storage account name** and [**Azure AD Workload Identity**](https://azure.github.io/azure-workload-identity/docs/introduction.html)
- properly configured.
-
-Using **Azure AD Workload Identity**, you can avoid saving the credentials into a Kubernetes Secret,
-and have a Cluster configuration adding the `inheritFromAzureAD` as follows:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- destinationPath: ""
- azureCredentials:
- inheritFromAzureAD: true
-```
-
-On the other side, using both **Storage account access key** or **Storage account SAS Token**,
-the credentials need to be stored inside a Kubernetes Secret, adding data entries only when
-needed. The following command performs that:
-
-```
-kubectl create secret generic azure-creds \
- --from-literal=AZURE_STORAGE_ACCOUNT= \
- --from-literal=AZURE_STORAGE_KEY= \
- --from-literal=AZURE_STORAGE_SAS_TOKEN= \
- --from-literal=AZURE_STORAGE_CONNECTION_STRING=
-```
-
-The credentials will be encrypted at rest, if this feature is enabled in the used
-Kubernetes cluster.
-
-Given the previous secret, the provided credentials can be injected inside the cluster
-configuration:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- destinationPath: ""
- azureCredentials:
- connectionString:
- name: azure-creds
- key: AZURE_CONNECTION_STRING
- storageAccount:
- name: azure-creds
- key: AZURE_STORAGE_ACCOUNT
- storageKey:
- name: azure-creds
- key: AZURE_STORAGE_KEY
- storageSasToken:
- name: azure-creds
- key: AZURE_STORAGE_SAS_TOKEN
-```
-
-When using the Azure Blob Storage, the `destinationPath` fulfills the following
-structure:
-
-```
-://..core.windows.net/
-```
-
-where `` is `/`. The **account name**,
-which is also called **storage account name**, is included in the used host name.
-
-### Other Azure Blob Storage compatible providers
-
-If you are using a different implementation of the Azure Blob Storage APIs,
-the `destinationPath` will have the following structure:
-
-```
-://://
-```
-
-In that case, `` is the first component of the path.
-
-This is required if you are testing the Azure support via the Azure Storage
-Emulator or [Azurite](https://github.com/Azure/Azurite).
-
-### Google Cloud Storage
-
-Currently, the operator supports two authentication methods for Google Cloud Storage:
-
-- the first one assumes that the pod is running inside a Google Kubernetes Engine cluster
-- the second one leverages the environment variable `GOOGLE_APPLICATION_CREDENTIALS`
-
-#### Running inside Google Kubernetes Engine
-
-When running inside Google Kubernetes Engine you can configure your backups to
-simply rely on [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity),
-without having to set any credentials. In particular, you need to:
-
-- set `.spec.backup.barmanObjectStore.googleCredentials.gkeEnvironment` to `true`
-- set the `iam.gke.io/gcp-service-account` annotation in the `serviceAccountTemplate` stanza
-
-Please use the following example as a reference:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- [...]
- backup:
- barmanObjectStore:
- destinationPath: "gs://"
- googleCredentials:
- gkeEnvironment: true
-
- serviceAccountTemplate:
- metadata:
- annotations:
- iam.gke.io/gcp-service-account: [...].iam.gserviceaccount.com
- [...]
-```
-
-#### Using authentication
-
-Following the [instruction from Google](https://cloud.google.com/docs/authentication/getting-started)
-you will get a JSON file that contains all the required information to authenticate.
-
-The content of the JSON file must be provided using a `Secret` that can be created
-with the following command:
-
-```shell
-kubectl create secret generic backup-creds --from-file=gcsCredentials=gcs_credentials_file.json
-```
-
-This will create the `Secret` with the name `backup-creds` to be used in the yaml file like this:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- destinationPath: "gs://"
- googleCredentials:
- applicationCredentials:
- name: backup-creds
- key: gcsCredentials
-```
-
-Now the operator will use the credentials to authenticate against Google Cloud Storage.
-
-!!! Important
- This way of authentication will create a JSON file inside the container with all the needed
- information to access your Google Cloud Storage bucket, meaning that if someone gets access to the pod
- will also have write permissions to the bucket.
-
-## On-demand backups
-
-To request a new backup, you need to create a new Backup resource
-like the following one:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Backup
-metadata:
- name: backup-example
-spec:
- cluster:
- name: pg-backup
-```
-
-The operator will start to orchestrate the cluster to take the
-required backup using `barman-cloud-backup`. You can check
-the backup status using the plain `kubectl describe backup `
-command:
-
-```text
-Name: backup-example
-Namespace: default
-Labels:
-Annotations: API Version: postgresql.k8s.enterprisedb.io/v1
-Kind: Backup
-Metadata:
- Creation Timestamp: 2020-10-26T13:57:40Z
- Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example
- UID: ad5f855c-2ffd-454a-a157-900d5f1f6584
-Spec:
- Cluster:
- Name: pg-backup
-Status:
- Phase: running
- Started At: 2020-10-26T13:57:40Z
-Events:
-```
-
-When the backup has been completed, the phase will be `completed`
-like in the following example:
-
-```text
-Name: backup-example
-Namespace: default
-Labels:
-Annotations: API Version: postgresql.k8s.enterprisedb.io/v1
-Kind: Backup
-Metadata:
- Creation Timestamp: 2020-10-26T13:57:40Z
- Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example
- UID: ad5f855c-2ffd-454a-a157-900d5f1f6584
-Spec:
- Cluster:
- Name: pg-backup
-Status:
- Backup Id: 20201026T135740
- Destination Path: s3://backups/
- Endpoint URL: http://minio:9000
- Phase: completed
- s3Credentials:
- Access Key Id:
- Key: ACCESS_KEY_ID
- Name: minio
- Secret Access Key:
- Key: ACCESS_SECRET_KEY
- Name: minio
- Server Name: pg-backup
- Started At: 2020-10-26T13:57:40Z
- Stopped At: 2020-10-26T13:57:44Z
-Events:
-```
-
-!!!Important
- This feature will not backup the secrets for the superuser and the
- application user. The secrets are supposed to be backed up as part of
- the standard backup procedures for the Kubernetes cluster.
-
-## Scheduled backups
-
-You can also schedule your backups periodically by creating a
-resource named `ScheduledBackup`. The latter is similar to a
-`Backup` but with an added field, called `schedule`.
-
-This field is a *cron schedule* specification, which follows the same
-[format used in Kubernetes CronJobs](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
-
-This is an example of a scheduled backup:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: ScheduledBackup
-metadata:
- name: backup-example
-spec:
- schedule: "0 0 0 * * *"
- backupOwnerReference: self
- cluster:
- name: pg-backup
-```
-
-The above example will schedule a backup every day at midnight.
-
-!!! Hint
- Backup frequency might impact your recovery time object (RTO) after a
- disaster which requires a full or Point-In-Time recovery operation. Our
- advice is that you regularly test your backups by recovering them, and then
- measuring the time it takes to recover from scratch so that you can refine
- your RTO predictability. Recovery time is influenced by the size of the
- base backup and the amount of WAL files that need to be fetched from the archive
- and replayed during recovery (remember that WAL archiving is what enables
- continuous backup in PostgreSQL!).
- Based on our experience, a weekly base backup is more than enough for most
- cases - while it is extremely rare to schedule backups more frequently than once
- a day.
-
-ScheduledBackups can be suspended if needed by setting `.spec.suspend: true`,
-this will stop any new backup to be scheduled as long as the option is set to false.
-
-In case you want to issue a backup as soon as the ScheduledBackup resource is created
-you can set `.spec.immediate: true`.
-
-!!! Note
- `.spec.backupOwnerReference` indicates which ownerReference should be put inside
- the created backup resources.
-
- - *none:* no owner reference for created backup objects (same behavior as before the field was introduced)
- - *self:* sets the Scheduled backup object as owner of the backup
- - *cluster:* set the cluster as owner of the backup
-
-## WAL archiving
-
-WAL archiving is enabled as soon as you choose a destination path
-and you configure your cloud credentials.
-
-If required, you can choose to compress WAL files as soon as they
-are uploaded and/or encrypt them:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- [...]
- wal:
- compression: gzip
- encryption: AES256
-```
-
-You can configure the encryption directly in your bucket, and the operator
-will use it unless you override it in the cluster configuration.
-
-PostgreSQL implements a sequential archiving scheme, where the
-`archive_command` will be executed sequentially for every WAL
-segment to be archived.
-
-!!! Important
- By default, EDB Postgres for Kubernetes sets `archive_timeout` to `5min`, ensuring
- that WAL files, even in case of low workloads, are closed and archived
- at least every 5 minutes, providing a deterministic time-based value for
- your Recovery Point Objective (RPO). Even though you change the value
- of the [`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT),
- our experience suggests that the default value set by the operator is
- suitable for most use cases.
-
-When the bandwidth between the PostgreSQL instance and the object
-store allows archiving more than one WAL file in parallel, you
-can use the parallel WAL archiving feature of the instance manager
-like in the following example:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- [...]
- wal:
- compression: gzip
- maxParallel: 8
- encryption: AES256
-```
-
-In the previous example, the instance manager optimizes the WAL
-archiving process by archiving in parallel at most eight ready
-WALs, including the one requested by PostgreSQL.
-
-When PostgreSQL will request the archiving of a WAL that has
-already been archived by the instance manager as an optimization,
-that archival request will be just dismissed with a positive status.
-
-## Backup from a standby
-
-Taking a base backup requires to scrape the whole data content of the
-PostgreSQL instance on disk, possibly resulting in I/O contention with the
-actual workload of the database.
-
-For this reason, EDB Postgres for Kubernetes allows you to take advantage of a
-feature which is directly available in PostgreSQL: **backup from a standby**.
-
-By default, backups will run on the most aligned replica of a `Cluster`. If
-no replicas are available, backups will run on the primary instance.
-
-!!! Info
- Although the standby might not always be up to date with the primary,
- in the time continuum from the first available backup to the last
- archived WAL this is normally irrelevant. The base backup indeed
- represents the starting point from which to begin a recovery operation,
- including PITR. Similarly to what happens with
- [`pg_basebackup`](https://www.postgresql.org/docs/current/app-pgbasebackup.html),
- when backing up from a standby we do not force a switch of the WAL on the
- primary. This might produce unexpected results in the short term (before
- `archive_timeout` kicks in) in deployments with low write activity.
-
-If you prefer to always run backups on the primary, you can set the backup
-target to `primary` as outlined in the example below:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
- [...]
-spec:
- backup:
- target: "primary"
-```
-
-When the backup target is set to `prefer-standby`, such policy will ensure
-backups are run on the most up-to-date available secondary instance, or if no
-other instance is available, on the primary instance.
-
-By default, when not otherwise specified, target is automatically set to take
-backups from a standby.
-
-The backup target specified in the `Cluster` can be overridden in the `Backup`
-and `ScheduledBackup` types, like in the following example:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Backup
-metadata:
- [...]
-spec:
- cluster:
- name: [...]
- target: "primary"
-```
-
-In the previous example, EDB Postgres for Kubernetes will invariably choose the primary
-instance even if the `Cluster` is set to prefer replicas.
-
-## Recovery
-
-Cluster restores are not performed "in-place" on an existing cluster.
-You can use the data uploaded to the object storage to *bootstrap* a
-new cluster from a previously taken backup.
-The operator will orchestrate the recovery process using the
-`barman-cloud-restore` tool (for the base backup) and the
-`barman-cloud-wal-restore` tool (for WAL files, including parallel support, if
-requested).
-
-For details and instructions on the `recovery` bootstrap method, please refer
-to the ["Bootstrap from a backup" section](bootstrap.md#bootstrap-from-a-backup-recovery).
-
-!!! Important
- If you are not familiar with how [PostgreSQL PITR](https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-PITR-RECOVERY)
- works, we suggest that you configure the recovery cluster as the original
- one when it comes to `.spec.postgresql.parameters`. Once the new cluster is
- restored, you can then change the settings as desired.
-
-Under the hood, the operator will inject an init container in the first
-instance of the new cluster, and the init container will start recovering the
-backup from the object storage.
-
-!!! Important
- The duration of the base backup copy in the new PVC depends on
- the size of the backup, as well as the speed of both the network and the
- storage.
-
-When the base backup recovery process is completed, the operator starts the
-Postgres instance in recovery mode: in this phase, PostgreSQL is up, albeit not
-able to accept connections, and the pod is healthy according to the
-liveness probe. Through the `restore_command`, PostgreSQL starts fetching WAL
-files from the archive (you can speed up this phase by setting the
-`maxParallel` option and enable the parallel WAL restore capability).
-
-This phase terminates when PostgreSQL reaches the target (either the end of the
-WAL or the required target in case of Point-In-Time-Recovery). Indeed, you can
-optionally specify a `recoveryTarget` to perform a point in time recovery. If
-left unspecified, the recovery will continue up to the latest available WAL on
-the default target timeline (`current` for PostgreSQL up to 11, `latest` for
-version 12 and above).
-
-Once the recovery is complete, the operator will set the required
-superuser password into the instance. The new primary instance will start
-as usual, and the remaining instances will join the cluster as replicas.
-
-The process is transparent for the user and it is managed by the instance
-manager running in the Pods.
-
-### Restoring into a cluster with a backup section
-
-A manifest for a cluster restore may include a `backup` section.
-This means that the new cluster, after recovery, will start archiving WAL's and
-taking backups if configured to do so.
-
-For example, the section below could be part of a manifest for a Cluster
-bootstrapping from Cluster `cluster-example-backup`, and would create a
-new folder in the storage bucket named `recoveredCluster` where the base backups
-and WAL's of the recovered cluster would be stored.
-
-```yaml
- backup:
- barmanObjectStore:
- destinationPath: s3://backups/
- endpointURL: http://minio:9000
- serverName: "recoveredCluster"
- s3Credentials:
- accessKeyId:
- name: minio
- key: ACCESS_KEY_ID
- secretAccessKey:
- name: minio
- key: ACCESS_SECRET_KEY
- retentionPolicy: "30d"
-
- externalClusters:
- - name: cluster-example-backup
- barmanObjectStore:
- destinationPath: s3://backups/
- endpointURL: http://minio:9000
- s3Credentials:
-```
-
-You should not re-use the exact same `barmanObjectStore` configuration
-for different clusters. There could be cases where the existing information
-in the storage buckets could be overwritten by the new cluster.
-
-!!! Warning
- The operator includes a safety check to ensure a cluster will not
- overwrite a storage bucket that contained information. A cluster that would
- overwrite existing storage will remain in state `Setting up primary` with
- Pods in an Error state.
- The pod logs will show:
- `ERROR: WAL archive check failed for server recoveredCluster: Expected empty archive`
-
-## Retention policies
-
-EDB Postgres for Kubernetes can manage the automated deletion of backup files from
-the backup object store, using **retention policies** based on the recovery
-window.
-
-Internally, the retention policy feature uses `barman-cloud-backup-delete`
-with `--retention-policy “RECOVERY WINDOW OF {{ retention policy value }} {{ retention policy unit }}”`.
-
-For example, you can define your backups with a retention policy of 30 days as
-follows:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- destinationPath: ""
- s3Credentials:
- accessKeyId:
- name: aws-creds
- key: ACCESS_KEY_ID
- secretAccessKey:
- name: aws-creds
- key: ACCESS_SECRET_KEY
- retentionPolicy: "30d"
-```
-
-!!! Note "There's more ..."
- The **recovery window retention policy** is focused on the concept of
- *Point of Recoverability* (`PoR`), a moving point in time determined by
- `current time - recovery window`. The *first valid backup* is the first
- available backup before `PoR` (in reverse chronological order).
- EDB Postgres for Kubernetes must ensure that we can recover the cluster at
- any point in time between `PoR` and the latest successfully archived WAL
- file, starting from the first valid backup. Base backups that are older
- than the first valid backup will be marked as *obsolete* and permanently
- removed after the next backup is completed.
-
-## Compression algorithms
-
-EDB Postgres for Kubernetes by default archives backups and WAL files in an
-uncompressed fashion. However, it also supports the following compression
-algorithms via `barman-cloud-backup` (for backups) and
-`barman-cloud-wal-archive` (for WAL files):
-
-- bzip2
-- gzip
-- snappy
-
-The compression settings for backups and WALs are independent. See the
-[DataBackupConfiguration](api_reference.md#DataBackupConfiguration) and
-[WALBackupConfiguration](api_reference.md#WalBackupConfiguration) sections in
-the API reference.
-
-It is important to note that archival time, restore time, and size change
-between the algorithms, so the compression algorithm should be chosen according
-to your use case.
-
-The Barman team has performed an evaluation of the performance of the supported
-algorithms for Barman Cloud. The following table summarizes a scenario where a
-backup is taken on a local MinIO deployment. The Barman GitHub project includes
-a [deeper analysis](https://github.com/EnterpriseDB/barman/issues/344#issuecomment-992547396).
-
-| Compression | Backup Time (ms) | Restore Time (ms) | Uncompressed size (MB) | Compressed size (MB) | Approx ratio |
-| ----------- | ---------------- | ----------------- | ---------------------- | -------------------- | ------------ |
-| None | 10927 | 7553 | 395 | 395 | 1:1 |
-| bzip2 | 25404 | 13886 | 395 | 67 | 5.9:1 |
-| gzip | 116281 | 3077 | 395 | 91 | 4.3:1 |
-| snappy | 8134 | 8341 | 395 | 166 | 2.4:1 |
-
-## Tagging of backup objects
-
-Barman 2.18 introduces support for tagging backup resources when saving them in
-object stores via `barman-cloud-backup` and `barman-cloud-wal-archive`. As a
-result, if your PostgreSQL container image includes Barman with version 2.18 or
-higher, EDB Postgres for Kubernetes enables you to specify tags as key-value pairs
-for backup objects, namely base backups, WAL files and history files.
-
-You can use two properties in the `.spec.backup.barmanObjectStore` definition:
-
-- `tags`: key-value pair tags to be added to backup objects and archived WAL
- file in the backup object store
-- `historyTags`: key-value pair tags to be added to archived history files in
- the backup object store
-
-The excerpt of a YAML manifest below provides an example of usage of this
-feature:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- backup:
- barmanObjectStore:
- [...]
- tags:
- backupRetentionPolicy: "expire"
- historyTags:
- backupRetentionPolicy: "keep"
-```
\ No newline at end of file
+As a result, [backup](backup.md) and [recovery](recovery.md) are now in two
+separate sections.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
new file mode 100644
index 00000000000..69e31328380
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
@@ -0,0 +1,229 @@
+---
+title: 'Backup on volume snapshots'
+originalFilePath: 'src/backup_volumesnapshot.md'
+---
+
+!!! Important
+ The current implementation of volume snapshots in EDB Postgres for Kubernetes
+ supports [cold backup](backup.md#cold-and-hot-backups) only.
+ Hot backup with direct support of
+ [PostgreSQL's low level API for base backups](https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-LOWLEVEL-BASE-BACKUP)
+    will be added in version 1.22. That said, the current implementation is
+    suitable for production HA environments, as by default it works on the
+    most aligned standby without impacting the primary.
+
+!!! Warning
+ As noted in the [backup document](backup.md), a cold snapshot explicitly
+ set to target the primary will result in the primary being fenced for
+ the duration of the backup, rendering the cluster read-only during that
+ time.
+
+!!! Warning
+ A volume snapshot backup requires fencing the target instance. For safety,
+ in a cluster already containing fenced instances, a cold snapshot would be
+ rejected.
+
+EDB Postgres for Kubernetes is one of the first known database operators that
+directly leverages the Kubernetes-native Volume Snapshot API for both
+backup and recovery operations, in an entirely declarative way.
+
+## About standard Volume Snapshots
+
+Volume snapshotting was first introduced in
+[Kubernetes 1.12 (2018) as alpha](https://kubernetes.io/blog/2018/10/09/introducing-volume-snapshot-alpha-for-kubernetes/),
+promoted to [beta in 1.17 (2019)](https://kubernetes.io/blog/2019/12/09/kubernetes-1-17-feature-cis-volume-snapshot-beta/),
+and [moved to GA in 1.20 (2020)](https://kubernetes.io/blog/2020/12/10/kubernetes-1.20-volume-snapshot-moves-to-ga/).
+It’s now stable, widely available, and standard, providing 3 custom resource
+definitions: `VolumeSnapshot`, `VolumeSnapshotContent` and
+`VolumeSnapshotClass`.
+
+This Kubernetes feature defines a generic interface for:
+
+- the creation of a new volume snapshot, starting from a PVC
+- the deletion of an existing snapshot
+- the creation of a new volume from a snapshot
+
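+For illustration, a minimal standalone `VolumeSnapshot` request for a
+hypothetical PVC looks like the following sketch (the names and the snapshot
+class are assumptions):
+
+```yaml
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshot
+metadata:
+  name: pgdata-snapshot
+spec:
+  volumeSnapshotClassName: my-snapshot-class
+  source:
+    persistentVolumeClaimName: snapshot-cluster-1
+```
+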
+Kubernetes delegates the actual implementation to the underlying CSI drivers
+(not all of them support volume snapshots). Normally, storage classes that
+provide volume snapshotting support **incremental and differential block level
+backup in a transparent way for the application**, which can delegate the
+complexity and the independent management down the stack, including
+cross-cluster availability of the snapshots.
+
+## Requirements
+
+For Volume Snapshots to work with an EDB Postgres for Kubernetes cluster, you
+need to ensure that each storage class used to dynamically provision the
+PostgreSQL volumes (namely, the `storage` and `walStorage` sections) supports
+volume snapshots.
+
+Given that instructions vary from storage class to storage class, please
+refer to the documentation of the specific storage class and related CSI
+drivers you have deployed in your Kubernetes system.
+
+Normally, it is the [`VolumeSnapshotClass`](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/)
+that is responsible for ensuring that snapshots can be taken from persistent
+volumes of a given storage class, and managed as `VolumeSnapshot` and
+`VolumeSnapshotContent` resources.
+
+!!! Important
+    It is your responsibility to verify with the third-party vendor
+ that volume snapshots are supported. EDB Postgres for Kubernetes only interacts
+ with the Kubernetes API on this matter and we cannot support issues
+ at the storage level for each specific CSI driver.
+
+## How to configure Volume Snapshot backups
+
+EDB Postgres for Kubernetes allows you to configure a given Postgres cluster for Volume
+Snapshot backups through the `backup.volumeSnapshot` stanza.
+
+!!! Info
+ Please refer to [`VolumeSnapshotConfiguration`](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-VolumeSnapshotConfiguration)
+ in the API reference for a full list of options.
+
+A generic example with volume snapshots (assuming that PGDATA and WALs share
+the same storage class) is the following:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: snapshot-cluster
+spec:
+ instances: 3
+
+ storage:
+ storageClass: @STORAGE_CLASS@
+ size: 10Gi
+ walStorage:
+ storageClass: @STORAGE_CLASS@
+ size: 10Gi
+
+ backup:
+ # Volume snapshot backups
+ volumeSnapshot:
+ className: @VOLUME_SNAPSHOT_CLASS_NAME@
+ # WAL archive
+ barmanObjectStore:
+ # ...
+```
+
+As you can see, the `backup` section contains both the `volumeSnapshot` stanza
+(controlling physical base backups on volume snapshots) and the
+`barmanObjectStore` one (controlling the [WAL archive](wal_archiving.md)).
+
+!!! Info
+ Once you have defined the `barmanObjectStore`, you can decide to use
+ both volume snapshot and object store backup strategies simultaneously
+ to take physical backups.
+
+The `volumeSnapshot.className` option allows you to reference the default
+`VolumeSnapshotClass` object used for all the storage volumes you have
+defined in your PostgreSQL cluster.
+
+!!! Info
+ In case you are using a different storage class for `PGDATA` and
+ WAL files, you can specify a separate `VolumeSnapshotClass` for
+ that volume through the `walClassName` option (which defaults to
+ the same value as `className`).
+
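+For example, an excerpt with distinct snapshot classes for `PGDATA` and WAL
+volumes might read as follows (class names are assumptions):
+
+```yaml
+  backup:
+    volumeSnapshot:
+      className: pgdata-snapclass
+      walClassName: wal-snapclass
+```
+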
+Once a cluster is defined for volume snapshot backups, you need to define
+a `ScheduledBackup` resource that requests such backups on a periodic basis.
+
+## Persistence of volume snapshot objects
+
+By default, `VolumeSnapshot` objects created by EDB Postgres for Kubernetes are retained after
+deleting the `Backup` object that originated them, or the `Cluster` they refer to.
+Such behavior is controlled by the `.spec.backup.volumeSnapshot.snapshotOwnerReference`
+option, which accepts the following values (see the excerpt after this list):
+
+- `none`: no ownership is set, meaning that `VolumeSnapshot` objects persist
+ after the `Backup` and/or the `Cluster` resources are removed
+- `backup`: the `VolumeSnapshot` object is owned by the `Backup` resource that
+ originated it, and when the backup object is removed, the volume snapshot is
+ also removed
+- `cluster`: the `VolumeSnapshot` object is owned by the `Cluster` resource that
+ is backed up, and when the Postgres cluster is removed, the volume snapshot is
+ also removed
+
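+For instance, the following excerpt sketches how to tie the lifecycle of the
+volume snapshots to the `Backup` objects that originate them:
+
+```yaml
+  backup:
+    volumeSnapshot:
+      className: csi-aws-vsc
+      snapshotOwnerReference: backup
+```
+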
+If a `VolumeSnapshot` is deleted, the `deletionPolicy` specified in the
+`VolumeSnapshotContent` is evaluated:
+
+- if set to `Retain`, the `VolumeSnapshotContent` object is kept
+- if set to `Delete`, the `VolumeSnapshotContent` object is removed as well
+
+!!! Warning
+ `VolumeSnapshotContent` objects do not keep all the information regarding the
+ backup and the cluster they refer to (like the annotations and labels that
+ are contained in the `VolumeSnapshot` object). Although possible, restoring
+ from just this kind of object might not be straightforward. For this reason,
+    our recommendation is to always back up the `VolumeSnapshot` definitions,
+    for example by using a Kubernetes-level data protection solution.
+
+The value in `VolumeSnapshotContent` is determined by the `deletionPolicy` set
+in the corresponding `VolumeSnapshotClass` definition, which is
+referenced in the `.spec.backup.volumeSnapshot.className` option.
+
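+As an illustrative sketch, a `VolumeSnapshotClass` that retains content after
+snapshot deletion could look like this (the driver name is an assumption):
+
+```yaml
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshotClass
+metadata:
+  name: csi-aws-vsc
+driver: ebs.csi.aws.com
+deletionPolicy: Retain
+```
+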
+Please refer to the [Kubernetes documentation on Volume Snapshot Classes](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/)
+for details on this standard behavior.
+
+## Example
+
+The following example shows how to configure volume snapshot base backups on an
+EKS cluster on AWS using the `ebs-sc` storage class and the `csi-aws-vsc`
+volume snapshot class.
+
+!!! Important
+ If you are interested in testing the example, please read
+ ["Volume Snapshots" for the Amazon Elastic Block Store (EBS) CSI driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/tree/master/examples/kubernetes/snapshot)
+ for detailed instructions on the installation process for the storage class and the snapshot class.
+
+The following manifest creates a `Cluster` that is ready to be used for volume
+snapshots and that stores the WAL archive in an S3 bucket via IAM Roles for
+Service Accounts (IRSA, see [AWS S3](appendixes/object_stores.md#aws-s3)):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: hendrix
+spec:
+ instances: 3
+
+ storage:
+ storageClass: ebs-sc
+ size: 10Gi
+ walStorage:
+ storageClass: ebs-sc
+ size: 10Gi
+
+ backup:
+ volumeSnapshot:
+ className: csi-aws-vsc
+ barmanObjectStore:
+ destinationPath: s3://@BUCKET_NAME@/
+ s3Credentials:
+ inheritFromIAMRole: true
+ wal:
+ compression: gzip
+ maxParallel: 2
+
+ serviceAccountTemplate:
+ metadata:
+ annotations:
+ eks.amazonaws.com/role-arn: "@ARN@"
+---
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ScheduledBackup
+metadata:
+ name: hendrix-vs-backup
+spec:
+ cluster:
+ name: hendrix
+ method: volumeSnapshot
+ schedule: '0 0 0 * * *'
+ backupOwnerReference: cluster
+ immediate: true
+```
+
+The last resource defines daily volume snapshot backups at midnight, requesting
+one immediately after the cluster is created.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx
index 62e64f500ae..690b336aaa9 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx
@@ -14,7 +14,7 @@ There are primarily two ways to bootstrap a new cluster:
- from scratch (`initdb`)
- from an existing PostgreSQL cluster, either directly (`pg_basebackup`)
- or indirectly (`recovery`)
+ or indirectly through a physical base backup (`recovery`)
The `initdb` bootstrap also offers the possibility to import one or more
databases from an existing Postgres cluster, even outside Kubernetes, and
@@ -40,7 +40,7 @@ For more detailed information about this feature, please refer to the
[Kubernetes' native `VolumeSnapshot` API](https://github.com/cloudnative-pg/cloudnative-pg/issues/2081)
for both incremental and differential copy in backup and recovery
operations - if supported by the underlying storage classes.
- Please see ["Recovery from Volume Snapshot objects"](#recovery-from-volumesnapshot-objects)
+ Please see ["Recovery from Volume Snapshot objects"](recovery.md#recovery-from-volumesnapshot-objects)
for details.
## The `bootstrap` section
@@ -50,7 +50,7 @@ specification. EDB Postgres for Kubernetes currently supports the following boot
- `initdb`: initialize a new PostgreSQL cluster (default)
- `recovery`: create a PostgreSQL cluster by restoring from a base backup of an
- existing cluster, and replaying all the available WAL files or up to
+ existing cluster and, if needed, replaying all the available WAL files or up to
a given *point in time*
- `pg_basebackup`: create a PostgreSQL cluster by cloning an existing one of
the same major version using `pg_basebackup` via streaming replication protocol -
@@ -62,8 +62,12 @@ create a new cluster based on another one (either offline or online) and can be
used to spin up replica clusters. They both rely on the definition of external
clusters.
+Given that the EDB Postgres for Kubernetes operator provides several backup
+methods and combinations of backup storage, please refer to the
+["Recovery" section](recovery.md) for guidance on each method.
+
!!! Seealso "API reference"
- Please refer to the ["API reference for the `bootstrap` section](api_reference.md#BootstrapConfiguration)
+    Please refer to the ["API reference for the `bootstrap` section"](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration)
for more information.
## The `externalClusters` section
@@ -85,8 +89,9 @@ method or the `recovery` one. An external cluster needs to have:
- information about streaming connection
- information about the **recovery object store**, which is a Barman Cloud
- compatible object store that contains the backup files of the source
- cluster - that is, WAL archive and base backups.
+ compatible object store that contains:
+ - the WAL archive (required for Point In Time Recovery)
+ - the catalog of physical base backups for the Postgres cluster
!!! Note
A recovery object store is normally an AWS S3, or an Azure Blob Storage,
@@ -103,7 +108,7 @@ continuously fed from the source, either via streaming, via WAL shipping
through the PostgreSQL's `restore_command`, or any of the two.
!!! Seealso "API reference"
- Please refer to the ["API reference for the `externalClusters` section](api_reference.md#ExternalCluster)
+    Please refer to the ["API reference for the `externalClusters` section"](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-ExternalCluster)
for more information.
## Bootstrap an empty cluster (`initdb`)
@@ -377,411 +382,9 @@ by `name` (our recommendation is to use the same `name` of the origin cluster).
### Bootstrap from a backup (`recovery`)
-The `recovery` bootstrap mode lets you create a new cluster from an existing
-physical base backup, and then reapply the WAL files containing the REDO log
-from the archive. Both base backups and WAL files are pulled from the
-*recovery object store*.
-
-Recovery from a *recovery object store* can be achieved in two ways:
-
-- using a recovery object store, that is a backup of another cluster
- created by Barman Cloud and defined via the `barmanObjectStore` option
- in the `externalClusters` section (*recommended*)
-- using an existing `Backup` object in the same namespace (this was the
- only option available before version 1.8.0).
-
-Both recovery methods enable either full recovery (up to the last
-available WAL) or up to a [point in time](#point-in-time-recovery-pitr).
-When performing a full recovery, the cluster can also be started
-in replica mode. Also, make sure that the PostgreSQL configuration
-(`.spec.postgresql.parameters`) of the recovered cluster is
-compatible, from a physical replication standpoint, with the original one.
-
-!!! Note
- You can find more information about backup and recovery of a running cluster
- in the ["Backup and recovery" page](backup_recovery.md).
-
-EDB Postgres for Kubernetes is also introducing support for Kubernetes' volume snapshots.
-With the current version of EDB Postgres for Kubernetes, you can:
-
-- take a consistent cold backup of the Postgres cluster from a standby through
- the `kubectl cnp snapshot` command - which creates the necessary
- `VolumeSnapshot` objects (currently one or two, if you have WALs in a separate
- volume)
-- recover from the above *VolumeSnapshot* objects through the `volumeSnapshots`
- option in the `.spec.bootstrap.recovery` stanza, as described in
- ["Recovery from `VolumeSnapshot` objects"](#recovery-from-volumesnapshot-objects)
- below
-
-#### Recovery from an object store
-
-You can recover from a backup created by Barman Cloud and stored on a supported
-object storage. Once you have defined the external cluster, including all the
-required configuration in the `barmanObjectStore` section, you need to
-reference it in the `.spec.recovery.source` option. The following example
-defines a recovery object store in a blob container in Azure:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
- name: cluster-restore
-spec:
- [...]
-
- superuserSecret:
- name: superuser-secret
-
- bootstrap:
- recovery:
- source: clusterBackup
-
- externalClusters:
- - name: clusterBackup
- barmanObjectStore:
- destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
- azureCredentials:
- storageAccount:
- name: recovery-object-store-secret
- key: storage_account_name
- storageKey:
- name: recovery-object-store-secret
- key: storage_account_key
- wal:
- maxParallel: 8
-```
-
-!!! Important
- By default the `recovery` method strictly uses the `name` of the
- cluster in the `externalClusters` section to locate the main folder
- of the backup data within the object store, which is normally reserved
- for the name of the server. You can specify a different one with the
- `barmanObjectStore.serverName` property (by default assigned to the
- value of `name` in the external clusters definition).
-
-!!! Note
- In the above example we are taking advantage of the parallel WAL restore
- feature, dedicating up to 8 jobs to concurrently fetch the required WAL
- files from the archive. This feature can appreciably reduce the recovery time.
- Make sure that you plan ahead for this scenario and correctly tune the
- value of this parameter for your environment. It will certainly make a
- difference **when** (not if) you'll need it.
-
-#### Recovery from a `Backup` object
-
-In case a Backup resource is already available in the namespace in which the
-cluster should be created, you can specify its name through
-`.spec.bootstrap.recovery.backup.name`, as in the following example:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
- name: cluster-example-initdb
-spec:
- instances: 3
-
- superuserSecret:
- name: superuser-secret
-
- bootstrap:
- recovery:
- backup:
- name: backup-example
-
- storage:
- size: 1Gi
-```
-
-This bootstrap method allows you to specify just a reference to the
-backup that needs to be restored.
-
-#### Recovery from `VolumeSnapshot` objects
-
-EDB Postgres for Kubernetes can create a new cluster from a `VolumeSnapshot` of a PVC of an
-existing `Cluster` that's been taken with `kubectl cnp snapshot`.
-You need to specify the name of the snapshot as in the following example:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
- name: cluster-restore
-spec:
- [...]
-
-bootstrap:
- recovery:
- volumeSnapshots:
- storage:
- name:
- kind: VolumeSnapshot
- apiGroup: snapshot.storage.k8s.io
-```
-
-!!! Warning
- As the development of declarative support for Kubernetes' `VolumeSnapshot` API
- progresses, you'll be able to use this technique in conjunction with a WAL
- archive for Point In Time Recovery operations or replica clusters.
-
-In case the backed-up cluster was using a separate PVC to store the WAL files,
-the recovery must include that too:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
- name: cluster-restore
-spec:
- [...]
-
-bootstrap:
- recovery:
- volumeSnapshots:
- storage:
- name:
- kind: VolumeSnapshot
- apiGroup: snapshot.storage.k8s.io
-
- walStorage:
- name:
- kind: VolumeSnapshot
- apiGroup: snapshot.storage.k8s.io
-```
-
-The `kubectl cnp snapshot` command is able to take consistent snapshots of a
-replica through a technique known as *cold backup*, by fencing the standby
-before taking a physical copy of the volumes. For details, please refer to
-["Snapshotting a Postgres cluster"](kubectl-plugin/#snapshotting-a-postgres-cluster).
-
-#### Additional considerations
-
-Whether you recover from a recovery object store or an existing `Backup`
-resource, the following considerations apply:
-
-- The application database name and the application database user are preserved
- from the backup that is being restored. The operator does not currently attempt
- to back up the underlying secrets, as this is part of the usual maintenance
- activity of the Kubernetes cluster itself.
-- In case you don't supply any `superuserSecret`, a new one is automatically
- generated with a secure and random password. The secret is then used to
- reset the password for the `postgres` user of the cluster.
-- By default, the recovery will continue up to the latest
- available WAL on the default target timeline (`current` for PostgreSQL up to
- 11, `latest` for version 12 and above).
- You can optionally specify a `recoveryTarget` to perform a point in time
- recovery (see the ["Point in time recovery" section](#point-in-time-recovery-pitr)).
-
-!!! Important
- Consider using the `barmanObjectStore.wal.maxParallel` option to speed
- up WAL fetching from the archive by concurrently downloading the transaction
- logs from the recovery object store.
-
-#### Point in time recovery (PITR)
-
-Instead of replaying all the WALs up to the latest one, we can ask PostgreSQL
-to stop replaying WALs at any given point in time, after having extracted a
-base backup. PostgreSQL uses this technique to achieve *point-in-time* recovery
-(PITR).
-
-!!! Note
- PITR is available from recovery object stores as well as `Backup` objects.
-
-The operator will generate the configuration parameters required for this
-feature to work in case a recovery target is specified, like in the following
-example that uses a recovery object stored in Azure and a timestamp based
-goal:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
- name: cluster-restore-pitr
-spec:
- instances: 3
-
- storage:
- size: 5Gi
-
- bootstrap:
- recovery:
- source: clusterBackup
- recoveryTarget:
- targetTime: "2020-11-26 15:22:00.00000+00"
-
- externalClusters:
- - name: clusterBackup
- barmanObjectStore:
- destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
- azureCredentials:
- storageAccount:
- name: recovery-object-store-secret
- key: storage_account_name
- storageKey:
- name: recovery-object-store-secret
- key: storage_account_key
- wal:
- maxParallel: 8
-```
-
-You might have noticed that in the above example you only had to specify
-the `targetTime` in the form of a timestamp, without having to worry about
-specifying the base backup from which to start the recovery.
-
-The `backupID` option is the one that allows you to specify the base backup
-from which to initiate the recovery process. By default, this value is
-empty.
-
-If you assign a value to it (in the form of a Barman backup ID), the operator
-will use that backup as base for the recovery.
-
-!!! Important
- You need to make sure that such a backup exists and is accessible.
-
-If the backup ID is not specified, the operator will automatically detect the
-base backup for the recovery as follows:
-
-- when you use `targetTime` or `targetLSN`, the operator selects the closest
- backup that was completed before that target
-- otherwise the operator selects the last available backup in chronological
- order.
-
-Here are the recovery target criteria you can use:
-
-targetTime
-: time stamp up to which recovery will proceed, expressed in
- [RFC 3339](https://datatracker.ietf.org/doc/html/rfc3339) format
- (the precise stopping point is also influenced by the `exclusive` option)
-
-targetXID
-: transaction ID up to which recovery will proceed
- (the precise stopping point is also influenced by the `exclusive` option);
- keep in mind that while transaction IDs are assigned sequentially at
- transaction start, transactions can complete in a different numeric order.
- The transactions that will be recovered are those that committed before
- (and optionally including) the specified one
-
-targetName
-: named restore point (created with `pg_create_restore_point()`) to which
- recovery will proceed
-
-targetLSN
-: LSN of the write-ahead log location up to which recovery will proceed
- (the precise stopping point is also influenced by the `exclusive` option)
-
-targetImmediate
-: recovery should end as soon as a consistent state is reached - i.e. as early
- as possible. When restoring from an online backup, this means the point where
- taking the backup ended
-
-!!! Important
- While the operator is able to automatically retrieve the closest backup
- when either `targetTime` or `targetLSN` is specified, this is not possible
- for the remaining targets: `targetName`, `targetXID`, and `targetImmediate`.
- In such cases, it is important to specify `backupID`, unless you are OK with
- the last available backup in the catalog.
-
-The example below uses a `targetName` based recovery target:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
- bootstrap:
- recovery:
- source: clusterBackup
- recoveryTarget:
- backupID: 20220616T142236
- targetName: 'restore_point_1'
-[...]
-```
-
-You can choose only a single one among the targets above in each
-`recoveryTarget` configuration.
-
-Additionally, you can specify `targetTLI` force recovery to a specific
-timeline.
-
-By default, the previous parameters are considered to be inclusive, stopping
-just after the recovery target, matching [the behavior in PostgreSQL](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-RECOVERY-TARGET-INCLUSIVE)
-You can request exclusive behavior,
-stopping right before the recovery target, by setting the `exclusive` parameter to
-`true` like in the following example relying on a blob container in Azure:
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-metadata:
- name: cluster-restore-pitr
-spec:
- instances: 3
-
- storage:
- size: 5Gi
-
- bootstrap:
- recovery:
- source: clusterBackup
- recoveryTarget:
- backupID: 20220616T142236
- targetName: "maintenance-activity"
- exclusive: true
-
- externalClusters:
- - name: clusterBackup
- barmanObjectStore:
- destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
- azureCredentials:
- storageAccount:
- name: recovery-object-store-secret
- key: storage_account_name
- storageKey:
- name: recovery-object-store-secret
- key: storage_account_key
- wal:
- maxParallel: 8
-```
-
-#### Configure the application database
-
-For the recovered cluster, we can configure the application database name and
-credentials with additional configuration. To update application database
-credentials, we can generate our own passwords, store them as secrets, and
-update the database use the secrets. Or we can also let the operator generate a
-secret with randomly secure password for use. Please reference the
-["Bootstrap an empty cluster"](#bootstrap-an-empty-cluster-initdb)
-section for more information about secrets.
-
-The following example configure the application database `app` with owner
-`app`, and supplied secret `app-secret`.
-
-```yaml
-apiVersion: postgresql.k8s.enterprisedb.io/v1
-kind: Cluster
-[...]
-spec:
- bootstrap:
- recovery:
- database: app
- owner: app
- secret:
- name: app-secret
- [...]
-```
-
-With the above configuration, the following will happen after recovery is completed:
-
-1. if database `app` does not exist, a new database `app` will be created.
-2. if user `app` does not exist, a new user `app` will be created.
-3. if user `app` is not the owner of database, user `app` will be granted
- as owner of database `app`.
-4. If value of `username` match value of `owner` in secret, the password of
- application database will be changed to the value of `password` in secret.
-
-!!! Important
- For a replica cluster with replica mode enabled, the operator will not
- create any database or user in the PostgreSQL instance, as these will be
- recovered from the original cluster.
+Given the many backup and recovery methods and combinations that the
+EDB Postgres for Kubernetes operator provides, please refer
+to the ["Recovery" section](recovery.md).
### Bootstrap from a live cluster (`pg_basebackup`)
@@ -900,7 +503,7 @@ file on the source PostgreSQL instance:
host replication streaming_replica all md5
```
-The following manifest creates a new PostgreSQL 15.3 cluster,
+The following manifest creates a new PostgreSQL 16.0 cluster,
called `target-db`, using the `pg_basebackup` bootstrap method
to clone an external PostgreSQL cluster defined as `source-db`
(in the `externalClusters` array). As you can see, the `source-db`
@@ -915,7 +518,7 @@ metadata:
name: target-db
spec:
instances: 3
- imageName: quay.io/enterprisedb/postgresql:15.3
+ imageName: quay.io/enterprisedb/postgresql:16.0
bootstrap:
pg_basebackup:
@@ -935,7 +538,7 @@ spec:
```
All the requirements must be met for the clone operation to work, including
-the same PostgreSQL version (in our case 15.3).
+the same PostgreSQL version (in our case 16.0).
#### TLS certificate authentication
@@ -950,7 +553,7 @@ in the same Kubernetes cluster.
This example can be easily adapted to cover an instance that resides
outside the Kubernetes cluster.
-The manifest defines a new PostgreSQL 15.3 cluster called `cluster-clone-tls`,
+The manifest defines a new PostgreSQL 16.0 cluster called `cluster-clone-tls`,
which is bootstrapped using the `pg_basebackup` method from the `cluster-example`
external cluster. The host is identified by the read/write service
in the same cluster, while the `streaming_replica` user is authenticated
@@ -965,7 +568,7 @@ metadata:
name: cluster-clone-tls
spec:
instances: 3
- imageName: quay.io/enterprisedb/postgresql:15.3
+ imageName: quay.io/enterprisedb/postgresql:16.0
bootstrap:
pg_basebackup:
diff --git a/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
new file mode 100644
index 00000000000..b04fc1dd9b4
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
@@ -0,0 +1,4216 @@
+---
+title: 'API Reference'
+originalFilePath: 'src/cloudnative-pg.v1.md'
+---
+
+Package v1 contains API Schema definitions for the postgresql v1 API group
+
+## Resource Types
+
+- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup)
+- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster)
+- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler)
+- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup)
+
+## Backup {#postgresql-k8s-enterprisedb-io-v1-Backup}
+
+Backup is the Schema for the backups API
+
+
+| Field | Description |
+| ----- | ----------- |
+| `apiVersion` [Required] `string` | `postgresql.k8s.enterprisedb.io/v1` |
+| `kind` [Required] `string` | `Backup` |
+| `metadata` [Required] `meta/v1.ObjectMeta` | Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
+| `spec` [Required] [`BackupSpec`](#postgresql-k8s-enterprisedb-io-v1-BackupSpec) | Specification of the desired behavior of the backup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
+| `status` [`BackupStatus`](#postgresql-k8s-enterprisedb-io-v1-BackupStatus) | Most recently observed status of the backup. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
+
+
+
+
+## Cluster {#postgresql-k8s-enterprisedb-io-v1-Cluster}
+
+Cluster is the Schema for the PostgreSQL API
+
+
+| Field | Description |
+| ----- | ----------- |
+| `apiVersion` [Required] `string` | `postgresql.k8s.enterprisedb.io/v1` |
+| `kind` [Required] `string` | `Cluster` |
+| `metadata` [Required] `meta/v1.ObjectMeta` | Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
+| `spec` [Required] [`ClusterSpec`](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec) | Specification of the desired behavior of the cluster. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
+| `status` [`ClusterStatus`](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus) | Most recently observed status of the cluster. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
+
+
+
+
+## Pooler {#postgresql-k8s-enterprisedb-io-v1-Pooler}
+
+Pooler is the Schema for the poolers API
+
+
+| Field | Description |
+| ----- | ----------- |
+| `apiVersion` [Required] `string` | `postgresql.k8s.enterprisedb.io/v1` |
+| `kind` [Required] `string` | `Pooler` |
+| `metadata` [Required] `meta/v1.ObjectMeta` | Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
+| `spec` [Required] [`PoolerSpec`](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec) | Specification of the desired behavior of the Pooler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
+| `status` [`PoolerStatus`](#postgresql-k8s-enterprisedb-io-v1-PoolerStatus) | Most recently observed status of the Pooler. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
+
+
+
+
+## ScheduledBackup {#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup}
+
+ScheduledBackup is the Schema for the scheduledbackups API
+
+
+| Field | Description |
+| ----- | ----------- |
+| `apiVersion` [Required] `string` | `postgresql.k8s.enterprisedb.io/v1` |
+| `kind` [Required] `string` | `ScheduledBackup` |
+| `metadata` [Required] `meta/v1.ObjectMeta` | Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
+| `spec` [Required] [`ScheduledBackupSpec`](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec) | Specification of the desired behavior of the ScheduledBackup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
+| `status` [`ScheduledBackupStatus`](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupStatus) | Most recently observed status of the ScheduledBackup. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
+
+
+
+
+## AffinityConfiguration {#postgresql-k8s-enterprisedb-io-v1-AffinityConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+AffinityConfiguration contains the info we need to create the
+affinity rules for Pods
+
+
+| Field | Description |
+| ----- | ----------- |
+| `enablePodAntiAffinity` `bool` | Activates anti-affinity for the pods. The operator will define pods anti-affinity unless this field is explicitly set to false |
+| `topologyKey` `string` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that |
+| `nodeSelector` `map[string]string` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ |
+| `nodeAffinity` `core/v1.NodeAffinity` | NodeAffinity describes node affinity scheduling rules for the pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity |
+| `tolerations` `[]core/v1.Toleration` | Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run on tainted nodes. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
+| `podAntiAffinityType` `string` | PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or "required". Setting it to "required" could lead to instances remaining pending until new kubernetes nodes are added if all the existing nodes don't match the required pod anti-affinity rule. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity |
+| `additionalPodAntiAffinity` `core/v1.PodAntiAffinity` | AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. |
+| `additionalPodAffinity` `core/v1.PodAffinity` | AdditionalPodAffinity allows to specify pod affinity terms to be passed to all the cluster's pods. |
+
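+As an illustrative sketch only (values are examples, not recommendations),
+these fields combine as follows in a `Cluster` spec:
+
+```yaml
+spec:
+  affinity:
+    enablePodAntiAffinity: true
+    topologyKey: kubernetes.io/hostname
+    podAntiAffinityType: required
+```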
+
+
+
+## AzureCredentials {#postgresql-k8s-enterprisedb-io-v1-AzureCredentials}
+
+**Appears in:**
+
+- [BarmanCredentials](#postgresql-k8s-enterprisedb-io-v1-BarmanCredentials)
+
+AzureCredentials is the type for the credentials to be used to upload
+files to Azure Blob Storage. The connection string contains all the needed
+information. If the connection string is not specified, we'll need the
+storage account name and also exactly one of the following: a storage key,
+a shared access signature (SAS) token, or Azure AD authentication
+(`inheritFromAzureAD`).
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `connectionString` `SecretKeySelector` | The connection string to be used |
+| `storageAccount` `SecretKeySelector` | The storage account where to upload data |
+| `storageKey` `SecretKeySelector` | The storage account key to be used in conjunction with the storage account name |
+| `storageSasToken` `SecretKeySelector` | A shared-access-signature to be used in conjunction with the storage account name |
+| `inheritFromAzureAD` `bool` | Use the Azure AD based authentication without providing explicitly the keys. |
+
+
+
+
+## BackupConfiguration {#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+BackupConfiguration defines how the backups of the cluster are taken.
+The supported backup methods are BarmanObjectStore and VolumeSnapshot.
+For details and examples refer to the Backup and Recovery section of the
+documentation.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `volumeSnapshot` `VolumeSnapshotConfiguration` | VolumeSnapshot provides the configuration for the execution of volume snapshot backups. |
+| `barmanObjectStore` [`BarmanObjectStoreConfiguration`](#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration) | The configuration for the barman-cloud tool suite |
+| `retentionPolicy` `string` | RetentionPolicy is the retention policy to be used for backups and WALs (i.e. `60d`). The retention policy is expressed in the form of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - days, weeks, months. It's currently only applicable when using the BarmanObjectStore method. |
+| `target` [`BackupTarget`](#postgresql-k8s-enterprisedb-io-v1-BackupTarget) | The policy to decide which instance should perform backups. Available options are empty string, which will default to the `prefer-standby` policy, `primary` to have backups run always on primary instances, and `prefer-standby` to have backups run preferably on the most updated standby, if available. |
+
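+A hedged sketch of how these fields combine, reusing the placeholder bucket
+convention of this page:
+
+```yaml
+spec:
+  backup:
+    retentionPolicy: "30d"
+    target: prefer-standby
+    barmanObjectStore:
+      destinationPath: s3://@BUCKET_NAME@/
+```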
+
+
+
+## BackupMethod {#postgresql-k8s-enterprisedb-io-v1-BackupMethod}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec)
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec)
+
+BackupMethod defines the way of executing the physical base backups of
+the selected PostgreSQL instance
+
+## BackupPhase {#postgresql-k8s-enterprisedb-io-v1-BackupPhase}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+BackupPhase is the phase of the backup
+
+## BackupSnapshotElementStatus {#postgresql-k8s-enterprisedb-io-v1-BackupSnapshotElementStatus}
+
+**Appears in:**
+
+- [BackupSnapshotStatus](#postgresql-k8s-enterprisedb-io-v1-BackupSnapshotStatus)
+
+BackupSnapshotElementStatus is a volume snapshot that is part of a volume snapshot method backup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required] `string` | Name is the snapshot resource name |
+| `type` [Required] `string` | Type is the role of the snapshot in the cluster, such as PG_DATA and PG_WAL |
+
+
+
+
+## BackupSnapshotStatus {#postgresql-k8s-enterprisedb-io-v1-BackupSnapshotStatus}
+
+**Appears in:**
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+BackupSnapshotStatus contains the fields exclusive to the volumeSnapshot method backup
+
+
+
+## BackupSource {#postgresql-k8s-enterprisedb-io-v1-BackupSource}
+
+**Appears in:**
+
+- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery)
+
+BackupSource contains the backup we need to restore from, plus some
+information that could be needed to correctly restore it.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `LocalObjectReference` | (Members of `LocalObjectReference` are embedded into this type.) No description provided. |
+| `endpointCA` `SecretKeySelector` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive. |
+
+
+
+
+## BackupSpec {#postgresql-k8s-enterprisedb-io-v1-BackupSpec}
+
+**Appears in:**
+
+- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup)
+
+BackupSpec defines the desired state of Backup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `cluster` [Required] `LocalObjectReference` | The cluster to backup |
+| `target` [`BackupTarget`](#postgresql-k8s-enterprisedb-io-v1-BackupTarget) | The policy to decide which instance should perform this backup. If empty, it defaults to `cluster.spec.backup.target`. Available options are empty string, `primary` and `prefer-standby`. `primary` to have backups run always on primary instances, `prefer-standby` to have backups run preferably on the most updated standby, if available. |
+| `method` [`BackupMethod`](#postgresql-k8s-enterprisedb-io-v1-BackupMethod) | The backup method to be used, possible options are `barmanObjectStore` and `volumeSnapshot`. Defaults to: `barmanObjectStore`. |
+
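+As a sketch under these definitions (resource and cluster names are
+hypothetical), an on-demand volume snapshot backup targeting the primary
+could read:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Backup
+metadata:
+  name: backup-example-ondemand  # hypothetical name
+spec:
+  cluster:
+    name: cluster-example
+  method: volumeSnapshot
+  target: primary
+```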
+
+
+
+## BackupStatus {#postgresql-k8s-enterprisedb-io-v1-BackupStatus}
+
+**Appears in:**
+
+- [Backup](#postgresql-k8s-enterprisedb-io-v1-Backup)
+
+BackupStatus defines the observed state of Backup
+
+
+| Field | Description |
+| ----- | ----------- |
+| [`BarmanCredentials`](#postgresql-k8s-enterprisedb-io-v1-BarmanCredentials) | (Members of `BarmanCredentials` are embedded into this type.) The potential credentials for each cloud provider |
+| `endpointCA` `SecretKeySelector` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive. |
+| `endpointURL` `string` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery |
+| `destinationPath` `string` | The path where to store the backup (i.e. `s3://bucket/path/to/folder`); this path, with different destination folders, will be used for WALs and for data. This may not be populated in case of errors. |
+| `serverName` `string` | The server name on S3, the cluster name is used if this parameter is omitted |
+| `encryption` `string` | Encryption method required to S3 API |
+| `backupId` `string` | The ID of the Barman backup |
+| `backupName` `string` | The Name of the Barman backup |
+| `phase` [`BackupPhase`](#postgresql-k8s-enterprisedb-io-v1-BackupPhase) | The last backup status |
+| `startedAt` `meta/v1.Time` | When the backup was started |
+| `stoppedAt` `meta/v1.Time` | When the backup was terminated |
+| `beginWal` `string` | The starting WAL |
+| `endWal` `string` | The ending WAL |
+| `beginLSN` `string` | The starting xlog |
+| `endLSN` `string` | The ending xlog |
+| `error` `string` | The detected error |
+| `commandOutput` `string` | Unused. Retained for compatibility with old versions. |
+| `commandError` `string` | The backup command output in case of error |
+| `instanceID` `InstanceID` | Information to identify the instance where the backup has been taken from |
+| `snapshotBackupStatus` [`BackupSnapshotStatus`](#postgresql-k8s-enterprisedb-io-v1-BackupSnapshotStatus) | Status of the volumeSnapshot backup |
+| `method` [`BackupMethod`](#postgresql-k8s-enterprisedb-io-v1-BackupMethod) | The backup method being used |
+
+
+
+
+## BackupTarget {#postgresql-k8s-enterprisedb-io-v1-BackupTarget}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [BackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration)
+
+- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec)
+
+- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec)
+
+BackupTarget describes the preferred targets for a backup
+
+## BarmanCredentials {#postgresql-k8s-enterprisedb-io-v1-BarmanCredentials}
+
+**Appears in:**
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+- [BarmanObjectStoreConfiguration](#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration)
+
+BarmanCredentials is an object containing the potential credentials for each cloud provider
+
+
+| Field | Description |
+| ----- | ----------- |
+| `googleCredentials` `GoogleCredentials` | The credentials to use to upload data to Google Cloud Storage |
+| `s3Credentials` `S3Credentials` | The credentials to use to upload data to S3 |
+| `azureCredentials` [`AzureCredentials`](#postgresql-k8s-enterprisedb-io-v1-AzureCredentials) | The credentials to use to upload data to Azure Blob Storage |
+
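+Exactly one provider is expected to be populated; for example, the S3 variant
+with IAM role inheritance used earlier in this documentation reads:
+
+```yaml
+barmanObjectStore:
+  destinationPath: s3://@BUCKET_NAME@/
+  s3Credentials:
+    inheritFromIAMRole: true
+```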
+
+
+
+## BarmanObjectStoreConfiguration {#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration}
+
+**Appears in:**
+
+- [BackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration)
+
+- [ExternalCluster](#postgresql-k8s-enterprisedb-io-v1-ExternalCluster)
+
+BarmanObjectStoreConfiguration contains the backup configuration
+using Barman against an S3-compatible object storage
+
+
+| Field | Description |
+| ----- | ----------- |
+| [`BarmanCredentials`](#postgresql-k8s-enterprisedb-io-v1-BarmanCredentials) | (Members of `BarmanCredentials` are embedded into this type.) The potential credentials for each cloud provider |
+| `endpointURL` `string` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery |
+| `endpointCA` `SecretKeySelector` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive |
+| `destinationPath` [Required] `string` | The path where to store the backup (i.e. `s3://bucket/path/to/folder`); this path, with different destination folders, will be used for WALs and for data |
+| `serverName` `string` | The server name on S3, the cluster name is used if this parameter is omitted |
+| `wal` `WalBackupConfiguration` | The configuration for the backup of the WAL stream. When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. |
+| `data` `DataBackupConfiguration` | The configuration to be used to backup the data files. When not defined, base backup files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. |
+| `tags` `map[string]string` | Tags is a list of key value pairs that will be passed to the Barman `--tags` option. |
+| `historyTags` `map[string]string` | HistoryTags is a list of key value pairs that will be passed to the Barman `--history-tags` option. |
+
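+A sketch combining several of these fields, assuming the placeholder bucket
+used elsewhere in this page (the `serverName` and tag values are illustrative):
+
+```yaml
+barmanObjectStore:
+  destinationPath: s3://@BUCKET_NAME@/
+  serverName: cluster-example-v2  # hypothetical value
+  wal:
+    compression: gzip
+    maxParallel: 8
+  tags:
+    environment: staging  # illustrative tag
+```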
+
+
+
+## BootstrapConfiguration {#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+BootstrapConfiguration contains information about how to create the PostgreSQL
+cluster. Only a single bootstrap method can be defined among the supported
+ones. `initdb` will be used as the bootstrap method if left
+unspecified. Refer to the Bootstrap page of the documentation for more
+information.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `initdb` [`BootstrapInitDB`](#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB) | Bootstrap the cluster via `initdb` |
+| `recovery` [`BootstrapRecovery`](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery) | Bootstrap the cluster from a backup |
+| `pg_basebackup` [`BootstrapPgBaseBackup`](#postgresql-k8s-enterprisedb-io-v1-BootstrapPgBaseBackup) | Bootstrap the cluster taking a physical backup of another compatible PostgreSQL instance |
+
+
+
+
+## BootstrapInitDB {#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB}
+
+**Appears in:**
+
+- [BootstrapConfiguration](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration)
+
+BootstrapInitDB is the configuration of the bootstrap process when
+`initdb` is used.
+Refer to the Bootstrap page of the documentation for more information.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `database` `string` | Name of the database used by the application. Default: `app`. |
+| `owner` `string` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. |
+| `secret` `LocalObjectReference` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch |
+| `redwood` `bool` | If we need to enable/disable Redwood compatibility. Requires EPAS and for EPAS defaults to true |
+| `options` `[]string` | The list of options that must be passed to initdb when creating the cluster. Deprecated: This could lead to inconsistent configurations, please use the explicitly provided parameters instead. If defined, explicit values will be ignored. |
+| `dataChecksums` `bool` | Whether the `-k` option should be passed to initdb, enabling checksums on data pages (default: `false`) |
+| `encoding` `string` | The value to be passed as option `--encoding` for initdb (default: `UTF8`) |
+| `localeCollate` `string` | The value to be passed as option `--lc-collate` for initdb (default: `C`) |
+| `localeCType` `string` | The value to be passed as option `--lc-ctype` for initdb (default: `C`) |
+| `walSegmentSize` `int` | The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` option for initdb (default: empty, resulting in PostgreSQL default: 16MB) |
+| `postInitSQL` `[]string` | List of SQL queries to be executed as a superuser immediately after the cluster has been created - to be used with extreme care (by default empty) |
+| `postInitApplicationSQL` `[]string` | List of SQL queries to be executed as a superuser in the application database right after it is created - to be used with extreme care (by default empty) |
+| `postInitTemplateSQL` `[]string` | List of SQL queries to be executed as a superuser in the `template1` database after the cluster has been created - to be used with extreme care (by default empty) |
+| `import` `Import` | Bootstraps the new cluster by importing data from an existing PostgreSQL instance using logical backup (`pg_dump` and `pg_restore`) |
+| `postInitApplicationSQLRefs` `PostInitApplicationSQLRefs` | PostInitApplicationSQLRefs points references to ConfigMaps or Secrets which contain SQL files; the general implementation order of these references is from all Secrets to all ConfigMaps, and inside Secrets or ConfigMaps the implementation order is the same as the order of each array (by default empty) |
+
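+For illustration only, a minimal `initdb` bootstrap using a few of these
+options (values are examples, not recommendations):
+
+```yaml
+spec:
+  bootstrap:
+    initdb:
+      database: app
+      owner: app
+      dataChecksums: true
+      walSegmentSize: 32
+```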
+
+
+
+## BootstrapPgBaseBackup {#postgresql-k8s-enterprisedb-io-v1-BootstrapPgBaseBackup}
+
+**Appears in:**
+
+- [BootstrapConfiguration](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration)
+
+BootstrapPgBaseBackup contains the configuration required to take
+a physical backup of an existing PostgreSQL cluster
+
+
+| Field | Description |
+| ----- | ----------- |
+| `source` [Required] `string` | The name of the server of which we need to take a physical backup |
+| `database` `string` | Name of the database used by the application. Default: `app`. |
+| `owner` `string` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. |
+| `secret` `LocalObjectReference` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch |
+
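+A minimal sketch of this method, referencing an external cluster named
+`source-db` as in the Bootstrap page examples:
+
+```yaml
+spec:
+  bootstrap:
+    pg_basebackup:
+      source: source-db
+```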
+
+
+
+## BootstrapRecovery {#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery}
+
+**Appears in:**
+
+- [BootstrapConfiguration](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration)
+
+BootstrapRecovery contains the configuration required to restore
+from an existing cluster using 3 methodologies: external cluster,
+volume snapshots, or backup objects. Full recovery and Point-In-Time
+Recovery are supported.
+The method can also be used to create clusters in continuous recovery
+(replica clusters), also supporting cascading replication when `instances` > 1.
+
+Once the cluster exits recovery, the password for the superuser
+will be changed through the provided secret.
+Refer to the Bootstrap page of the documentation for more information.
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `backup` [`BackupSource`](#postgresql-k8s-enterprisedb-io-v1-BackupSource) | The backup object containing the physical base backup from which to initiate the recovery procedure. Mutually exclusive with `source` and `volumeSnapshots`. |
+| `source` `string` | The external cluster whose backup we will restore. This is also used as the name of the folder under which the backup is stored, so it must be set to the name of the source cluster. Mutually exclusive with `backup`. |
+| `volumeSnapshots` `DataSource` | The static PVC data source(s) from which to initiate the recovery procedure. Currently supporting `VolumeSnapshot` and `PersistentVolumeClaim` resources that map an existing PVC group, compatible with EDB Postgres for Kubernetes, and taken with a cold backup copy on a fenced Postgres instance (limitation which will be removed in the future when online backup will be implemented). Mutually exclusive with `backup`. |
+| `recoveryTarget` `RecoveryTarget` | By default, the recovery process applies all the available WAL files in the archive (full recovery). However, you can also end the recovery as soon as a consistent state is reached or recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET |
+| `database` `string` | Name of the database used by the application. Default: `app`. |
+| `owner` `string` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. |
+| `secret` `LocalObjectReference` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch |
+
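+As a sketch, a recovery bootstrap from an external cluster with a
+point-in-time target, reusing the `clusterBackup` source and timestamp shown
+in the Bootstrap page examples:
+
+```yaml
+spec:
+  bootstrap:
+    recovery:
+      source: clusterBackup
+      recoveryTarget:
+        targetTime: "2020-11-26 15:22:00.00000+00"
+```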
+
+
+
+## CertificatesConfiguration {#postgresql-k8s-enterprisedb-io-v1-CertificatesConfiguration}
+
+**Appears in:**
+
+- [CertificatesStatus](#postgresql-k8s-enterprisedb-io-v1-CertificatesStatus)
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+CertificatesConfiguration contains the needed configurations to handle server certificates.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `serverCASecret` `string` | The secret containing the Server CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret. Contains: `ca.crt`: CA that should be used to validate the server certificate, used as `sslrootcert` in client connection strings; `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, this can be omitted. |
+| `serverTLSSecret` `string` | The secret of type `kubernetes.io/tls` containing the server TLS certificate and key that will be set as `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. If not defined, ServerCASecret must provide also `ca.key` and a new secret will be created using the provided CA. |
+| `replicationTLSSecret` `string` | The secret of type `kubernetes.io/tls` containing the client certificate to authenticate as the `streaming_replica` user. If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be created using the provided CA. |
+| `clientCASecret` `string` | The secret containing the Client CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate all the client certificates. Contains: `ca.crt`: CA that should be used to validate the client certificates, used as `ssl_ca_file` of all the instances; `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, this can be omitted. |
+| `serverAltDNSNames` `[]string` | The list of the server alternative DNS names to be added to the generated server TLS certificates, when required. |
+
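+For example, a hedged sketch providing user-supplied server certificates
+(the secret names are hypothetical):
+
+```yaml
+spec:
+  certificates:
+    serverCASecret: my-server-ca    # hypothetical secret name
+    serverTLSSecret: my-server-tls  # hypothetical secret name
+```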
+
+
+
+## CertificatesStatus {#postgresql-k8s-enterprisedb-io-v1-CertificatesStatus}
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+CertificatesStatus contains configuration certificates and related expiration dates.
+
+
+| Field | Description |
+| ----- | ----------- |
+| [`CertificatesConfiguration`](#postgresql-k8s-enterprisedb-io-v1-CertificatesConfiguration) | (Members of `CertificatesConfiguration` are embedded into this type.) Needed configurations to handle server certificates, initialized with default values, if needed. |
+| `expirations` `map[string]string` | Expiration dates for all certificates. |
+
+
+
+
+## ClusterSpec {#postgresql-k8s-enterprisedb-io-v1-ClusterSpec}
+
+**Appears in:**
+
+- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster)
+
+ClusterSpec defines the desired state of Cluster
+
+
+| Field | Description |
+| ----- | ----------- |
+| `description` `string` | Description of this PostgreSQL cluster |
+| `inheritedMetadata` `EmbeddedObjectMetadata` | Metadata that will be inherited by all objects related to the Cluster |
+| `imageName` `string` | Name of the container image, supporting both tags (`<image>:<tag>`) and digests for deterministic and repeatable deployments (`<image>:<tag>@sha256:<digestValue>`) |
+| `imagePullPolicy` `core/v1.PullPolicy` | Image pull policy. One of `Always`, `Never` or `IfNotPresent`. If not defined, it defaults to `IfNotPresent`. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images |
+| `schedulerName` `string` | If specified, the pod will be dispatched by the specified Kubernetes scheduler. If not specified, the pod will be dispatched by the default scheduler. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ |
+| `postgresUID` `int64` | The UID of the `postgres` user inside the image, defaults to `26` |
+| `postgresGID` `int64` | The GID of the `postgres` user inside the image, defaults to `26` |
+| `instances` [Required] `int` | Number of instances required in the cluster |
+| `minSyncReplicas` `int` | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. |
+| `maxSyncReplicas` `int` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. |
+| `postgresql` `PostgresConfiguration` | Configuration of the PostgreSQL server |
+| `replicationSlots` `ReplicationSlotsConfiguration` | Replication slots management configuration |
+| `bootstrap` [`BootstrapConfiguration`](#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration) | Instructions to bootstrap this cluster |
+| `replica` `ReplicaClusterConfiguration` | Replica cluster configuration |
+| `superuserSecret` `LocalObjectReference` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password |
+| `enableSuperuserAccess` `bool` | When this option is enabled, the operator will use the SuperuserSecret to update the `postgres` user password (if the secret is not present, the operator will automatically create one). When this option is disabled, the operator will ignore the SuperuserSecret content, delete it when automatically created, and then blank the password of the `postgres` user by setting it to `NULL`. Disabled by default. |
+| `certificates` [`CertificatesConfiguration`](#postgresql-k8s-enterprisedb-io-v1-CertificatesConfiguration) | The configuration for the CA and related certificates |
+| `imagePullSecrets` `[]LocalObjectReference` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret, that secret will be automatically included. |
+| `storage` `StorageConfiguration` | Configuration of the storage of the instances |
+| `serviceAccountTemplate` `ServiceAccountTemplate` | Configure the generation of the service account |
+| `walStorage` `StorageConfiguration` | Configuration of the storage for PostgreSQL WAL (Write-Ahead Log) |
+| `startDelay` `int32` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 3600). The startup probe failure threshold is derived from this value using the formula: ceiling(startDelay / 10). |
+| `stopDelay` `int32` | The time in seconds that is allowed for a PostgreSQL instance to gracefully shutdown (default 1800) |
+| `smartShutdownTimeout` `int32` | The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. Make sure you reserve enough time for the operator to request a fast shutdown of Postgres (that is: `stopDelay` - `smartShutdownTimeout`). |
+| `switchoverDelay` `int32` | The time in seconds that is allowed for a primary PostgreSQL instance to gracefully shutdown during a switchover. Default value is 3600 seconds (1 hour). |
+| `failoverDelay` `int32` | The amount of time (in seconds) to wait before triggering a failover after the primary PostgreSQL instance in the cluster was detected to be unhealthy |
+| `affinity` [`AffinityConfiguration`](#postgresql-k8s-enterprisedb-io-v1-AffinityConfiguration) | Affinity/Anti-affinity rules for Pods |
+| `topologySpreadConstraints` `[]core/v1.TopologySpreadConstraint` | TopologySpreadConstraints specifies how to spread matching pods among the given topology. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ |
+| `resources` `core/v1.ResourceRequirements` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. |
+| `ephemeralVolumesSizeLimit` [Required] `EphemeralVolumesSizeLimitConfiguration` | EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral volumes |
+| `priorityClassName` `string` | Name of the priority class which will be used in every generated Pod; if the PriorityClass specified does not exist, the pod will not be able to be scheduled. Please refer to https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass for more information |
+| `primaryUpdateStrategy` `PrimaryUpdateStrategy` | Deployment strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) |
+| `primaryUpdateMethod` `PrimaryUpdateMethod` | Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be with a switchover (`switchover`) or in-place (`restart` - default) |
+| `backup` [`BackupConfiguration`](#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration) | The configuration to be used for backups |
+| `nodeMaintenanceWindow` `NodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes |
+| `licenseKey` `string` | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. |
+| `licenseKeySecret` `core/v1.SecretKeySelector` | The reference to the license key. When this is set it takes precedence over LicenseKey. |
+| `monitoring` `MonitoringConfiguration` | The configuration of the monitoring infrastructure of this cluster |
+| `externalClusters` [`[]ExternalCluster`](#postgresql-k8s-enterprisedb-io-v1-ExternalCluster) | The list of external clusters which are used in the configuration |
+| `logLevel` `string` | The instances' log level, one of the following values: `error`, `warning`, `info` (default), `debug`, `trace` |
+| `projectedVolumeTemplate` `core/v1.ProjectedVolumeSource` | Template to be used to define projected volumes; projected volumes will be mounted under the `/projected` base folder |
+| `env` `[]core/v1.EnvVar` | Env follows the Env format to pass environment variables to the pods created in the cluster |
+| `envFrom` `[]core/v1.EnvFromSource` | EnvFrom follows the EnvFrom format to pass environment variables sources to the pods to be used by Env |
+| `managed` `ManagedConfiguration` | The configuration that is used by the portions of PostgreSQL that are managed by the instance manager |
+| `seccompProfile` `core/v1.SeccompProfile` | The SeccompProfile applied to every Pod and Container. Defaults to: `RuntimeDefault` |
+
+
+
+
+## ClusterStatus {#postgresql-k8s-enterprisedb-io-v1-ClusterStatus}
+
+**Appears in:**
+
+- [Cluster](#postgresql-k8s-enterprisedb-io-v1-Cluster)
+
+ClusterStatus defines the observed state of Cluster
+
+
+| Field | Description |
+| ----- | ----------- |
+| `instances`<br/>`int` | The total number of PVC Groups detected in the cluster. It may differ from the number of existing instance pods. |
+| `readyInstances`<br/>`int` | The total number of ready instances in the cluster. It is equal to the number of ready instance pods. |
+| `instancesStatus`<br/>`map[github.com/EnterpriseDB/cloud-native-postgres/pkg/utils.PodStatus][]string` | InstancesStatus indicates in which status the instances are. |
+| `instancesReportedState`<br/>`map[github.com/EnterpriseDB/cloud-native-postgres/api/v1.PodName]github.com/EnterpriseDB/cloud-native-postgres/api/v1.InstanceReportedState` | The reported state of the instances during the last reconciliation loop. |
+| `managedRolesStatus`<br/>`ManagedRoles` | ManagedRolesStatus reports the state of the managed roles in the cluster. |
+| `timelineID`<br/>`int` | The timeline of the Postgres cluster. |
+| `topology`<br/>`Topology` | Instances topology. |
+| `latestGeneratedNode`<br/>`int` | ID of the latest generated node (used to avoid node name clashing). |
+| `currentPrimary`<br/>`string` | Current primary instance. |
+| `targetPrimary`<br/>`string` | Target primary instance; this is different from the previous one during a switchover or a failover. |
+| `pvcCount`<br/>`int32` | How many PVCs have been created by this cluster. |
+| `jobCount`<br/>`int32` | How many Jobs have been created by this cluster. |
+| `danglingPVC`<br/>`[]string` | List of all the PVCs created by this cluster that are still available and not attached to a Pod. |
+| `resizingPVC`<br/>`[]string` | List of all the PVCs that have the ResizingPVC condition. |
+| `initializingPVC`<br/>`[]string` | List of all the PVCs that are being initialized by this cluster. |
+| `healthyPVC`<br/>`[]string` | List of all the PVCs that are neither dangling nor initializing. |
+| `unusablePVC`<br/>`[]string` | List of all the PVCs that are unusable because another PVC is missing. |
+| `licenseStatus`<br/>`github.com/EnterpriseDB/cloud-native-postgres/pkg/licensekey.Status` | Status of the license. |
+| `writeService`<br/>`string` | Current write pod. |
+| `readService`<br/>`string` | Current list of read pods. |
+| `phase`<br/>`string` | Current phase of the cluster. |
+| `phaseReason`<br/>`string` | Reason for the current phase. |
+| `secretsResourceVersion`<br/>`SecretsResourceVersion` | The list of resource versions of the secrets managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the secret data. |
+| `configMapResourceVersion`<br/>`ConfigMapResourceVersion` | The list of resource versions of the configmaps managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the configmap data. |
+| `certificates`<br/>`CertificatesStatus` | The configuration for the CA and related certificates, initialized with defaults. |
+| `firstRecoverabilityPoint`<br/>`string` | The first recoverability point, stored as a date in RFC3339 format. |
+| `lastSuccessfulBackup`<br/>`string` | Stored as a date in RFC3339 format. |
+| `lastFailedBackup`<br/>`string` | Stored as a date in RFC3339 format. |
+| `cloudNativePostgresqlCommitHash`<br/>`string` | The commit hash of the operator build that is running. |
+| `currentPrimaryTimestamp`<br/>`string` | The timestamp when the last actual promotion to primary occurred. |
+| `currentPrimaryFailingSinceTimestamp`<br/>`string` | The timestamp when the primary was detected to be unhealthy. This field is reported when `spec.failoverDelay` is populated or during online upgrades. |
+| `targetPrimaryTimestamp`<br/>`string` | The timestamp when the last request for a new primary occurred. |
+| `poolerIntegrations`<br/>`PoolerIntegrations` | The integrations needed by poolers referencing the cluster. |
+| `cloudNativePostgresqlOperatorHash`<br/>`string` | The hash of the binary of the operator. |
+| `conditions`<br/>`[]meta/v1.Condition` | Conditions for the cluster object. |
+| `instanceNames`<br/>`[]string` | List of instance names in the cluster. |
+| `onlineUpdateEnabled`<br/>`bool` | OnlineUpdateEnabled shows if the online upgrade is enabled inside the cluster. |
+| `azurePVCUpdateEnabled`<br/>`bool` | AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster. |
+
+
+
+
+## CompressionType {#postgresql-k8s-enterprisedb-io-v1-CompressionType}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [DataBackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-DataBackupConfiguration)
+
+- [WalBackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-WalBackupConfiguration)
+
+CompressionType encapsulates the available types of compression
+
+## ConfigMapKeySelector {#postgresql-k8s-enterprisedb-io-v1-ConfigMapKeySelector}
+
+**Appears in:**
+
+- [MonitoringConfiguration](#postgresql-k8s-enterprisedb-io-v1-MonitoringConfiguration)
+
+- [PostInitApplicationSQLRefs](#postgresql-k8s-enterprisedb-io-v1-PostInitApplicationSQLRefs)
+
+ConfigMapKeySelector contains enough information to let you locate
+the key of a ConfigMap
+
+
+| Field | Description |
+| ----- | ----------- |
+| `LocalObjectReference`<br/>`LocalObjectReference` | (Members of `LocalObjectReference` are embedded into this type.) The name of the secret in the pod's namespace to select from. |
+| `key` [Required]<br/>`string` | The key to select. |
+
+
+
+
+## ConfigMapResourceVersion {#postgresql-k8s-enterprisedb-io-v1-ConfigMapResourceVersion}
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+ConfigMapResourceVersion holds the resource versions of the config maps
+managed by the operator
+
+
+| Field | Description |
+| ----- | ----------- |
+| `metrics`<br/>`map[string]string` | A map with the versions of all the config maps used to pass metrics. Map keys are the config map names, map values are the versions. |
+
+
+
+
+## DataBackupConfiguration {#postgresql-k8s-enterprisedb-io-v1-DataBackupConfiguration}
+
+**Appears in:**
+
+- [BarmanObjectStoreConfiguration](#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration)
+
+DataBackupConfiguration is the configuration of the backup of
+the data directory
+
+
+| Field | Description |
+| ----- | ----------- |
+| `compression`<br/>`CompressionType` | Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. |
+| `encryption`<br/>`EncryptionType` | Whether to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms`. |
+| `jobs`<br/>`int32` | The number of parallel jobs to be used to upload the backup; defaults to 2. |
+| `immediateCheckpoint`<br/>`bool` | Control whether the I/O workload for the backup initial checkpoint will be limited, according to the `checkpoint_completion_target` setting on the PostgreSQL server. If set to true, an immediate checkpoint will be used, meaning PostgreSQL will complete the checkpoint as soon as possible. `false` by default. |
+
+
+
+
+## DataSource {#postgresql-k8s-enterprisedb-io-v1-DataSource}
+
+**Appears in:**
+
+- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery)
+
+DataSource contains the configuration required to bootstrap a
+PostgreSQL cluster from an existing storage
+
+
+
+## EPASConfiguration {#postgresql-k8s-enterprisedb-io-v1-EPASConfiguration}
+
+**Appears in:**
+
+- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration)
+
+EPASConfiguration contains EDB Postgres Advanced Server specific configurations
+
+
+| Field | Description |
+| ----- | ----------- |
+| `audit`<br/>`bool` | If true, enables `edb_audit` logging. |
+| `tde`<br/>`TDEConfiguration` | TDE configuration. |
+
+
+
+
+## EmbeddedObjectMetadata {#postgresql-k8s-enterprisedb-io-v1-EmbeddedObjectMetadata}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+EmbeddedObjectMetadata contains metadata to be inherited by all resources related to a Cluster
+
+
+| Field | Description |
+| ----- | ----------- |
+| `labels`<br/>`map[string]string` | No description provided. |
+| `annotations`<br/>`map[string]string` | No description provided. |
+
+
+
+
+## EncryptionType {#postgresql-k8s-enterprisedb-io-v1-EncryptionType}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [DataBackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-DataBackupConfiguration)
+
+- [WalBackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-WalBackupConfiguration)
+
+EncryptionType encapsulates the available types of encryption
+
+## EnsureOption {#postgresql-k8s-enterprisedb-io-v1-EnsureOption}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [RoleConfiguration](#postgresql-k8s-enterprisedb-io-v1-RoleConfiguration)
+
+EnsureOption represents whether we should enforce the presence or absence of
+a Role in a PostgreSQL instance
+
+## EphemeralVolumesSizeLimitConfiguration {#postgresql-k8s-enterprisedb-io-v1-EphemeralVolumesSizeLimitConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+EphemeralVolumesSizeLimitConfiguration contains the configuration of the ephemeral
+storage
+
+
+| Field | Description |
+| ----- | ----------- |
+| `shm` [Required]<br/>`k8s.io/apimachinery/pkg/api/resource.Quantity` | Shm is the size limit of the shared memory volume. |
+| `temporaryData` [Required]<br/>`k8s.io/apimachinery/pkg/api/resource.Quantity` | TemporaryData is the size limit of the temporary data volume. |
+
+
+
+
+## ExternalCluster {#postgresql-k8s-enterprisedb-io-v1-ExternalCluster}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ExternalCluster represents the connection parameters to an
+external cluster which is used in the other sections of the configuration
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required]<br/>`string` | The server name, required. |
+| `connectionParameters`<br/>`map[string]string` | The list of connection parameters, such as dbname, host, username, etc. |
+| `sslCert`<br/>`core/v1.SecretKeySelector` | The reference to an SSL certificate to be used to connect to this instance. |
+| `sslKey`<br/>`core/v1.SecretKeySelector` | The reference to an SSL private key to be used to connect to this instance. |
+| `sslRootCert`<br/>`core/v1.SecretKeySelector` | The reference to an SSL CA public key to be used to connect to this instance. |
+| `password`<br/>`core/v1.SecretKeySelector` | The reference to the password to be used to connect to the server. |
+| `barmanObjectStore`<br/>`BarmanObjectStoreConfiguration` | The configuration for the barman-cloud tool suite. |
+
+
+
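+As an illustrative sketch only (the cluster, service, and secret names below are
+hypothetical), an external cluster is declared under `spec.externalClusters` of a
+`Cluster` resource:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-target
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  externalClusters:
+    - name: cluster-origin
+      connectionParameters:
+        host: cluster-origin-rw.default.svc
+        user: postgres
+        dbname: postgres
+      password:
+        name: cluster-origin-superuser
+        key: password
+```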
+
+## GoogleCredentials {#postgresql-k8s-enterprisedb-io-v1-GoogleCredentials}
+
+**Appears in:**
+
+- [BarmanCredentials](#postgresql-k8s-enterprisedb-io-v1-BarmanCredentials)
+
+GoogleCredentials is the type for the Google Cloud Storage credentials.
+This needs to be specified even if we run inside a GKE environment.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `applicationCredentials`<br/>`SecretKeySelector` | The secret containing the Google Cloud Storage JSON file with the credentials. |
+| `gkeEnvironment`<br/>`bool` | If set to true, the operator will presume that it's running inside a GKE environment; defaults to false. |
+
+
+
+
+## Import {#postgresql-k8s-enterprisedb-io-v1-Import}
+
+**Appears in:**
+
+- [BootstrapInitDB](#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB)
+
+Import contains the configuration to init a database from a logical snapshot of an externalCluster
+
+
+| Field | Description |
+| ----- | ----------- |
+| `source` [Required]<br/>`ImportSource` | The source of the import. |
+| `type` [Required]<br/>`SnapshotType` | The import type. Can be `microservice` or `monolith`. |
+| `databases` [Required]<br/>`[]string` | The databases to import. |
+| `roles`<br/>`[]string` | The roles to import. |
+| `postImportApplicationSQL`<br/>`[]string` | List of SQL queries to be executed as a superuser in the application database right after it is imported - to be used with extreme care (by default empty). Only available in `microservice` type. |
+| `schemaOnly`<br/>`bool` | When set to true, only the pre-data and post-data sections of `pg_restore` are invoked, avoiding data import. Default: `false`. |
+
+
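+For illustration only, a minimal `microservice` import referencing a previously
+declared external cluster (names are hypothetical) could appear in the
+`bootstrap.initdb` section of a `Cluster`:
+
+```yaml
+bootstrap:
+  initdb:
+    import:
+      type: microservice
+      databases:
+        - app
+      source:
+        externalCluster: cluster-origin
+```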
+
+
+## ImportSource {#postgresql-k8s-enterprisedb-io-v1-ImportSource}
+
+**Appears in:**
+
+- [Import](#postgresql-k8s-enterprisedb-io-v1-Import)
+
+ImportSource describes the source for the logical snapshot
+
+
+| Field | Description |
+| ----- | ----------- |
+| `externalCluster` [Required]<br/>`string` | The name of the externalCluster used for import. |
+
+
+
+
+## InstanceID {#postgresql-k8s-enterprisedb-io-v1-InstanceID}
+
+**Appears in:**
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+InstanceID contains the information to identify an instance
+
+
+| Field | Description |
+| ----- | ----------- |
+| `podName`<br/>`string` | The pod name. |
+| `ContainerID`<br/>`string` | The container ID. |
+
+
+
+
+## InstanceReportedState {#postgresql-k8s-enterprisedb-io-v1-InstanceReportedState}
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+InstanceReportedState describes the last reported state of an instance during a reconciliation loop
+
+
+| Field | Description |
+| ----- | ----------- |
+| `isPrimary` [Required]<br/>`bool` | Indicates if an instance is the primary one. |
+| `timeLineID`<br/>`int` | Indicates on which TimelineId the instance is. |
+
+
+
+
+## LDAPBindAsAuth {#postgresql-k8s-enterprisedb-io-v1-LDAPBindAsAuth}
+
+**Appears in:**
+
+- [LDAPConfig](#postgresql-k8s-enterprisedb-io-v1-LDAPConfig)
+
+LDAPBindAsAuth provides the required fields to use the
+bind authentication for LDAP
+
+
+| Field | Description |
+| ----- | ----------- |
+| `prefix`<br/>`string` | Prefix for the bind authentication option. |
+| `suffix`<br/>`string` | Suffix for the bind authentication option. |
+
+
+
+
+## LDAPBindSearchAuth {#postgresql-k8s-enterprisedb-io-v1-LDAPBindSearchAuth}
+
+**Appears in:**
+
+- [LDAPConfig](#postgresql-k8s-enterprisedb-io-v1-LDAPConfig)
+
+LDAPBindSearchAuth provides the required fields to use
+the bind+search LDAP authentication process
+
+
+| Field | Description |
+| ----- | ----------- |
+| `baseDN`<br/>`string` | Root DN to begin the user search. |
+| `bindDN`<br/>`string` | DN of the user to bind to the directory. |
+| `bindPassword`<br/>`core/v1.SecretKeySelector` | Secret with the password for the user to bind to the directory. |
+| `searchAttribute`<br/>`string` | Attribute to match against the username. |
+| `searchFilter`<br/>`string` | Search filter to use when doing the search+bind authentication. |
+
+
+
+
+## LDAPConfig {#postgresql-k8s-enterprisedb-io-v1-LDAPConfig}
+
+**Appears in:**
+
+- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration)
+
+LDAPConfig contains the parameters needed for LDAP authentication
+
+
+| Field | Description |
+| ----- | ----------- |
+| `server`<br/>`string` | LDAP hostname or IP address. |
+| `port`<br/>`int` | LDAP server port. |
+| `scheme`<br/>`LDAPScheme` | LDAP scheme to be used; possible options are `ldap` and `ldaps`. |
+| `bindAsAuth`<br/>`LDAPBindAsAuth` | Bind as authentication configuration. |
+| `bindSearchAuth`<br/>`LDAPBindSearchAuth` | Bind+Search authentication configuration. |
+| `tls`<br/>`bool` | Set to `true` to enable LDAP over TLS. `false` is the default. |
+
+
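+As a sketch, assuming a reachable LDAP server and an existing `ldap-bind-password`
+secret (both hypothetical), a bind+search configuration in the `spec.postgresql`
+section could look like:
+
+```yaml
+postgresql:
+  ldap:
+    server: ldap.example.com
+    port: 389
+    scheme: ldap
+    bindSearchAuth:
+      baseDN: dc=example,dc=com
+      bindDN: cn=admin,dc=example,dc=com
+      bindPassword:
+        name: ldap-bind-password
+        key: password
+      searchAttribute: uid
+```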
+
+
+## LDAPScheme {#postgresql-k8s-enterprisedb-io-v1-LDAPScheme}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [LDAPConfig](#postgresql-k8s-enterprisedb-io-v1-LDAPConfig)
+
+LDAPScheme defines the possible schemes for LDAP
+
+## LocalObjectReference {#postgresql-k8s-enterprisedb-io-v1-LocalObjectReference}
+
+**Appears in:**
+
+- [BackupSource](#postgresql-k8s-enterprisedb-io-v1-BackupSource)
+
+- [BackupSpec](#postgresql-k8s-enterprisedb-io-v1-BackupSpec)
+
+- [BootstrapInitDB](#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB)
+
+- [BootstrapPgBaseBackup](#postgresql-k8s-enterprisedb-io-v1-BootstrapPgBaseBackup)
+
+- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery)
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+- [ConfigMapKeySelector](#postgresql-k8s-enterprisedb-io-v1-ConfigMapKeySelector)
+
+- [PgBouncerSpec](#postgresql-k8s-enterprisedb-io-v1-PgBouncerSpec)
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+- [RoleConfiguration](#postgresql-k8s-enterprisedb-io-v1-RoleConfiguration)
+
+- [ScheduledBackupSpec](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec)
+
+- [SecretKeySelector](#postgresql-k8s-enterprisedb-io-v1-SecretKeySelector)
+
+LocalObjectReference contains enough information to let you locate a
+local object with a known type inside the same namespace
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required]<br/>`string` | Name of the referent. |
+
+
+
+
+## ManagedConfiguration {#postgresql-k8s-enterprisedb-io-v1-ManagedConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ManagedConfiguration represents the portions of PostgreSQL that are managed
+by the instance manager
+
+
+| Field | Description |
+| ----- | ----------- |
+| `roles`<br/>`[]RoleConfiguration` | Database roles managed by the Cluster. |
+
+
+
+
+## ManagedRoles {#postgresql-k8s-enterprisedb-io-v1-ManagedRoles}
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+ManagedRoles tracks the status of a cluster's managed roles
+
+
+| Field | Description |
+| ----- | ----------- |
+| `byStatus`<br/>`map[github.com/EnterpriseDB/cloud-native-postgres/api/v1.RoleStatus][]string` | ByStatus gives the list of roles in each state. |
+| `cannotReconcile`<br/>`map[string][]string` | CannotReconcile lists roles that cannot be reconciled in PostgreSQL, with an explanation of the cause. |
+| `passwordStatus`<br/>`map[string]github.com/EnterpriseDB/cloud-native-postgres/api/v1.PasswordState` | PasswordStatus gives the last transaction id and password secret version for each managed role. |
+
+
+
+
+## Metadata {#postgresql-k8s-enterprisedb-io-v1-Metadata}
+
+**Appears in:**
+
+- [PodTemplateSpec](#postgresql-k8s-enterprisedb-io-v1-PodTemplateSpec)
+
+- [ServiceAccountTemplate](#postgresql-k8s-enterprisedb-io-v1-ServiceAccountTemplate)
+
+Metadata is a structure similar to the metav1.ObjectMeta, but still
+parseable by controller-gen to create a suitable CRD for the user.
+The comment of PodTemplateSpec has an explanation of why we are
+not using the core data types.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `labels`<br/>`map[string]string` | Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels |
+| `annotations`<br/>`map[string]string` | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations |
+
+
+
+
+## MonitoringConfiguration {#postgresql-k8s-enterprisedb-io-v1-MonitoringConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+MonitoringConfiguration is the type containing all the monitoring
+configuration for a certain cluster
+
+
+| Field | Description |
+| ----- | ----------- |
+| `disableDefaultQueries`<br/>`bool` | Whether the default queries should be injected. Set it to `true` if you don't want to inject default queries into the cluster. Default: `false`. |
+| `customQueriesConfigMap`<br/>`[]ConfigMapKeySelector` | The list of config maps containing the custom queries. |
+| `customQueriesSecret`<br/>`[]SecretKeySelector` | The list of secrets containing the custom queries. |
+| `enablePodMonitor`<br/>`bool` | Enable or disable the PodMonitor. |
+
+
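+For illustration, a `Cluster` could enable the PodMonitor and add custom queries
+from a config map in its `spec` section (the config map name and key below are
+hypothetical):
+
+```yaml
+monitoring:
+  enablePodMonitor: true
+  disableDefaultQueries: false
+  customQueriesConfigMap:
+    - name: example-monitoring
+      key: custom-queries
+```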
+
+
+## NodeMaintenanceWindow {#postgresql-k8s-enterprisedb-io-v1-NodeMaintenanceWindow}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+NodeMaintenanceWindow contains information that the operator
+will use while upgrading the underlying node.
+This option is only useful when the chosen storage prevents the Pods
+from being freely moved across nodes.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `reusePVC`<br/>`bool` | Reuse the existing PVC (wait for the node to come up again) or not (recreate it elsewhere - when instances > 1). |
+| `inProgress`<br/>`bool` | Is there a node maintenance activity in progress? |
+
+
+
+
+## PasswordState {#postgresql-k8s-enterprisedb-io-v1-PasswordState}
+
+**Appears in:**
+
+- [ManagedRoles](#postgresql-k8s-enterprisedb-io-v1-ManagedRoles)
+
+PasswordState represents the state of the password of a managed RoleConfiguration
+
+
+| Field | Description |
+| ----- | ----------- |
+| `transactionID`<br/>`int64` | The last transaction ID to affect the role definition in PostgreSQL. |
+| `resourceVersion`<br/>`string` | The resource version of the password secret. |
+
+
+
+
+## PgBouncerIntegrationStatus {#postgresql-k8s-enterprisedb-io-v1-PgBouncerIntegrationStatus}
+
+**Appears in:**
+
+- [PoolerIntegrations](#postgresql-k8s-enterprisedb-io-v1-PoolerIntegrations)
+
+PgBouncerIntegrationStatus encapsulates the needed integration for the pgbouncer poolers referencing the cluster
+
+
+| Field | Description |
+| ----- | ----------- |
+| `secrets`<br/>`[]string` | No description provided. |
+
+
+
+
+## PgBouncerPoolMode {#postgresql-k8s-enterprisedb-io-v1-PgBouncerPoolMode}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [PgBouncerSpec](#postgresql-k8s-enterprisedb-io-v1-PgBouncerSpec)
+
+PgBouncerPoolMode is the mode of PgBouncer
+
+## PgBouncerSecrets {#postgresql-k8s-enterprisedb-io-v1-PgBouncerSecrets}
+
+**Appears in:**
+
+- [PoolerSecrets](#postgresql-k8s-enterprisedb-io-v1-PoolerSecrets)
+
+PgBouncerSecrets contains the versions of the secrets used
+by pgbouncer
+
+
+| Field | Description |
+| ----- | ----------- |
+| `authQuery`<br/>`SecretVersion` | The auth query secret version. |
+
+
+
+
+## PgBouncerSpec {#postgresql-k8s-enterprisedb-io-v1-PgBouncerSpec}
+
+**Appears in:**
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+PgBouncerSpec defines how to configure PgBouncer
+
+
+| Field | Description |
+| ----- | ----------- |
+| `poolMode`<br/>`PgBouncerPoolMode` | The pool mode. Default: `session`. |
+| `authQuerySecret`<br/>`LocalObjectReference` | The credentials of the user that need to be used for the authentication query. In case it is specified, also an AuthQuery (e.g. "SELECT usename, passwd FROM pg_shadow WHERE usename=$1") has to be specified and no automatic CNP Cluster integration will be triggered. |
+| `authQuery`<br/>`string` | The query that will be used to download the hash of the password of a certain user. Default: "SELECT usename, passwd FROM user_search($1)". In case it is specified, also an AuthQuerySecret has to be specified and no automatic CNP Cluster integration will be triggered. |
+| `parameters`<br/>`map[string]string` | Additional parameters to be passed to PgBouncer - please check the CNP documentation for a list of options you can configure. |
+| `pg_hba`<br/>`[]string` | PostgreSQL Host Based Authentication rules (lines to be appended to the `pg_hba.conf` file). |
+| `paused`<br/>`bool` | When set to `true`, PgBouncer will disconnect from the PostgreSQL server, first waiting for all queries to complete, and pause all new client connections until this value is set to `false` (default). Internally, the operator calls PgBouncer's `PAUSE` and `RESUME` commands. |
+
+
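+As an illustrative sketch (resource names are hypothetical), a `Pooler` using
+these fields could be declared as follows:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Pooler
+metadata:
+  name: pooler-example-rw
+spec:
+  cluster:
+    name: cluster-example
+  instances: 3
+  type: rw
+  pgbouncer:
+    poolMode: session
+    parameters:
+      max_client_conn: "1000"
+      default_pool_size: "10"
+```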
+
+
+## PodTemplateSpec {#postgresql-k8s-enterprisedb-io-v1-PodTemplateSpec}
+
+**Appears in:**
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+PodTemplateSpec is a structure allowing the user to set
+a template for Pod generation.
+Unfortunately we can't use the corev1.PodTemplateSpec
+type because the generated CRD won't have the field for the
+metadata section.
+References:
+https://github.com/kubernetes-sigs/controller-tools/issues/385
+https://github.com/kubernetes-sigs/controller-tools/issues/448
+https://github.com/prometheus-operator/prometheus-operator/issues/3041
+
+
+| Field | Description |
+| ----- | ----------- |
+| `metadata`<br/>`Metadata` | Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata |
+| `spec`<br/>`core/v1.PodSpec` | Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |
+
+
+
+
+## PodTopologyLabels {#postgresql-k8s-enterprisedb-io-v1-PodTopologyLabels}
+
+(Alias of `map[string]string`)
+
+**Appears in:**
+
+- [Topology](#postgresql-k8s-enterprisedb-io-v1-Topology)
+
+PodTopologyLabels represents the topology of a Pod, as a `map[labelName]labelValue`.
+
+## PoolerIntegrations {#postgresql-k8s-enterprisedb-io-v1-PoolerIntegrations}
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+PoolerIntegrations encapsulates the needed integration for the poolers referencing the cluster
+
+
+
+## PoolerMonitoringConfiguration {#postgresql-k8s-enterprisedb-io-v1-PoolerMonitoringConfiguration}
+
+**Appears in:**
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+PoolerMonitoringConfiguration is the type containing all the monitoring
+configuration for a certain Pooler.
+Mirrors the Cluster's MonitoringConfiguration but without the custom queries
+part for now.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `enablePodMonitor`<br/>`bool` | Enable or disable the PodMonitor. |
+
+
+
+
+## PoolerSecrets {#postgresql-k8s-enterprisedb-io-v1-PoolerSecrets}
+
+**Appears in:**
+
+- [PoolerStatus](#postgresql-k8s-enterprisedb-io-v1-PoolerStatus)
+
+PoolerSecrets contains the versions of all the secrets used
+
+
+| Field | Description |
+| ----- | ----------- |
+| `serverTLS`<br/>`SecretVersion` | The server TLS secret version. |
+| `serverCA`<br/>`SecretVersion` | The server CA secret version. |
+| `clientCA`<br/>`SecretVersion` | The client CA secret version. |
+| `pgBouncerSecrets`<br/>`PgBouncerSecrets` | The version of the secrets used by PgBouncer. |
+
+
+
+
+## PoolerSpec {#postgresql-k8s-enterprisedb-io-v1-PoolerSpec}
+
+**Appears in:**
+
+- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler)
+
+PoolerSpec defines the desired state of Pooler
+
+
+| Field | Description |
+| ----- | ----------- |
+| `cluster` [Required]<br/>`LocalObjectReference` | This is the cluster reference on which the Pooler will work. Pooler name should never match with any cluster name within the same namespace. |
+| `type`<br/>`PoolerType` | Type of service to forward traffic to. Default: `rw`. |
+| `instances`<br/>`int32` | The number of replicas we want. Default: 1. |
+| `template`<br/>`PodTemplateSpec` | The template of the Pod to be created. |
+| `pgbouncer` [Required]<br/>`PgBouncerSpec` | The PgBouncer configuration. |
+| `deploymentStrategy`<br/>`apps/v1.DeploymentStrategy` | The deployment strategy to use for pgbouncer to replace existing pods with new ones. |
+| `monitoring`<br/>`PoolerMonitoringConfiguration` | The configuration of the monitoring infrastructure of this pooler. |
+
+
+
+
+## PoolerStatus {#postgresql-k8s-enterprisedb-io-v1-PoolerStatus}
+
+**Appears in:**
+
+- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler)
+
+PoolerStatus defines the observed state of Pooler
+
+
+| Field | Description |
+| ----- | ----------- |
+| `secrets`<br/>`PoolerSecrets` | The resource version of the config object. |
+| `instances`<br/>`int32` | The number of pods trying to be scheduled. |
+
+
+
+
+## PoolerType {#postgresql-k8s-enterprisedb-io-v1-PoolerType}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [PoolerSpec](#postgresql-k8s-enterprisedb-io-v1-PoolerSpec)
+
+PoolerType is the type of the connection pool, meaning the service
+we are targeting. Allowed values are `rw` and `ro`.
+
+## PostInitApplicationSQLRefs {#postgresql-k8s-enterprisedb-io-v1-PostInitApplicationSQLRefs}
+
+**Appears in:**
+
+- [BootstrapInitDB](#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB)
+
+PostInitApplicationSQLRefs holds references to ConfigMaps or Secrets which
+contain SQL files. The general execution order is from all Secrets to all
+ConfigMaps; within Secrets or ConfigMaps, execution follows the order of
+each array
+
+
+| Field | Description |
+| ----- | ----------- |
+| `secretRefs`<br/>`[]SecretKeySelector` | SecretRefs holds a list of references to Secrets. |
+| `configMapRefs`<br/>`[]ConfigMapKeySelector` | ConfigMapRefs holds a list of references to ConfigMaps. |
+
+
+
+
+## PostgresConfiguration {#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+PostgresConfiguration defines the PostgreSQL configuration
+
+
+| Field | Description |
+| ----- | ----------- |
+| `parameters`<br/>`map[string]string` | PostgreSQL configuration options (`postgresql.conf`). |
+| `pg_hba`<br/>`[]string` | PostgreSQL Host Based Authentication rules (lines to be appended to the `pg_hba.conf` file). |
+| `epas`<br/>`EPASConfiguration` | EDB Postgres Advanced Server specific configurations. |
+| `syncReplicaElectionConstraint`<br/>`SyncReplicaElectionConstraints` | Requirements to be met by sync replicas. This will affect how the `synchronous_standby_names` parameter will be set up. |
+| `shared_preload_libraries`<br/>`[]string` | Lists of shared preload libraries to add to the default ones. |
+| `ldap`<br/>`LDAPConfig` | Options to specify LDAP configuration. |
+| `promotionTimeout`<br/>`int32` | Specifies the maximum number of seconds to wait when promoting an instance to primary. Default value is 40000000, greater than one year in seconds, big enough to simulate an infinite timeout. |
+
+
+
+
+## PrimaryUpdateMethod {#postgresql-k8s-enterprisedb-io-v1-PrimaryUpdateMethod}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+PrimaryUpdateMethod contains the method to use when upgrading
+the primary server of the cluster as part of rolling updates
+
+## PrimaryUpdateStrategy {#postgresql-k8s-enterprisedb-io-v1-PrimaryUpdateStrategy}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+PrimaryUpdateStrategy contains the strategy to follow when upgrading
+the primary server of the cluster as part of rolling updates
+
+## RecoveryTarget {#postgresql-k8s-enterprisedb-io-v1-RecoveryTarget}
+
+**Appears in:**
+
+- [BootstrapRecovery](#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery)
+
+RecoveryTarget allows you to configure the point where the recovery process
+will stop. All the target options except TargetTLI are mutually exclusive.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `backupID`<br/>`string` | The ID of the backup from which to start the recovery process. If empty (default), the operator will automatically detect the backup based on targetTime or targetLSN if specified. Otherwise it will use the latest available backup in chronological order. |
+| `targetTLI`<br/>`string` | The target timeline ("latest" or a positive integer). |
+| `targetXID`<br/>`string` | The target transaction ID. |
+| `targetName`<br/>`string` | The target name (to be previously created with `pg_create_restore_point`). |
+| `targetLSN`<br/>`string` | The target LSN (Log Sequence Number). |
+| `targetTime`<br/>`string` | The target time as a timestamp in the RFC3339 standard. |
+| `targetImmediate`<br/>`bool` | End recovery as soon as a consistent state is reached. |
+| `exclusive`<br/>`bool` | Set the target to be exclusive. If omitted, defaults to false, so that in Postgres, `recovery_target_inclusive` will be true. |
+
+
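+For illustration, a point-in-time recovery bootstrap could set a `recoveryTarget`
+as follows (the source name and timestamp are hypothetical):
+
+```yaml
+bootstrap:
+  recovery:
+    source: cluster-origin
+    recoveryTarget:
+      targetTime: "2023-10-17T12:00:00Z"
+```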
+
+
+## ReplicaClusterConfiguration {#postgresql-k8s-enterprisedb-io-v1-ReplicaClusterConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ReplicaClusterConfiguration encapsulates the configuration of a replica
+cluster
+
+
+| Field | Description |
+| ----- | ----------- |
+| `source` [Required]<br/>`string` | The name of the external cluster which is the replication origin. |
+| `enabled` [Required]<br/>`bool` | If replica mode is enabled, this cluster will be a replica of an existing cluster. A replica cluster can be created from a recovery object store or via streaming through `pg_basebackup`. Refer to the Replica clusters page of the documentation for more information. |
+
+
+
+
+## ReplicationSlotsConfiguration {#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ReplicationSlotsConfiguration encapsulates the configuration
+of replication slots
+
+
+| Field | Description |
+| ----- | ----------- |
+| `highAvailability`<br/>`ReplicationSlotsHAConfiguration` | Replication slots for high availability configuration. |
+| `updateInterval`<br/>`int` | Standby will update the status of the local replication slots every `updateInterval` seconds (default 30). |
+
+
+
+
+## ReplicationSlotsHAConfiguration {#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsHAConfiguration}
+
+**Appears in:**
+
+- [ReplicationSlotsConfiguration](#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration)
+
+ReplicationSlotsHAConfiguration encapsulates the configuration
+of the replication slots that are automatically managed by
+the operator to control the streaming replication connections
+with the standby instances for high availability (HA) purposes.
+Replication slots are a PostgreSQL feature that makes sure
+that PostgreSQL automatically keeps WAL files in the primary
+when a streaming client (in this specific case a replica that
+is part of the HA cluster) gets disconnected.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `enabled`<br/>`bool` | If enabled (default), the operator will automatically manage replication slots on the primary instance and use them in streaming replication connections with all the standby instances that are part of the HA cluster. If disabled, the operator will not take advantage of replication slots in streaming connections with the replicas. This feature also controls the replication slots in replica clusters, from the designated primary to its cascading replicas. |
+| `slotPrefix`<br/>`string` | Prefix for replication slots managed by the operator for HA. It may only contain lower case letters, numbers, and the underscore character. This can only be set at creation time. By default set to `_cnp_`. |
+
+
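+A minimal sketch of this configuration in a `Cluster` spec, using the defaults
+documented above:
+
+```yaml
+replicationSlots:
+  highAvailability:
+    enabled: true
+    slotPrefix: _cnp_
+  updateInterval: 30
+```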
+
+
+## RoleConfiguration {#postgresql-k8s-enterprisedb-io-v1-RoleConfiguration}
+
+**Appears in:**
+
+- [ManagedConfiguration](#postgresql-k8s-enterprisedb-io-v1-ManagedConfiguration)
+
+RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+with the additional field Ensure, specifying whether to ensure the presence or
+absence of the role in the database.
+The defaults of the CREATE ROLE command are applied.
+Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name` [Required]<br/>`string` | Name of the role. |
+| `comment`<br/>`string` | Description of the role. |
+| `ensure`<br/>`EnsureOption` | Ensure the role is present or absent - defaults to "present". |
+| `passwordSecret`<br/>`LocalObjectReference` | Secret containing the password of the role (if present). If null, the password will be ignored unless DisablePassword is set. |
+| `connectionLimit`<br/>`int64` | If the role can log in, this specifies how many concurrent connections the role can make. -1 (the default) means no limit. |
+| `validUntil`<br/>`meta/v1.Time` | Date and time after which the role's password is no longer valid. When omitted, the password will never expire (default). |
+| `inRoles`<br/>`[]string` | List of one or more existing roles to which this role will be immediately added as a new member. Default empty. |
+| `inherit`<br/>`bool` | Whether a role "inherits" the privileges of roles it is a member of. Default is `true`. |
+| `disablePassword`<br/>`bool` | DisablePassword indicates that a role's password should be set to NULL in Postgres. |
+| `superuser`<br/>`bool` | Whether the role is a superuser who can override all access restrictions within the database - superuser status is dangerous and should be used only when really needed. You must yourself be a superuser to create a new superuser. Default is `false`. |
+| `createdb`<br/>`bool` | When set to `true`, the role being defined will be allowed to create new databases. Specifying `false` (default) will deny a role the ability to create databases. |
+| `createrole`<br/>`bool` | Whether the role will be permitted to create, alter, drop, comment on, change the security label for, and grant or revoke membership in other roles. Default is `false`. |
+| `login`<br/>`bool` | Whether the role is allowed to log in. A role having the login attribute can be thought of as a user. Roles without this attribute are useful for managing database privileges, but are not users in the usual sense of the word. Default is `false`. |
+| `replication`<br/>`bool` | Whether a role is a replication role. A role must have this attribute (or be a superuser) in order to be able to connect to the server in replication mode (physical or logical replication) and in order to be able to create or drop replication slots. A role having the replication attribute is a very highly privileged role, and should only be used on roles actually used for replication. Default is `false`. |
+| `bypassrls`<br/>`bool` | Whether a role bypasses every row-level security (RLS) policy. Default is `false`. |
+
+
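+For illustration, a managed role could be declared under `spec.managed.roles`
+of a `Cluster` (the role and secret names are hypothetical):
+
+```yaml
+managed:
+  roles:
+    - name: dante
+      ensure: present
+      comment: Dante Alighieri
+      login: true
+      inRoles:
+        - pg_monitor
+      passwordSecret:
+        name: cluster-example-dante
+```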
+
+
+## S3Credentials {#postgresql-k8s-enterprisedb-io-v1-S3Credentials}
+
+**Appears in:**
+
+- [BarmanCredentials](#postgresql-k8s-enterprisedb-io-v1-BarmanCredentials)
+
+S3Credentials is the type for the credentials to be used to upload
+files to S3. It can be provided in two alternative ways: as explicit access and
+secret keys, or by inheriting role-based authentication from the environment
+(see `inheritFromIAMRole`).
+
+
+
+| Field | Description |
+| ----- | ----------- |
+| `accessKeyId`<br/>`SecretKeySelector` | The reference to the access key id. |
+| `secretAccessKey`<br/>`SecretKeySelector` | The reference to the secret access key. |
+| `region`<br/>`SecretKeySelector` | The reference to the secret containing the region name. |
+| `sessionToken`<br/>`SecretKeySelector` | The reference to the session key. |
+| `inheritFromIAMRole`<br/>`bool` | Use role-based authentication without explicitly providing the keys. |
+
+
+
+
+## ScheduledBackupSpec {#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec}
+
+**Appears in:**
+
+- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup)
+
+ScheduledBackupSpec defines the desired state of ScheduledBackup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `suspend`<br/>`bool` | If this backup is suspended or not. |
+| `immediate`<br/>`bool` | Whether the first backup has to start immediately after creation or not. |
+| `schedule` [Required]<br/>`string` | The schedule does not follow the same format used in Kubernetes CronJobs, as it includes an additional seconds specifier. See https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format |
+| `cluster` [Required]<br/>`LocalObjectReference` | The cluster to backup. |
+| `backupOwnerReference`<br/>`string` | Indicates which ownerReference should be put inside the created backup resources.<br/>- `none`: no owner reference for created backup objects (same behavior as before the field was introduced)<br/>- `self`: sets the ScheduledBackup object as owner of the backup<br/>- `cluster`: sets the cluster as owner of the backup |
+| `target`<br/>`BackupTarget` | The policy to decide which instance should perform this backup. If empty, it defaults to `cluster.spec.backup.target`. Available options are empty string, `primary` and `prefer-standby`. `primary` to have backups run always on primary instances, `prefer-standby` to have backups run preferably on the most updated standby, if available. |
+| `method`<br/>`BackupMethod` | The backup method to be used, possible options are `barmanObjectStore` and `volumeSnapshot`. Defaults to: `barmanObjectStore`. |
+
+
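+As a sketch, note the six-field cron format with the leading seconds specifier
+(resource names below are hypothetical):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: ScheduledBackup
+metadata:
+  name: backup-example
+spec:
+  schedule: "0 0 0 * * *"  # seconds minutes hours day month weekday
+  backupOwnerReference: self
+  cluster:
+    name: cluster-example
+```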
+
+
+## ScheduledBackupStatus {#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupStatus}
+
+**Appears in:**
+
+- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup)
+
+ScheduledBackupStatus defines the observed state of ScheduledBackup
+
+
+| Field | Description |
+| ----- | ----------- |
+| `lastCheckTime`<br/>`meta/v1.Time` | The latest time the schedule was checked. |
+| `lastScheduleTime`<br/>`meta/v1.Time` | The last time a backup was successfully scheduled. |
+| `nextScheduleTime`<br/>`meta/v1.Time` | The next time a backup will run. |
+
+
+
+
+## SecretKeySelector {#postgresql-k8s-enterprisedb-io-v1-SecretKeySelector}
+
+**Appears in:**
+
+- [AzureCredentials](#postgresql-k8s-enterprisedb-io-v1-AzureCredentials)
+
+- [BackupSource](#postgresql-k8s-enterprisedb-io-v1-BackupSource)
+
+- [BackupStatus](#postgresql-k8s-enterprisedb-io-v1-BackupStatus)
+
+- [BarmanObjectStoreConfiguration](#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration)
+
+- [GoogleCredentials](#postgresql-k8s-enterprisedb-io-v1-GoogleCredentials)
+
+- [MonitoringConfiguration](#postgresql-k8s-enterprisedb-io-v1-MonitoringConfiguration)
+
+- [PostInitApplicationSQLRefs](#postgresql-k8s-enterprisedb-io-v1-PostInitApplicationSQLRefs)
+
+- [S3Credentials](#postgresql-k8s-enterprisedb-io-v1-S3Credentials)
+
+SecretKeySelector contains enough information to let you locate
+the key of a Secret
+
+
+| Field | Description |
+| ----- | ----------- |
+| `LocalObjectReference`<br/>`LocalObjectReference` | (Members of `LocalObjectReference` are embedded into this type.) The name of the secret in the pod's namespace to select from. |
+| `key` [Required]<br/>`string` | The key to select. |
+
+
+
+
+## SecretVersion {#postgresql-k8s-enterprisedb-io-v1-SecretVersion}
+
+**Appears in:**
+
+- [PgBouncerSecrets](#postgresql-k8s-enterprisedb-io-v1-PgBouncerSecrets)
+
+- [PoolerSecrets](#postgresql-k8s-enterprisedb-io-v1-PoolerSecrets)
+
+SecretVersion contains a secret name and its ResourceVersion
+
+
+| Field | Description |
+| ----- | ----------- |
+| `name`<br/>`string` | The name of the secret. |
+| `version`<br/>`string` | The ResourceVersion of the secret. |
+
+
+
+
+## SecretsResourceVersion {#postgresql-k8s-enterprisedb-io-v1-SecretsResourceVersion}
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+SecretsResourceVersion is the resource versions of the secrets
+managed by the operator
+
+
+| Field | Description |
+| ----- | ----------- |
+| `superuserSecretVersion`<br/>`string` | The resource version of the "postgres" user secret. |
+| `replicationSecretVersion`<br/>`string` | The resource version of the "streaming_replica" user secret. |
+| `applicationSecretVersion`<br/>`string` | The resource version of the "app" user secret. |
+| `managedRoleSecretVersion`<br/>`map[string]string` | The resource versions of the managed roles secrets. |
+| `caSecretVersion`<br/>`string` | Unused. Retained for compatibility with old versions. |
+| `clientCaSecretVersion`<br/>`string` | The resource version of the PostgreSQL client-side CA secret. |
+| `serverCaSecretVersion`<br/>`string` | The resource version of the PostgreSQL server-side CA secret. |
+| `serverSecretVersion`<br/>`string` | The resource version of the PostgreSQL server-side secret. |
+| `barmanEndpointCA`<br/>`string` | The resource version of the Barman Endpoint CA, if provided. |
+| `metrics`<br/>`map[string]string` | A map with the versions of all the secrets used to pass metrics. Map keys are the secret names, map values are the versions. |
+
+
+
+
+## ServiceAccountTemplate {#postgresql-k8s-enterprisedb-io-v1-ServiceAccountTemplate}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+ServiceAccountTemplate contains the template needed to generate the service accounts
+
+
+| Field | Description |
+| ----- | ----------- |
+| `metadata` [Required]<br/>`Metadata` | Metadata are the metadata to be used for the generated service account. |
+
+
+
+
+## SnapshotOwnerReference {#postgresql-k8s-enterprisedb-io-v1-SnapshotOwnerReference}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [VolumeSnapshotConfiguration](#postgresql-k8s-enterprisedb-io-v1-VolumeSnapshotConfiguration)
+
+SnapshotOwnerReference defines the reference type for the owner of the snapshot.
+This specifies which owner the processed resources should relate to.
+
+## SnapshotType {#postgresql-k8s-enterprisedb-io-v1-SnapshotType}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [Import](#postgresql-k8s-enterprisedb-io-v1-Import)
+
+SnapshotType is a type of allowed import
+
+## StorageConfiguration {#postgresql-k8s-enterprisedb-io-v1-StorageConfiguration}
+
+**Appears in:**
+
+- [ClusterSpec](#postgresql-k8s-enterprisedb-io-v1-ClusterSpec)
+
+StorageConfiguration is the configuration of the storage of the PostgreSQL instances
+
+
+| Field | Description |
+| ----- | ----------- |
+| `storageClass`<br/>`string` | StorageClass to use for database data (`PGDATA`). Applied after evaluating the PVC template, if available. If not specified, generated PVCs will be satisfied by the default storage class. |
+| `size`<br/>`string` | Size of the storage. Required if not already specified in the PVC template. Changes to this field are automatically reapplied to the created PVCs. Size cannot be decreased. |
+| `resizeInUseVolumes`<br/>`bool` | Resize existent PVCs; defaults to true. |
+| `pvcTemplate`<br/>`core/v1.PersistentVolumeClaimSpec` | Template to be used to generate the Persistent Volume Claim. |
+
+
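+For illustration, a minimal storage section in a `Cluster` spec (the storage
+class name is hypothetical):
+
+```yaml
+storage:
+  storageClass: standard
+  size: 1Gi
+  resizeInUseVolumes: true
+```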
+
+
+## SyncReplicaElectionConstraints {#postgresql-k8s-enterprisedb-io-v1-SyncReplicaElectionConstraints}
+
+**Appears in:**
+
+- [PostgresConfiguration](#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration)
+
+SyncReplicaElectionConstraints contains the constraints for sync replicas election.
+For anti-affinity parameters, two instances are considered in the same location
+if all the label values match.
+In the future, synchronous replica election restriction by name will be supported.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `nodeLabelsAntiAffinity`<br/>`[]string` | A list of node label values to extract and compare to evaluate if the pods reside in the same topology or not. |
+| `enabled` [Required]<br/>`bool` | This flag enables the constraints for sync replicas. |
+
+
+
+
+## TDEConfiguration {#postgresql-k8s-enterprisedb-io-v1-TDEConfiguration}
+
+**Appears in:**
+
+- [EPASConfiguration](#postgresql-k8s-enterprisedb-io-v1-EPASConfiguration)
+
+TDEConfiguration contains the Transparent Data Encryption configuration
+
+
+| Field | Description |
+| ----- | ----------- |
+| `enabled`<br/>`bool` | True if we want to have TDE enabled. |
+| `secretKeyRef`<br/>`core/v1.SecretKeySelector` | Reference to the secret that contains the encryption key. |
+| `wrapCommand`<br/>`core/v1.SecretKeySelector` | WrapCommand is the encryption command provided by the user. |
+| `unwrapCommand`<br/>`core/v1.SecretKeySelector` | UnwrapCommand is the decryption command provided by the user. |
+| `passphraseCommand`<br/>`core/v1.SecretKeySelector` | PassphraseCommand is the command executed to get the passphrase that will be passed to the OpenSSL command to encrypt and decrypt. |
+
+
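+A minimal sketch enabling TDE with an encryption key stored in a secret, in the
+`spec.postgresql` section (the secret name and key are hypothetical):
+
+```yaml
+postgresql:
+  epas:
+    tde:
+      enabled: true
+      secretKeyRef:
+        name: tde-key-secret
+        key: key
+```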
+
+
+## Topology {#postgresql-k8s-enterprisedb-io-v1-Topology}
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-k8s-enterprisedb-io-v1-ClusterStatus)
+
+Topology contains the cluster topology
+
+
+| Field | Description |
+| ----- | ----------- |
+| `instances`<br/>`map[github.com/EnterpriseDB/cloud-native-postgres/api/v1.PodName]github.com/EnterpriseDB/cloud-native-postgres/api/v1.PodTopologyLabels` | Instances contains the pod topology of the instances. |
+| `nodesUsed`<br/>`int32` | NodesUsed represents the count of distinct nodes accommodating the instances. A value of '1' suggests that all instances are hosted on a single node, implying the absence of High Availability (HA). Ideally, this value should be the same as the number of instances in the Postgres HA cluster, implying a shared-nothing architecture on the compute side. |
+| `successfullyExtracted`<br/>`bool` | SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors in synchronous replica election in case of failures. |
+
+
+
+
+## VolumeSnapshotConfiguration {#postgresql-k8s-enterprisedb-io-v1-VolumeSnapshotConfiguration}
+
+**Appears in:**
+
+- [BackupConfiguration](#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration)
+
+VolumeSnapshotConfiguration represents the configuration for the execution of snapshot backups.
+
+
+| Field | Description |
+| ----- | ----------- |
+| `labels`<br/>`map[string]string` | Labels are key-value pairs that will be added to `.metadata.labels` of snapshot resources. |
+| `annotations`<br/>`map[string]string` | Annotations are key-value pairs that will be added to `.metadata.annotations` of snapshot resources. |
+| `className`<br/>`string` | ClassName specifies the Snapshot Class to be used for the `PG_DATA` PersistentVolumeClaim. It is the default class for the other types if no specific class is present. |
+| `walClassName`<br/>`string` | WalClassName specifies the Snapshot Class to be used for the `PG_WAL` PersistentVolumeClaim. |
+| `snapshotOwnerReference`<br/>`SnapshotOwnerReference` | SnapshotOwnerReference indicates the type of owner reference the snapshot should have. |
+
+
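+For illustration, a `Cluster` backup section could configure volume snapshots as
+follows (the snapshot class name is hypothetical):
+
+```yaml
+backup:
+  volumeSnapshot:
+    className: csi-snapshot-class
+    snapshotOwnerReference: cluster
+    labels:
+      environment: example
+```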
+
+
+## WalBackupConfiguration {#postgresql-k8s-enterprisedb-io-v1-WalBackupConfiguration}
+
+**Appears in:**
+
+- [BarmanObjectStoreConfiguration](#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration)
+
+WalBackupConfiguration is the configuration of the backup of the
+WAL stream
+
+
+| Field | Description |
+| ----- | ----------- |
+| `compression`<br/>`CompressionType` | Compress a WAL file before sending it to the object store. Available options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. |
+| `encryption`<br/>`EncryptionType` | Whether to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms`. |
+| `maxParallel`<br/>`int` | Number of WAL files to be either archived in parallel (when the PostgreSQL instance is archiving to a backup object store) or restored in parallel (when a PostgreSQL standby is fetching WAL files from a recovery object store). If not specified, WAL files will be processed one at a time. It accepts a positive integer as a value - with 1 being the minimum accepted value. |
+
+
+
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx
index 7b3f273e583..0529567f01e 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx
@@ -38,6 +38,25 @@ spec:
You can find a complete example using projected volume template to mount Secret and Configmap in
the [cluster-example-projected-volume.yaml](../samples/cluster-example-projected-volume.yaml) deployment manifest.
+## Ephemeral volumes
+
+EDB Postgres for Kubernetes relies on [ephemeral volumes](https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/)
+for part of the internal activities. Ephemeral volumes exist for the sole duration of
+a pod's life, without persisting across pod restarts.
+
+### Volume for temporary storage
+
+An ephemeral volume is used for temporary storage. An upper bound on its size can be
+configured via the `spec.ephemeralVolumesSizeLimit.temporaryData` field in the cluster
+spec.
+
+### Volume for shared memory
+
+This volume is used as the shared memory space for Postgres: it is also an
+ephemeral volume, but stored in memory. An upper bound on the size can be configured
+via the `spec.ephemeralVolumesSizeLimit.shm` field in the cluster spec. This is used
+only in case of [PostgreSQL running with `posix` shared memory dynamic allocation](postgresql_conf.md#dynamic-shared-memory-settings),
+as shown in the sketch below.
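+
+As a sketch, both limits can be set in the cluster spec (the cluster name and the
+values are illustrative only):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  ephemeralVolumesSizeLimit:
+    shm: 256Mi
+    temporaryData: 2Gi
+  storage:
+    size: 1Gi
+```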
+
## Environment variables
Some system behavior can be customized using environment variables. One example is
diff --git a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx
index 63521eb0afc..103c6fb197f 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx
@@ -71,7 +71,7 @@ Additionally, EDB Postgres for Kubernetes automatically creates a secret with th
same name of the pooler containing the configuration files used with PgBouncer.
!!! Seealso "API reference"
- For details, please refer to [`PgBouncerSpec` section](api_reference.md#PgBouncerSpec)
+ For details, please refer to [`PgBouncerSpec` section](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-PgBouncerSpec)
in the API reference.
## Pooler resource lifecycle
@@ -177,7 +177,7 @@ GRANT EXECUTE ON FUNCTION user_search(text)
You can take advantage of pod templates specification in the `template`
section of a `Pooler` resource. For details, please refer to [`PoolerSpec`
-section](api_reference.md#PoolerSpec) in the API reference.
+section](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-PoolerSpec) in the API reference.
Through templates you can configure pods as you like, including fine
control over affinity and anti-affinity rules for pods and nodes.
@@ -344,12 +344,13 @@ metrics having the `cnp_pgbouncer_` prefix, by running:
Similarly to the EDB Postgres for Kubernetes instance, the exporter runs on port
`9127` of each pod running PgBouncer, and also provides metrics related to the
-Go runtime (with prefix `go_*`). You can debug the exporter on a pod running
-PgBouncer through the following command:
+Go runtime (with prefix `go_*`).
-```console
-kubectl exec -ti -- curl 127.0.0.1:9127/metrics
-```
+!!! Info
+ You can inspect the exported metrics on a pod running PgBouncer, by following
+ the instructions provided in the
+ ["How to inspect the exported metrics" section from the "Monitoring" page](monitoring.md/#how-to-inspect-the-exported-metrics),
+ making sure that you use the correct IP and the `9127` port.
An example of the output for `cnp_pgbouncer` metrics:
diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx
index bd6906061e8..5dacb1ae67f 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx
@@ -61,7 +61,7 @@ $ kubectl cnp status
Cluster Summary
Name: cluster-example
Namespace: default
-PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3
+PostgreSQL Image: quay.io/enterprisedb/postgresql:16.0
Primary instance: cluster-example-2
Status: Cluster in healthy state
Instances: 3
diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx
index 44b34bd9470..2d963716e9f 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx
@@ -42,7 +42,7 @@ spec:
The role specification in `spec.managed.roles` adheres to the
[PostgreSQL structure and naming conventions](https://www.postgresql.org/docs/current/sql-createrole.html).
-Please refer to the [API reference](api_reference.md#RoleConfiguration) for
+Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-RoleConfiguration) for
the full list of attributes you can define for each role.
A few points are worth noting:
diff --git a/product_docs/docs/postgres_for_kubernetes/1/default-monitoring.yaml b/product_docs/docs/postgres_for_kubernetes/1/default-monitoring.yaml
index 08dd66346a8..bc2a4fa4877 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/default-monitoring.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/default-monitoring.yaml
@@ -132,13 +132,15 @@ data:
description: "Number of streaming replicas connected to the instance"
pg_replication_slots:
- primary: true
query: |
SELECT slot_name,
slot_type,
database,
active,
- pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)
+ (CASE pg_catalog.pg_is_in_recovery()
+ WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)
+ ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)
+ END) as pg_wal_lsn_diff
FROM pg_catalog.pg_replication_slots
WHERE NOT temporary
metrics:
@@ -319,6 +321,7 @@ data:
SELECT usename
, COALESCE(application_name, '') AS application_name
, COALESCE(client_addr::text, '') AS client_addr
+ , COALESCE(client_port::text, '') AS client_port
, EXTRACT(EPOCH FROM backend_start) AS backend_start
, COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes
@@ -339,6 +342,9 @@ data:
- client_addr:
usage: "LABEL"
description: "Client IP address"
+ - client_port:
+ usage: "LABEL"
+ description: "Client TCP port"
- backend_start:
usage: "COUNTER"
description: "Time when this process was started"
diff --git a/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx b/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx
index 9d73191c64b..f30c78cf359 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx
@@ -4,32 +4,25 @@ originalFilePath: 'src/evaluation.md'
---
EDB Postgres for Kubernetes is available for a free evaluation.
-The process is different between Vanilla/Community PostgreSQL
-and EDB Postgres Advanced.
-Refer to [License and License keys](license_keys.md)
-for terms and more details.
+The process differs between Community PostgreSQL and EDB Postgres Advanced Server.
-## Evaluating PostgreSQL
+## Evaluating using PostgreSQL
By default, EDB Postgres for Kubernetes installs the latest available
-version of Community PostgreSQL. The operator automatically
-generates an implicit trial license for the cluster that lasts for
-30 days.
+version of Community PostgreSQL.
-This license is ideal for evaluation, proof of concept, integration with CI/CD pipelines, and so on.
+No license key is required. The operator automatically generates an implicit trial license for the cluster that lasts for
+30 days. This trial license is ideal for evaluation, proof of concept, integration with CI/CD pipelines, and so on.
-PostgreSQL container images are available at
-[quay.io/enterprisedb/postgresql](https://quay.io/repository/enterprisedb/postgresql).
+PostgreSQL container images are available at [quay.io/enterprisedb/postgresql](https://quay.io/repository/enterprisedb/postgresql).
-## Evaluating EDB Postgres Advanced Server
+## Evaluating using EDB Postgres Advanced Server
-You can use EDB Postgres for Kubernetes with EDB Postgres Advanced Server
-too. You need to request a trial license key from the
-[EDB website](https://cloud-native.enterprisedb.com).
+You can use EDB Postgres for Kubernetes with EDB Postgres Advanced Server. You will need a trial key to use EDB Postgres Advanced Server.
-EDB Postgres Advanced container images are available at
-[quay.io/enterprisedb/edb-postgres-advanced](https://quay.io/repository/enterprisedb/edb-postgres-advanced).
+!!! Note Obtaining your trial key
+ You can request a key from the **[EDB Postgres for Kubernetes Trial License Request](https://cloud-native.enterprisedb.com/trial/)** page. You will also need to be signed into your EDB Account. If you do not have an EDB Account, you can [register for one](https://www.enterprisedb.com/accounts/register) on the EDB site.
Once you have received the license key, you can use EDB Postgres Advanced Server
by setting in the `spec` section of the `Cluster` deployment configuration file:
@@ -37,4 +30,11 @@ by setting in the `spec` section of the `Cluster` deployment configuration file:
- `imageName` to point to the `quay.io/enterprisedb/edb-postgres-advanced` repository
- `licenseKey` to your license key (in the form of a string)
-Please refer to the full example in the [configuration samples](samples.md) section.
\ No newline at end of file
+EDB Postgres Advanced container images are available at
+[quay.io/enterprisedb/edb-postgres-advanced](https://quay.io/repository/enterprisedb/edb-postgres-advanced).
+
+To see how `imageName` and `licenseKey` are set, refer to the [cluster-full-example](../samples/cluster-example-full.yaml) file from the [configuration samples](samples.md) section.
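+
+As a minimal, hypothetical sketch (cluster name, version tag, and key are
+placeholders to adapt to your environment):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: epas-eval
+spec:
+  instances: 3
+  imageName: quay.io/enterprisedb/edb-postgres-advanced:15.4
+  licenseKey: <your trial license key>
+  storage:
+    size: 1Gi
+```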
+
+## Further Information
+
+Refer to [License and License keys](license_keys.md) for terms and more details.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx
index 1c59d5874c2..a193b6a86d3 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx
@@ -75,6 +75,33 @@ or starting from a physical backup of the *primary* otherwise.
Self-healing will happen as soon as the *apiserver* is notified.
+You can trigger a sudden failure on a given pod of the cluster using the
+following generic command:
+
+```sh
+kubectl delete -n [namespace] \
+ pod/[cluster-name]-[serial] --grace-period=1
+```
+
+For example, if you want to simulate a real failure on the primary and trigger
+the failover process, you can run:
+
+```sh
+kubectl delete pod [primary pod] --grace-period=1
+```
+
+!!! Warning
+ Never use `--grace-period=0` in your failover simulation tests, as this
+ might produce misleading results with your PostgreSQL cluster. A grace
+ period of 0 guarantees that the pod is immediately removed from the
+ Kubernetes API server, without first ensuring that the PID 1 process of
+ the `postgres` container (the instance manager) is shut down - contrary
+    to what would happen in the case of a real failure (e.g. unplugging the
+    power cord or a network partition).
+ As a result, the operator doesn't see the pod of the primary anymore, and
+ triggers a failover promoting the most aligned standby, without
+ the guarantee that the primary had been shut down.
+
### Readiness probe failure
After 3 failures, the pod will be considered *not ready*. The pod will still
diff --git a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
index 1c9f2646b15..45a79ef1a60 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
@@ -134,6 +134,30 @@ Kubernetes controller. If the desired state and the actual state don't
match, reconciliation loops trigger self-healing procedures. That's
where an operator like EDB Postgres for Kubernetes comes into play.
+**Are there any other operators for Postgres out there?**
+
+Yes, of course. And our advice is that you look at all of them and compare
+them with EDB Postgres for Kubernetes before making your decision. You will see that
+most of these operators use an external failover management tool (Patroni
+or similar) and rely on StatefulSets.
+
+Here is a non-exhaustive list, in chronological order of their
+publication on GitHub:
+
+- [Crunchy Data Postgres Operator](https://github.com/CrunchyData/postgres-operator) (2017)
+- [Zalando Postgres Operator](https://github.com/zalando/postgres-operator) (2017)
+- [Stackgres](https://github.com/ongres/stackgres) (2020)
+- [Percona Operator for PostgreSQL](https://github.com/percona/percona-postgresql-operator) (2021)
+- [Kubegres](https://github.com/reactive-tech/kubegres) (2021)
+
+Feel free to report any relevant missing entry as a PR.
+
+!!! Info
+ The [Data on Kubernetes Community](https://dok.community)
+    (which includes some of our maintainers) is working on an independent,
+    vendor-neutral project to list the available operators, called the
+    [Operator Feature Matrix](https://github.com/dokc/operator-feature-matrix).
+
**You say that EDB Postgres for Kubernetes is a fully declarative operator.
What do you mean by that?**
@@ -206,14 +230,18 @@ of truth to:
- control the Kubernetes services, that is the entry points for your
applications
+**Should I manually resync a former primary with the new one following a
+failover?**
+
+No. The operator does that automatically for you, and relies on `pg_rewind` to
+synchronize the former primary with the new one.
+
Red Hat OpenShift Container Platform users can test the certified operator for
-EDB Postgres for Kubernetes on the [Red Hat CodeReady Containers (CRC)](https://developers.redhat.com/products/codeready-containers/overview)
-for OpenShift.
+EDB Postgres for Kubernetes on [Red Hat OpenShift Local](https://developers.redhat.com/products/openshift-local/overview) (formerly Red Hat CodeReady Containers).
!!! Warning
The instructions contained in this section are for demonstration,
@@ -32,7 +31,7 @@ cluster on your local Kubernetes/Openshift installation and experiment with it.
!!! Important
Make sure that you have `kubectl` installed on your machine in order
- to connect to the Kubernetes cluster, or `oc` if using CRC for OpenShift.
+ to connect to the Kubernetes cluster, or `oc` if using OpenShift Local.
Please follow the Kubernetes documentation on [how to install `kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
or the Openshift documentation on [how to install `oc`](https://docs.openshift.com/container-platform/4.6/cli_reference/openshift_cli/getting-started-cli.html).
@@ -40,9 +39,9 @@ cluster on your local Kubernetes/Openshift installation and experiment with it.
If you are running Openshift, use `oc` every time `kubectl` is mentioned
in this documentation. `kubectl` commands are compatible with `oc` ones.
-## Part 1: Setup the local Kubernetes/Openshift playground
+## Part 1 - Set up the local Kubernetes/OpenShift Local playground
-The first part is about installing Minikube, Kind, or CRC. Please spend some time
+The first part is about installing Minikube, Kind, or OpenShift Local. Please spend some time
reading about the systems and decide which one to proceed with.
After setting up one of them, please proceed with part 2.
@@ -85,9 +84,9 @@ then create a Kubernetes cluster with:
kind create cluster --name pg
```
-### CodeReady Containers (CRC)
+### OpenShift Local (formerly CodeReady Containers, or CRC)
-1. [Download Red Hat CRC](https://developers.redhat.com/products/codeready-containers/overview)
+1. [Download OpenShift Local](https://developers.redhat.com/products/openshift-local/overview)
and move the binary inside a directory in your `PATH`.
2. Run the following commands:
@@ -106,7 +105,7 @@ kind create cluster --name pg
command. You can also open the web console running `crc console`.
User and password are the same as for the `oc login` command.
-5. CRC doesn't come with a StorageClass, so one has to be configured.
+5. OpenShift Local doesn't come with a StorageClass, so one has to be configured.
Follow the [Dynamic volume provisioning wiki page](https://github.com/code-ready/crc/wiki/Dynamic-volume-provisioning)
and install `rancher/local-path-provisioner`.
@@ -150,7 +149,7 @@ spec:
!!! Note "There's more"
For more detailed information about the available options, please refer
- to the ["API Reference" section](api_reference.md).
+ to the ["API Reference" section](cloudnative-pg.v1.md).
In order to create the 3-node PostgreSQL cluster, you need to run the following command:
diff --git a/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
new file mode 100644
index 00000000000..c2578e9e2ba
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
@@ -0,0 +1,615 @@
+---
+title: 'Recovery'
+originalFilePath: 'src/recovery.md'
+---
+
+In PostgreSQL terminology, recovery is the process of starting a PostgreSQL
+instance using a previously taken backup. The PostgreSQL recovery mechanism
+is very solid and rich. It also supports Point In Time Recovery, which allows
+you to restore a given cluster up to any point in time from the first available
+backup in your catalog to the last archived WAL (as you can see, the WAL
+archive is mandatory in this case).
+
+In EDB Postgres for Kubernetes, recovery cannot be performed "in-place" on an existing
+cluster. Recovery is rather a way to bootstrap a new Postgres cluster
+starting from an available physical backup.
+
+!!! Note
+ For details on the `bootstrap` stanza, please refer to the
+ ["Bootstrap" section](bootstrap.md).
+
+The `recovery` bootstrap mode lets you create a new cluster from an existing
+physical base backup, and then reapply the WAL files containing the REDO log
+from the archive.
+
+WAL files are pulled from the defined *recovery object store*.
+
+Base backups may be taken either on object stores, or using volume snapshots
+(from version 1.21).
+
+!!! Warning
+    Recovery using volume snapshots had an initial release in 1.20.1. Because of
+    the amount of progress on the feature for 1.21.0, it is strongly advised
+    that you upgrade to 1.21.0 or a later release to use volume
+    snapshots.
+
+Recovery from a *recovery object store* can be achieved in two ways:
+
+- using a recovery object store, that is, a backup of another cluster
+ created by Barman Cloud and defined via the `barmanObjectStore` option
+ in the `externalClusters` section (*recommended*)
+- using an existing `Backup` object in the same namespace (this was the
+ only option available before version 1.8.0).
+
+Both recovery methods enable either full recovery (up to the last
+available WAL) or up to a [point in time](#point-in-time-recovery-pitr).
+When performing a full recovery, the cluster can also be started
+in replica mode (see [replica clusters](replica_cluster.md) for reference).
+If using replica mode, make sure that the PostgreSQL configuration
+(`.spec.postgresql.parameters`) of the recovered cluster is
+compatible, from a physical replication standpoint, with the original one.
+
+For recovery using volume snapshots:
+
+- take a consistent cold backup of the Postgres cluster from a standby through
+  the `kubectl cnp backup` command (see the [plugin document](kubectl-plugin.md#requesting-a-new-base-backup)
+  for reference), which creates the necessary `VolumeSnapshot` objects (two if
+  you have a separate volume for WALs, one if you don't)
+- recover from the above `VolumeSnapshot` objects through the `volumeSnapshots`
+  option in the `.spec.bootstrap.recovery` stanza, as described in
+  ["Recovery from `VolumeSnapshot` objects"](#recovery-from-volumesnapshot-objects)
+  below
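+
+For instance, assuming a cluster named `cluster-example` and a plugin version
+that supports selecting the backup method (check `kubectl cnp backup --help`
+for the exact flag on your version), the request might look like this sketch:
+
+```sh
+# hypothetical invocation: request a cold base backup via volume snapshots
+kubectl cnp backup cluster-example -m volumeSnapshot
+```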
+
+## Recovery from an object store
+
+You can recover from a backup created by Barman Cloud and stored on a supported
+object store. Once you have defined the external cluster, including all the
+required configuration in the `barmanObjectStore` section, you need to
+reference it in the `.spec.recovery.source` option. The following example
+defines a recovery object store in a blob container in Azure:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore
+spec:
+ [...]
+
+ superuserSecret:
+ name: superuser-secret
+
+ bootstrap:
+ recovery:
+ source: clusterBackup
+
+ externalClusters:
+ - name: clusterBackup
+ barmanObjectStore:
+ destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
+ azureCredentials:
+ storageAccount:
+ name: recovery-object-store-secret
+ key: storage_account_name
+ storageKey:
+ name: recovery-object-store-secret
+ key: storage_account_key
+ wal:
+ maxParallel: 8
+```
+
+!!! Important
+ By default the `recovery` method strictly uses the `name` of the
+ cluster in the `externalClusters` section as the name of the main folder
+ of the backup data within the object store, which is normally reserved
+ for the name of the server. You can specify a different folder name
+ with the `barmanObjectStore.serverName` property.
+
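+As a minimal sketch, suppose the backup data in the object store lives under a
+folder named `pg-main` (a hypothetical server name); you would then point
+recovery at that folder as follows:
+
+```yaml
+  externalClusters:
+  - name: clusterBackup
+    barmanObjectStore:
+      serverName: pg-main  # hypothetical folder name in the object store
+      destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
+      [...]
+```
+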
+!!! Note
+ In the above example we are taking advantage of the parallel WAL restore
+ feature, dedicating up to 8 jobs to concurrently fetch the required WAL
+ files from the archive. This feature can appreciably reduce the recovery time.
+ Make sure that you plan ahead for this scenario and correctly tune the
+    value of this parameter for your environment. It will certainly make a
+    difference **when** (not if) you need it.
+
+## Recovery from `VolumeSnapshot` objects
+
+!!! Warning
+ When creating replicas after having recovered the primary instance from
+ the volume snapshot, the operator might end up using `pg_basebackup`
+ to synchronize them, resulting in a slower process depending on the size
+    of the database. This limitation will be lifted in the future when support
+    for online backups is introduced.
+
+EDB Postgres for Kubernetes can create a new cluster from a `VolumeSnapshot` of a PVC of an
+existing `Cluster` that's been taken using the declarative API for
+[volume snapshot backups](backup_volumesnapshot.md).
+You will need to specify the name of the snapshot, as in the following example:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore
+spec:
+ [...]
+
+  bootstrap:
+    recovery:
+      volumeSnapshots:
+        storage:
+          name:
+          kind: VolumeSnapshot
+          apiGroup: snapshot.storage.k8s.io
+```
+
+In case the backed-up cluster was using a separate PVC to store the WAL files,
+the recovery must include that too:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore
+spec:
+ [...]
+
+  bootstrap:
+    recovery:
+      volumeSnapshots:
+        storage:
+          name:
+          kind: VolumeSnapshot
+          apiGroup: snapshot.storage.k8s.io
+
+        walStorage:
+          name:
+          kind: VolumeSnapshot
+          apiGroup: snapshot.storage.k8s.io
+```
+
+!!! Warning
+    When bootstrapping a replica-mode cluster from snapshots, in order to
+    leverage snapshots for the standby instances and not just the primary,
+    it is advisable to:
+
+    1. start with a single-instance replica cluster. The primary instance will
+       be recovered using the snapshot and the available WALs from the source cluster
+ 2. take a snapshot of the primary in the replica cluster
+ 3. increase the number of instances in the replica cluster as desired
+
+## Recovery from a `Backup` object
+
+!!! Important
+ Recovery from `Backup` objects works only on object store backups,
+ not on volume snapshots.
+
+In case a `Backup` resource is already available in the namespace in which the
+cluster should be created, you can specify its name through
+`.spec.bootstrap.recovery.backup.name`, as in the following example:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-example-initdb
+spec:
+ instances: 3
+
+ superuserSecret:
+ name: superuser-secret
+
+ bootstrap:
+ recovery:
+ backup:
+ name: backup-example
+
+ storage:
+ size: 1Gi
+```
+
+This bootstrap method allows you to specify just a reference to the
+backup that needs to be restored.
+
+The previous example assumes that the application database and its owning user
+are the default ones, both named `app`. If the PostgreSQL cluster being restored
+was using different names, you can specify them as documented in the [Configure
+the application database](#configure-the-application-database) section.
+
+## Additional considerations
+
+Whether you recover from a recovery object store, a volume snapshot, or an
+existing `Backup` resource, the following considerations apply:
+
+- The application database name and the application database user are preserved
+ from the backup that is being restored. The operator does not currently attempt
+ to back up the underlying secrets, as this is part of the usual maintenance
+ activity of the Kubernetes cluster itself.
+- In case you don't supply any `superuserSecret`, a new one is automatically
+ generated with a secure and random password. The secret is then used to
+ reset the password for the `postgres` user of the cluster.
+- By default, the recovery will continue up to the latest
+ available WAL on the default target timeline (`current` for PostgreSQL up to
+ 11, `latest` for version 12 and above).
+ You can optionally specify a `recoveryTarget` to perform a point in time
+ recovery (see the ["Point in time recovery" section](#point-in-time-recovery-pitr)).
+
+!!! Important
+ Consider using the `barmanObjectStore.wal.maxParallel` option to speed
+ up WAL fetching from the archive by concurrently downloading the transaction
+ logs from the recovery object store.
+
+## Point in time recovery (PITR)
+
+Instead of replaying all the WALs up to the latest one, we can ask PostgreSQL
+to stop replaying WALs at any given point in time, after having extracted a
+base backup. PostgreSQL uses this technique to achieve *point-in-time* recovery
+(PITR). The presence of a WAL archive is mandatory.
+
+!!! Important
+ PITR requires you to specify a **recovery target**, by using the options
+ described in the ["Recovery targets" section](#recovery-targets) below.
+
+The operator will generate the configuration parameters required for this
+feature to work in case a recovery target is specified.
+
+### PITR from an object store
+
+The example below uses a recovery object store in Azure that contains both
+the base backups and the WAL archive. The recovery target is based on a
+requested timestamp:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore-pitr
+spec:
+ instances: 3
+
+ storage:
+ size: 5Gi
+
+ bootstrap:
+ recovery:
+ # Recovery object store containing WAL archive and base backups
+ source: clusterBackup
+ recoveryTarget:
+ # Time base target for the recovery
+ targetTime: "2023-08-11 11:14:21.00000+02"
+
+ externalClusters:
+ - name: clusterBackup
+ barmanObjectStore:
+ destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
+ azureCredentials:
+ storageAccount:
+ name: recovery-object-store-secret
+ key: storage_account_name
+ storageKey:
+ name: recovery-object-store-secret
+ key: storage_account_key
+ wal:
+ maxParallel: 8
+```
+
+You might have noticed that in the above example you only had to specify
+the `targetTime` in the form of a timestamp, without having to worry about
+specifying the base backup from which to start the recovery.
+
+The `backupID` option is the one that allows you to specify the base backup
+from which to initiate the recovery process. By default, this value is
+empty.
+
+If you assign a value to it (in the form of a Barman backup ID), the operator
+will use that backup as base for the recovery.
+
+!!! Important
+ You need to make sure that such a backup exists and is accessible.
+
+If the backup ID is not specified, the operator will automatically detect the
+base backup for the recovery as follows:
+
+- when you use `targetTime` or `targetLSN`, the operator selects the closest
+ backup that was completed before that target
+- otherwise the operator selects the last available backup in chronological
+ order.
+
+### PITR from `VolumeSnapshot` Objects
+
+The example below uses:
+
+- a Kubernetes volume snapshot for the `PGDATA` containing the base backup from
+ which to start the recovery process, identified in the
+ `recovery.volumeSnapshots` section and called `test-snapshot-1`
+- a recovery object store in MinIO containing the WAL archive, identified by
+ the `recovery.source` option in the form of an external cluster definition
+
+The recovery target is based on a requested timestamp.
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-example-snapshot
+spec:
+ # ...
+ bootstrap:
+ recovery:
+ source: cluster-example-with-backup
+ volumeSnapshots:
+ storage:
+ name: test-snapshot-1
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
+ recoveryTarget:
+ targetTime: "2023-07-06T08:00:39"
+ externalClusters:
+ - name: cluster-example-with-backup
+ barmanObjectStore:
+ destinationPath: s3://backups/
+ endpointURL: http://minio:9000
+ s3Credentials:
+ accessKeyId:
+ name: minio
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: minio
+ key: ACCESS_SECRET_KEY
+```
+
+!!! Note
+    In case the backed-up Cluster had `walStorage` enabled, you must also
+ specify the volume snapshot containing the `PGWAL` directory, as mentioned
+ in the [Recovery from VolumeSnapshot objects](#recovery-from-volumesnapshot-objects)
+ section.
+
+!!! Warning
+ It is your responsibility to ensure that the end time of the base backup in
+ the volume snapshot is prior to the recovery target timestamp.
+
+### Recovery targets
+
+Here are the recovery target criteria you can use:
+
+targetTime
+: time stamp up to which recovery will proceed, expressed in
+ [RFC 3339](https://datatracker.ietf.org/doc/html/rfc3339) format
+ (the precise stopping point is also influenced by the `exclusive` option)
+
+targetXID
+: transaction ID up to which recovery will proceed
+ (the precise stopping point is also influenced by the `exclusive` option);
+ keep in mind that while transaction IDs are assigned sequentially at
+ transaction start, transactions can complete in a different numeric order.
+ The transactions that will be recovered are those that committed before
+ (and optionally including) the specified one
+
+targetName
+: named restore point (created with `pg_create_restore_point()`) to which
+ recovery will proceed
+
+targetLSN
+: LSN of the write-ahead log location up to which recovery will proceed
+ (the precise stopping point is also influenced by the `exclusive` option)
+
+targetImmediate
+: recovery should end as soon as a consistent state is reached - i.e. as early
+ as possible. When restoring from an online backup, this means the point where
+ taking the backup ended
+
+!!! Important
+ While the operator is able to automatically retrieve the closest backup
+ when either `targetTime` or `targetLSN` is specified, this is not possible
+ for the remaining targets: `targetName`, `targetXID`, and `targetImmediate`.
+ In such cases, it is important to specify `backupID`, unless you are OK with
+ the last available backup in the catalog.
+
+The example below uses a `targetName` based recovery target:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+ bootstrap:
+ recovery:
+ source: clusterBackup
+ recoveryTarget:
+ backupID: 20220616T142236
+ targetName: 'restore_point_1'
+[...]
+```
+
+You can choose only one of the above targets in each
+`recoveryTarget` configuration.
+
+Additionally, you can specify `targetTLI` to force recovery to a specific
+timeline.
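+
+As a minimal, illustrative sketch (the timeline value is a placeholder):
+
+```yaml
+    recoveryTarget:
+      targetTLI: "2"  # illustrative timeline ID
+```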
+
+By default, the previous parameters are considered to be inclusive, stopping
+just after the recovery target, matching [the behavior in PostgreSQL](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-RECOVERY-TARGET-INCLUSIVE).
+You can request exclusive behavior, stopping right before the recovery target,
+by setting the `exclusive` parameter to `true`, as in the following example
+relying on a blob container in Azure for both base backups and the WAL archive:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore-pitr
+spec:
+ instances: 3
+
+ storage:
+ size: 5Gi
+
+ bootstrap:
+ recovery:
+ source: clusterBackup
+ recoveryTarget:
+ backupID: 20220616T142236
+ targetName: "maintenance-activity"
+ exclusive: true
+
+ externalClusters:
+ - name: clusterBackup
+ barmanObjectStore:
+ destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
+ azureCredentials:
+ storageAccount:
+ name: recovery-object-store-secret
+ key: storage_account_name
+ storageKey:
+ name: recovery-object-store-secret
+ key: storage_account_key
+ wal:
+ maxParallel: 8
+```
+
+## Configure the application database
+
+For the recovered cluster, you can configure the application database name and
+credentials with additional configuration. To update the application database
+credentials, you can generate your own passwords, store them as secrets, and
+update the database to use those secrets. Alternatively, you can let the
+operator generate a secret with a randomly secure password. Please refer to the
+["Bootstrap an empty cluster"](bootstrap.md#bootstrap-an-empty-cluster-initdb)
+section for more information about secrets.
+
+The following example configures the application database `app` with owner
+`app` and the supplied secret `app-secret`:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ bootstrap:
+ recovery:
+ database: app
+ owner: app
+ secret:
+ name: app-secret
+ [...]
+```
+
+With the above configuration, the following will happen after recovery is completed:
+
+1. if database `app` does not exist, a new database `app` will be created.
+2. if user `app` does not exist, a new user `app` will be created.
+3. if user `app` is not the owner of the database, user `app` will be made
+   the owner of database `app`.
+4. if the value of `username` matches the value of `owner` in the secret, the
+   password of the application database will be changed to the value of
+   `password` in the secret.
+
+!!! Important
+ For a replica cluster with replica mode enabled, the operator will not
+ create any database or user in the PostgreSQL instance, as these will be
+ recovered from the original cluster.
+
+## How recovery works under the hood
+
+You can use the data uploaded to the object storage to *bootstrap* a
+new cluster from a previously taken backup.
+The operator will orchestrate the recovery process using the
+`barman-cloud-restore` tool (for the base backup) and the
+`barman-cloud-wal-restore` tool (for WAL files, including parallel support, if
+requested).
+
+For details and instructions on the `recovery` bootstrap method, please refer
+to the ["Bootstrap from a backup" section](bootstrap.md#bootstrap-from-a-backup-recovery).
+
+!!! Important
+ If you are not familiar with how [PostgreSQL PITR](https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-PITR-RECOVERY)
+    works, we suggest that you configure the recovery cluster identically to the
+    original one when it comes to `.spec.postgresql.parameters`. Once the new cluster is
+ restored, you can then change the settings as desired.
+
+Under the hood, the operator will inject an init container in the first
+instance of the new cluster, and the init container will start recovering the
+backup from the object storage.
+
+!!! Important
+ The duration of the base backup copy in the new PVC depends on
+ the size of the backup, as well as the speed of both the network and the
+ storage.
+
+When the base backup recovery process is completed, the operator starts the
+Postgres instance in recovery mode: in this phase, PostgreSQL is up, albeit not
+able to accept connections, and the pod is healthy according to the
+liveness probe. Through the `restore_command`, PostgreSQL starts fetching WAL
+files from the archive (you can speed up this phase by setting the
+`maxParallel` option, enabling the parallel WAL restore capability).
+
+This phase terminates when PostgreSQL reaches the target (either the end of the
+WAL or the required target in case of Point-In-Time-Recovery). Indeed, you can
+optionally specify a `recoveryTarget` to perform a point in time recovery. If
+left unspecified, the recovery will continue up to the latest available WAL on
+the default target timeline (`current` for PostgreSQL up to 11, `latest` for
+version 12 and above).
+
+Once the recovery is complete, the operator will set the required
+superuser password into the instance. The new primary instance will start
+as usual, and the remaining instances will join the cluster as replicas.
+
+The process is transparent to the user and is managed by the instance
+manager running in the Pods.
+
+## Restoring into a cluster with a backup section
+
+A manifest for a cluster restore may include a `backup` section.
+This means that the new cluster, after recovery, will start archiving WALs and
+taking backups if configured to do so.
+
+For example, the section below could be part of a manifest for a Cluster
+bootstrapping from Cluster `cluster-example-backup`, and would create a
+new folder in the storage bucket named `recoveredCluster` where the base backups
+and WALs of the recovered cluster would be stored.
+
+```yaml
+ backup:
+ barmanObjectStore:
+ destinationPath: s3://backups/
+ endpointURL: http://minio:9000
+ serverName: "recoveredCluster"
+ s3Credentials:
+ accessKeyId:
+ name: minio
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: minio
+ key: ACCESS_SECRET_KEY
+ retentionPolicy: "30d"
+
+ externalClusters:
+ - name: cluster-example-backup
+ barmanObjectStore:
+ destinationPath: s3://backups/
+ endpointURL: http://minio:9000
+ s3Credentials:
+```
+
+You should not reuse the exact same `barmanObjectStore` configuration
+for different clusters: in some cases, the existing information
+in the storage buckets could be overwritten by the new cluster.
+
+!!! Warning
+ The operator includes a safety check to ensure a cluster will not
+ overwrite a storage bucket that contained information. A cluster that would
+ overwrite existing storage will remain in state `Setting up primary` with
+ Pods in an Error state.
+ The pod logs will show:
+ `ERROR: WAL archive check failed for server recoveredCluster: Expected empty archive`
+
+!!! Important
+ If you set the `k8s.enterprisedb.io/skipEmptyWalArchiveCheck` annotation to `enabled` in
+    the recovered cluster, you can skip the above check. This is not recommended,
+    as the check works fine for the general use case. Please don't do
+    this unless you are familiar with the PostgreSQL recovery system, as it can
+    lead to severe data loss.
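+
+A minimal sketch of where the annotation goes (the cluster name is illustrative):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-restore
+  annotations:
+    k8s.enterprisedb.io/skipEmptyWalArchiveCheck: "enabled"
+spec:
+  [...]
+```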
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx
index e568ba94361..1372ffb7b86 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx
@@ -25,29 +25,39 @@ and kept synchronized through the
[replica cluster](architecture.md#deployments-across-kubernetes-clusters) feature. The source
can be a primary cluster or another replica cluster (cascading replica cluster).
-The available options in terms of replication, both at bootstrap and continuous
-recovery level, are:
+The first step is to bootstrap the replica cluster, choosing one of the
+available methods:
+
+- streaming replication, via `pg_basebackup`
+- recovery from a volume snapshot
+- recovery from a Barman Cloud backup in an object store
+
+Please refer to the ["Bootstrap" section](bootstrap.md#bootstrap-from-another-cluster)
+for information on how to clone a PostgreSQL server using either
+`pg_basebackup` (streaming) or `recovery` (volume snapshot or object store).
+
+Once the replica cluster's base backup is available, you need to define how
+changes are replicated from the origin, through PostgreSQL continuous recovery.
+There are two options:
- use streaming replication between the replica cluster and the source
(this will certainly require some administrative and security related
work to be done to make sure that the network connection between the
two clusters are correctly setup)
-- use a Barman Cloud object store for recovery of the base backups and
- the WAL files that are regularly shipped from the source to the object
- store and pulled by `barman-cloud-wal-restore` in the replica cluster
+- use the WAL archive (on an object store) to fetch the WAL files that are
+ regularly shipped from the source to the object store and pulled by
+ `barman-cloud-wal-restore` in the replica cluster
- any of the two
All you have to do is actually define an external cluster.
-Please refer to the ["Bootstrap" section](bootstrap.md#bootstrap-from-another-cluster)
-for information on how to clone a PostgreSQL server using either
-`pg_basebackup` (streaming) or `recovery` (object store).
If the external cluster contains a `barmanObjectStore` section:
+- you'll be able to use the WAL archive, and EDB Postgres for Kubernetes will automatically
+ set the `restore_command` in the designated primary instance
- you'll be able to bootstrap the replica cluster from an object store
- using the `recovery` section
-- EDB Postgres for Kubernetes will automatically set the `restore_command`
- in the designated primary instance
+ using the `recovery` section, in case you cannot take advantage of
+ volume snapshots
If the external cluster contains a `connectionParameters` section:
@@ -79,12 +89,14 @@ file and define the following parts accordingly:
- define the `externalClusters` section in the replica cluster
- define the bootstrap part for the replica cluster. We can either bootstrap via
- streaming using the `pg_basebackup` section, or bootstrap from an object store
- using the `recovery` section
+ streaming using the `pg_basebackup` section, or bootstrap from a volume snapshot
+ or an object store using the `recovery` section
- define the continuous recovery part (`spec.replica`) in the replica cluster. All
we need to do is to enable the replica mode through option `spec.replica.enabled`
and set the `externalClusters` name in option `spec.replica.source`
+#### Example using pg_basebackup
+
This **first example** defines a replica cluster using streaming replication in
both bootstrap and continuous recovery. The replica cluster connects to the
source cluster using TLS authentication.
@@ -128,6 +140,8 @@ in case the replica cluster is in a separate namespace.
key: ca.crt
```
+#### Example using a Backup from an object store
+
The **second example** defines a replica cluster that bootstraps from an object
store using the `recovery` section and continuous recovery using both streaming
replication and the given object store. For streaming replication, the replica
@@ -176,6 +190,21 @@ a backup of the source cluster has been created already.
clusters, and that all the necessary secrets which hold passwords or
certificates are properly created in advance.
+#### Example using a Volume Snapshot
+
+If you use volume snapshots and your storage class provides
+cross-cluster availability of snapshots, you can leverage that to
+bootstrap a replica cluster through a volume snapshot of the
+source cluster.
+
+The **third example** defines a replica cluster that bootstraps
+from a volume snapshot using the `recovery` section. It uses
+streaming replication (via basic authentication) and the object
+store to fetch the WAL files.
+
+You can check the [sample YAML](../samples/cluster-example-replica-from-volume-snapshot.yaml)
+for it in the `samples/` subdirectory.
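+
+As a condensed, illustrative sketch of the key parts (the snapshot name is a
+placeholder produced at backup time):
+
+```yaml
+  bootstrap:
+    recovery:
+      source: cluster-example-with-volume-snapshot
+      volumeSnapshots:
+        storage:
+          name: <snapshot name>
+          kind: VolumeSnapshot
+          apiGroup: snapshot.storage.k8s.io
+
+  replica:
+    enabled: true
+    source: cluster-example-with-volume-snapshot
+```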
+
## Promoting the designated primary in the replica cluster
To promote the **designated primary** to **primary**, all we need to do is to
diff --git a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx
index 21277446ce8..c1e606d617f 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx
@@ -229,11 +229,11 @@ In EDB Postgres for Kubernetes, we use the terms:
This feature, introduced in EDB Postgres for Kubernetes 1.18, is now enabled by default and
can be disabled via configuration. For details, please refer to the
-["replicationSlots" section in the API reference](api_reference.md#ReplicationSlotsConfiguration).
+["replicationSlots" section in the API reference](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration).
Here follows a brief description of the main options:
`.spec.replicationSlots.highAvailability.enabled`
-: if true, the feature is enabled (`true` is the default - since 1.20)
+: if true, the feature is enabled (`true` is the default since 1.21)
`.spec.replicationSlots.highAvailability.slotPrefix`
: the prefix that identifies replication slots managed by the operator
@@ -258,8 +258,8 @@ Here follows a brief description of the main options:
Although it is not recommended, if you desire a different behavior, you can
customize the above options.
-For example, the following manifest will create a cluster without replication
-slots enabled.
+For example, the following manifest will create a cluster with replication
+slots disabled.
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -305,4 +305,28 @@ the lag from the primary.
!!! Seealso "Monitoring"
Please refer to the ["Monitoring" section](monitoring.md) for details on
- how to monitor a EDB Postgres for Kubernetes deployment.
\ No newline at end of file
+    how to monitor an EDB Postgres for Kubernetes deployment.
+
+### Capping the WAL size retained for replication slots
+
+When replication slots are enabled, you might end up running out of disk
+space due to PostgreSQL trying to retain WAL files requested by a replication
+slot. This might happen because a standby is down (temporarily or not),
+lagging, or simply because of an orphan replication slot.
+
+Starting with PostgreSQL 13, you can take advantage of the
+[`max_slot_wal_keep_size`](https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-MAX-SLOT-WAL-KEEP-SIZE)
+configuration option controlling the maximum size of WAL files that replication
+slots are allowed to retain in the `pg_wal` directory at checkpoint time.
+By default, in PostgreSQL `max_slot_wal_keep_size` is set to `-1`, meaning that
+replication slots may retain an unlimited amount of WAL files.
+As a result, our recommendation is to explicitly set `max_slot_wal_keep_size`
+when replication slots support is enabled. For example:
+
+```yaml
+ # ...
+ postgresql:
+ parameters:
+ max_slot_wal_keep_size: "10GB"
+ # ...
+```
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx
index 42e2d78e0fc..704ef4eb44c 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx
@@ -54,7 +54,7 @@ while creating a cluster:
in a VM or physical machine scenario - see below).
- Set up database server pods on a dedicated node using nodeSelector.
See the "nodeSelector" and "tolerations" fields of the
- [“affinityconfiguration"](api_reference.md#affinityconfiguration) resource on the API reference page.
+ [“affinityconfiguration"](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-AffinityConfiguration) resource on the API reference page.
You can refer to the following example manifest:
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx
index 3b97553f228..7add2d6e076 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx
@@ -48,7 +48,7 @@ PostGIS example
: [`postgis-example.yaml`](../samples/postgis-example.yaml):
an example of "PostGIS cluster" (see the [PostGIS section](postgis.md) for details.)
-Replica cluster via streaming
+Replica cluster via streaming (pg_basebackup)
: **Prerequisites**: [`cluster-example.yaml`](../samples/cluster-example.yaml)
applied and Healthy
: [`cluster-example-replica-streaming.yaml`](../samples/cluster-example-replica-streaming.yaml): a replica cluster following `cluster-example` with streaming replication.
@@ -59,7 +59,7 @@ Simple cluster with backup configured
: [`cluster-example-with-backup.yaml`](../samples/cluster-example-with-backup.yaml)
a basic cluster with backups configured.
-Replica cluster via backup
+Replica cluster via Backup from an object store
: **Prerequisites**:
[`cluster-storage-class-with-backup.yaml`](../samples/cluster-storage-class-with-backup.yaml) applied and Healthy.
And a backup
@@ -68,6 +68,15 @@ Replica cluster via backup
: [`cluster-example-replica-from-backup-simple.yaml`](../samples/cluster-example-replica-from-backup-simple.yaml):
a replica cluster following a cluster with backup configured.
+Replica cluster via Volume Snapshot
+: **Prerequisites**:
+ [`cluster-example-with-volume-snapshot.yaml`](../samples/cluster-example-with-volume-snapshot.yaml) applied and Healthy.
+ And a volume snapshot
+ [`backup-with-volume-snapshot.yaml`](../samples/backup-with-volume-snapshot.yaml)
+ applied and Completed.
+: [`cluster-example-replica-from-volume-snapshot.yaml`](../samples/cluster-example-replica-from-volume-snapshot.yaml):
+ a replica cluster following a cluster with volume snapshot configured.
+
Bootstrap cluster with SQL files
: [`cluster-example-initdb-sql-refs.yaml`](../samples/cluster-example-initdb-sql-refs.yaml):
a cluster example that will execute a set of queries defined in a Secret and a ConfigMap right after the database is created.
@@ -90,4 +99,4 @@ Cluster with TDE enabled
an EPAS 15 cluster with TDE. Note that you will need access credentials
to download the image used.
-For a list of available options, please refer to the ["API Reference" page](api_reference.md).
\ No newline at end of file
+For a list of available options, please refer to the ["API Reference" page](cloudnative-pg.v1.md).
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/backup-with-volume-snapshot.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/backup-with-volume-snapshot.yaml
new file mode 100644
index 00000000000..371c8f0beba
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/backup-with-volume-snapshot.yaml
@@ -0,0 +1,8 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Backup
+metadata:
+ name: backup-with-volume-snapshot
+spec:
+ method: volumeSnapshot
+ cluster:
+ name: cluster-example-with-volume-snapshot
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml
index 79b833f6912..f7e772b6c02 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-full.yaml
@@ -35,7 +35,7 @@ metadata:
name: cluster-example-full
spec:
description: "Example of cluster"
- imageName: quay.io/enterprisedb/postgresql:15.3
+ imageName: quay.io/enterprisedb/postgresql:16.0
# imagePullSecret is only required if the images are located in a private registry
# imagePullSecrets:
# - name: private_registry_access
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-monitoring.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-monitoring.yaml
index 42834b2f7fd..88e6951652a 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-monitoring.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-monitoring.yaml
@@ -25,38 +25,6 @@ metadata:
k8s.enterprisedb.io/reload: ""
data:
custom-queries: |
- pg_replication:
- query: "SELECT CASE WHEN NOT pg_is_in_recovery()
- THEN 0
- ELSE GREATEST (0,
- EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())))
- END AS lag,
- pg_is_in_recovery() AS in_recovery,
- EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,
- (SELECT count(*) FROM pg_stat_replication) AS streaming_replicas"
-
- metrics:
- - lag:
- usage: "GAUGE"
- description: "Replication lag behind primary in seconds"
- - in_recovery:
- usage: "GAUGE"
- description: "Whether the instance is in recovery"
- - is_wal_receiver_up:
- usage: "GAUGE"
- description: "Whether the instance wal_receiver is up"
- - streaming_replicas:
- usage: "GAUGE"
- description: "Number of streaming replicas connected to the instance"
-
- pg_postmaster:
- query: "SELECT pg_postmaster_start_time as start_time from pg_postmaster_start_time()"
- primary: true
- metrics:
- - start_time:
- usage: "GAUGE"
- description: "Time at which postgres started"
-
pg_stat_user_tables:
target_databases:
- "*"
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-replica-from-volume-snapshot.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-replica-from-volume-snapshot.yaml
new file mode 100644
index 00000000000..ca3bc3dc2eb
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-replica-from-volume-snapshot.yaml
@@ -0,0 +1,54 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-example-replica-from-snapshot
+spec:
+ instances: 1
+
+ storage:
+ storageClass: csi-hostpath-sc
+ size: 1Gi
+ walStorage:
+ storageClass: csi-hostpath-sc
+ size: 1Gi
+
+ bootstrap:
+ recovery:
+ source: cluster-example-with-volume-snapshot
+ volumeSnapshots:
+ storage:
+ name: cluster-example-with-volume-snapshot-2-1692618163
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
+ walStorage:
+ name: cluster-example-with-volume-snapshot-2-wal-1692618163
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
+
+ replica:
+ enabled: true
+ source: cluster-example-with-volume-snapshot
+
+ externalClusters:
+ - name: cluster-example-with-volume-snapshot
+
+ connectionParameters:
+ host: cluster-example-with-volume-snapshot-rw.default.svc
+ user: postgres
+ dbname: postgres
+ password:
+ name: cluster-example-with-volume-snapshot-superuser
+ key: password
+
+ barmanObjectStore:
+ destinationPath: s3://backups/
+ endpointURL: http://minio:9000
+ s3Credentials:
+ accessKeyId:
+ name: minio
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: minio
+ key: ACCESS_SECRET_KEY
+ wal:
+ maxParallel: 8
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-replica-streaming.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-replica-streaming.yaml
index 847a9d4dbe3..63eba35085d 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-replica-streaming.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-replica-streaming.yaml
@@ -10,7 +10,7 @@ spec:
source: cluster-example
replica:
- enabled: false
+ enabled: true
source: cluster-example
storage:
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-backup.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-backup.yaml
index 9caf09bb71d..a0a99d90b41 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-backup.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-backup.yaml
@@ -8,7 +8,7 @@ spec:
# Persistent storage configuration
storage:
- storageClass: standard
+ storageClass: csi-hostpath-sc
size: 1Gi
# Backup properties
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-volume-snapshot.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-volume-snapshot.yaml
new file mode 100644
index 00000000000..ef58162a061
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-example-with-volume-snapshot.yaml
@@ -0,0 +1,32 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-example-with-volume-snapshot
+spec:
+ instances: 3
+ primaryUpdateStrategy: unsupervised
+
+ # Persistent storage configuration
+ storage:
+ storageClass: csi-hostpath-sc
+ size: 1Gi
+ walStorage:
+ storageClass: csi-hostpath-sc
+ size: 1Gi
+
+ # Backup properties
+ backup:
+ volumeSnapshot:
+ className: csi-hostpath-snapclass
+ barmanObjectStore:
+ destinationPath: s3://backups/
+ endpointURL: http://minio:9000
+ s3Credentials:
+ accessKeyId:
+ name: minio
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: minio
+ key: ACCESS_SECRET_KEY
+ wal:
+ compression: gzip
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot-full.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot-full.yaml
new file mode 100644
index 00000000000..72d7d11507e
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot-full.yaml
@@ -0,0 +1,18 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore-full
+spec:
+ instances: 3
+
+ storage:
+ size: 1Gi
+ storageClass: csi-hostpath-sc
+
+ bootstrap:
+ recovery:
+ volumeSnapshots:
+ storage:
+ name: cluster-example-2-1695821489
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot-pitr.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot-pitr.yaml
new file mode 100644
index 00000000000..67890530b5f
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot-pitr.yaml
@@ -0,0 +1,40 @@
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore-pitr
+spec:
+ instances: 3
+
+ storage:
+ size: 1Gi
+ storageClass: csi-hostpath-sc
+
+ externalClusters:
+ - name: origin
+
+ barmanObjectStore:
+ serverName: cluster-example-with-backup
+ destinationPath: s3://backups/
+ endpointURL: http://minio:9000
+ s3Credentials:
+ accessKeyId:
+ name: minio
+ key: ACCESS_KEY_ID
+ secretAccessKey:
+ name: minio
+ key: ACCESS_SECRET_KEY
+ wal:
+ maxParallel: 8
+
+ bootstrap:
+ recovery:
+ source: origin
+
+ recoveryTarget:
+ targetTime: "2023-08-21 12:00:00.00000+00"
+
+ volumeSnapshots:
+ storage:
+ name: cluster-example-with-backup-3-1692618163
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot.yaml
index 5a2f24f2883..4b232d016d1 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/cluster-restore-snapshot.yaml
@@ -7,12 +7,13 @@ spec:
storage:
size: 1Gi
+ storageClass: csi-hostpath-sc
bootstrap:
recovery:
volumeSnapshots:
storage:
- name: my-backup
+ name: cluster-example-20230930084154
kind: VolumeSnapshot
apiGroup: snapshot.storage.k8s.io
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/alerts.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/alerts.yaml
index 7e1987b1521..a0bb9098d01 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/alerts.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/alerts.yaml
@@ -55,3 +55,12 @@ groups:
for: 1m
labels:
severity: warning
+ - alert: ReplicaFailingReplication
+ annotations:
+ description: Replica {{ $labels.pod }} is failing to replicate
+ summary: Checks if the replica is failing to replicate
+ expr: |-
+ cnp_pg_replication_in_recovery > cnp_pg_replication_is_wal_receiver_up
+ for: 1m
+ labels:
+ severity: warning
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-configmap.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-configmap.yaml
index 1e14184ae1f..a8ed15e61d6 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-configmap.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-configmap.yaml
@@ -23,6 +23,85 @@ metadata:
data:
cnp.json: |-
{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "panel",
+ "id": "alertlist",
+ "name": "Alert list",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "bargauge",
+ "name": "Bar gauge",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "gauge",
+ "name": "Gauge",
+ "version": ""
+ },
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.5.1"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph (old)",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "heatmap",
+ "name": "Heatmap",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "stat",
+ "name": "Stat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "table",
+ "name": "Table",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "text",
+ "name": "Text",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
"annotations": {
"list": [
{
@@ -48,7 +127,7 @@ data:
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
- "id": 452,
+ "id": null,
"links": [
{
"asDropdown": false,
@@ -68,7 +147,10 @@ data:
"liveNow": false,
"panels": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 7,
"w": 3,
@@ -77,8 +159,8 @@ data:
},
"id": 334,
"options": {
- "alertInstanceLabelFilter": "",
- "alertName": "Database",
+ "alertInstanceLabelFilter": "{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}",
+ "alertName": "",
"dashboardAlerts": false,
"folder": "",
"groupBy": [],
@@ -98,7 +180,10 @@ data:
"type": "alertlist"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 15,
@@ -120,7 +205,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -142,7 +230,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -164,7 +255,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -210,7 +304,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": false,
"expr": "max(cnp_pg_postmaster_start_time{namespace=~\"$namespace\",pod=~\"$instances\"})*1000",
@@ -228,7 +325,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -272,7 +372,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "sum(rate(cnp_pg_stat_database_xact_commit{namespace=~\"$namespace\",pod=~\"$instances\"}[$__interval])) + sum(rate(cnp_pg_stat_database_xact_rollback{namespace=~\"$namespace\",pod=~\"$instances\"}[$__interval]))",
@@ -286,7 +389,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "CPU Utilisation from Requests",
"fieldConfig": {
"defaults": {
@@ -341,7 +447,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{ namespace=\"$namespace\"}) / sum(kube_pod_container_resource_requests{job=\"kube-state-metrics\", namespace=\"$namespace\", resource=\"cpu\"})",
"format": "time_series",
@@ -354,7 +463,10 @@ data:
"type": "gauge"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Memory Utilisation from Requests",
"fieldConfig": {
"defaults": {
@@ -409,7 +521,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"expr": "sum(container_memory_working_set_bytes{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) / sum(max by(pod) (kube_pod_container_resource_requests{job=\"kube-state-metrics\", namespace=\"$namespace\", resource=\"memory\"}))",
"format": "time_series",
@@ -422,7 +537,10 @@ data:
"type": "gauge"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -480,9 +598,12 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
- "expr": "cnp_pg_replication_lag{namespace=~\"$namespace\",pod=~\"$instances\"}",
+ "expr": "max(cnp_pg_replication_lag{namespace=~\"$namespace\",pod=~\"$instances\"})",
"legendFormat": "__auto",
"range": true,
"refId": "A"
@@ -492,7 +613,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -548,7 +672,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"expr": "max(cnp_pg_stat_replication_write_lag_seconds{namespace=~\"$namespace\",pod=~\"$instances\"})",
"legendFormat": "__auto",
@@ -560,7 +687,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -612,21 +742,40 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"expr": "max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"$instances\"} / kubelet_volume_stats_capacity_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"$instances\"}))",
"format": "time_series",
"interval": "",
- "legendFormat": "{{persistentvolumeclaim}}",
+ "legendFormat": "DATA",
"range": true,
- "refId": "FREE_SPACE"
+ "refId": "DATA"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"} / kubelet_volume_stats_capacity_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"}))",
+ "format": "time_series",
+ "interval": "",
+ "legendFormat": "WAL",
+ "range": true,
+ "refId": "WAL"
}
],
"title": "Volume Space Usage",
"type": "gauge"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Elapsed time since the last successful base backup.",
"fieldConfig": {
"defaults": {
@@ -645,6 +794,18 @@ data:
"to": 1e+42
},
"type": "range"
+ },
+ {
+ "options": {
+ "from": -2147483648,
+ "result": {
+ "color": "red",
+ "index": 1,
+ "text": "N/A"
+ },
+ "to": -1577847600
+ },
+ "type": "range"
}
],
"thresholds": {
@@ -700,7 +861,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"expr": "-(time() - max(cnp_collector_last_available_backup_timestamp{namespace=\"$namespace\",pod=~\"$instances\"}))",
"legendFormat": "__auto",
@@ -712,7 +876,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -780,10 +947,13 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
- "expr": "(1 - cnp_pg_replication_in_recovery{namespace=~\"$namespace\",pod=~\"$instances\"} + cnp_pg_replication_is_wal_receiver_up{namespace=~\"$namespace\",pod=~\"$instances\"}) * (time() - timestamp(cnp_pg_stat_archiver_seconds_since_last_archival{namespace=~\"$namespace\",pod=~\"$instances\"}) -\ncnp_pg_stat_archiver_seconds_since_last_archival{namespace=~\"$namespace\",pod=~\"$instances\"})",
+ "expr": "min((1 - cnp_pg_replication_in_recovery{namespace=~\"$namespace\",pod=~\"$instances\"} + cnp_pg_replication_is_wal_receiver_up{namespace=~\"$namespace\",pod=~\"$instances\"}) * (time() - timestamp(cnp_pg_stat_archiver_seconds_since_last_archival{namespace=~\"$namespace\",pod=~\"$instances\"}) -\ncnp_pg_stat_archiver_seconds_since_last_archival{namespace=~\"$namespace\",pod=~\"$instances\"}))",
"format": "time_series",
"interval": "",
"legendFormat": "__auto",
@@ -795,7 +965,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -842,7 +1015,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "builder",
"exemplar": false,
"expr": "cnp_collector_postgres_version{namespace=~\"$namespace\",pod=~\"$instances\"}",
@@ -861,7 +1037,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -917,7 +1096,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"expr": "max(cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"})",
"legendFormat": "__auto",
@@ -929,7 +1111,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -985,7 +1170,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"expr": "max(cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"})",
"legendFormat": "__auto",
@@ -997,7 +1185,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -1052,7 +1243,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{ namespace=\"$namespace\"})",
@@ -1066,7 +1260,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Excluding cache",
"fieldConfig": {
"defaults": {
@@ -1120,7 +1317,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "sum(container_memory_working_set_bytes{pod=~\"$instances\", namespace=\"$namespace\", container!=\"\", image!=\"\"})",
@@ -1134,7 +1334,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -1189,7 +1392,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": false,
"expr": "cnp_pg_database_size_bytes{namespace=\"$namespace\"}",
@@ -1223,13 +1429,26 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "red",
+ "index": 1,
+ "text": "N/A"
+ }
+ },
+ "type": "value"
+ },
{
"options": {
"match": "null",
@@ -1279,7 +1498,10 @@ data:
"pluginVersion": "9.5.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "max(cnp_collector_first_recoverability_point{namespace=~\"$namespace\",pod=~\"$instances\"})*1000",
@@ -1318,7 +1540,10 @@ data:
"type": "row"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -1339,7 +1564,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1353,7 +1581,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 2,
@@ -1374,7 +1605,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1387,7 +1621,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -1408,7 +1645,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1421,7 +1661,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 2,
@@ -1442,7 +1685,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1455,7 +1701,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 4,
@@ -1476,7 +1725,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1489,7 +1741,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -1510,7 +1765,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1523,7 +1781,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"gridPos": {
"h": 1,
@@ -1545,7 +1806,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1558,7 +1822,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 2,
@@ -1579,7 +1846,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1592,7 +1862,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 2,
@@ -1613,7 +1886,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1626,7 +1902,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 3,
"w": 3,
@@ -1648,7 +1927,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1660,7 +1942,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -1724,7 +2009,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "min(kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"})",
@@ -1737,7 +2025,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -1805,7 +2096,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "1 - cnp_pg_replication_in_recovery{namespace=~\"$namespace\",pod=~\"$instances\"} + cnp_pg_replication_is_wal_receiver_up{namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1818,7 +2112,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -1870,7 +2167,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_replication_streaming_replicas{namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -1883,7 +2183,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "This metric depends on exporting the: `topology.kubernetes.io/zone` label through kube-state-metrics (not enabled by default). Can be added by changing its configuration with:\n\n```yaml\nmetricLabelsAllowlist:\n - nodes=[topology.kubernetes.io/zone]\n```",
"fieldConfig": {
"defaults": {
@@ -1932,7 +2235,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "kube_pod_info{namespace=~\"$namespace\",pod=~\"$instances\"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels",
@@ -1947,7 +2253,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -1967,6 +2276,7 @@ data:
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2024,7 +2334,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "sum by (pod) (cnp_backends_total{namespace=~\"$namespace\",pod=~\"$instances\"})",
"instant": false,
@@ -2036,7 +2349,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -2094,7 +2410,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "100 * sum by (pod) (cnp_backends_total{namespace=~\"$namespace\",pod=~\"$instances\"}) / sum by (pod) (cnp_pg_settings_setting{name=\"max_connections\",namespace=~\"$namespace\",pod=~\"$instances\"})",
"instant": true,
@@ -2106,7 +2425,10 @@ data:
"type": "gauge"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -2164,7 +2486,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "max by (pod) (cnp_pg_database_xid_age{namespace=~\"$namespace\",pod=~\"$instances\"})",
"instant": true,
@@ -2176,7 +2501,10 @@ data:
"type": "bargauge"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -2224,7 +2552,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": false,
"expr": "cnp_pg_postmaster_start_time{namespace=~\"$namespace\",pod=~\"$instances\"}*1000",
"format": "time_series",
@@ -2240,7 +2571,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -2288,7 +2622,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": false,
"expr": "cnp_collector_postgres_version{namespace=~\"$namespace\",pod=~\"$instances\"}",
@@ -2329,7 +2666,10 @@ data:
"type": "row"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -2350,7 +2690,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2364,7 +2707,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -2385,7 +2731,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2398,7 +2747,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -2419,7 +2771,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2432,7 +2787,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -2453,7 +2811,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2466,7 +2827,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -2487,7 +2851,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2500,7 +2867,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -2521,7 +2891,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2534,7 +2907,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -2555,7 +2931,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2568,7 +2947,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 3,
@@ -2589,7 +2971,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2602,7 +2987,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 3,
"w": 3,
@@ -2624,7 +3012,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "kube_pod_container_status_ready{container=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2636,7 +3027,10 @@ data:
"type": "text"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -2682,7 +3076,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_settings_setting{name=\"max_connections\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2694,7 +3091,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -2742,7 +3142,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "max by (pod) (cnp_pg_settings_setting{name=\"shared_buffers\",namespace=~\"$namespace\",pod=~\"$instances\"}) * max by (pod) (cnp_pg_settings_setting{name=\"block_size\",namespace=~\"$namespace\",pod=~\"$instances\"})",
"instant": true,
@@ -2754,7 +3157,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -2802,9 +3208,12 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
- "expr": "cnp_pg_settings_setting{name=\"effective_cache_size\",namespace=~\"$namespace\",pod=~\"$instances\"}",
+ "expr": "max by (pod) (cnp_pg_settings_setting{name=\"effective_cache_size\",namespace=~\"$namespace\",pod=~\"$instances\"}) * max by (pod) (cnp_pg_settings_setting{name=\"block_size\",namespace=~\"$namespace\",pod=~\"$instances\"})",
"instant": true,
"interval": "",
"legendFormat": "{{pod}}",
@@ -2814,7 +3223,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -2862,7 +3274,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_settings_setting{name=\"work_mem\",namespace=~\"$namespace\",pod=~\"$instances\"} * 1024",
"instant": true,
@@ -2874,7 +3289,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -2921,7 +3339,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_settings_setting{name=\"maintenance_work_mem\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2933,7 +3354,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -2980,7 +3404,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_settings_setting{name=\"random_page_cost\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -2992,7 +3419,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -3039,7 +3469,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_settings_setting{name=\"seq_page_cost\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": true,
@@ -3051,7 +3484,10 @@ data:
"type": "stat"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -3102,7 +3538,10 @@ data:
"repeatDirection": "v",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_settings_setting{namespace=~\"$namespace\",pod=~\"$instances\"}",
"format": "table",
@@ -3185,7 +3624,10 @@ data:
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -3223,7 +3665,10 @@ data:
"steppedLine": false,
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{pod=~\"$instances\", namespace=~\"$namespace\"}) by (pod)",
"format": "time_series",
@@ -3234,7 +3679,10 @@ data:
"step": 10
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{pod=~\"$instances\", namespace=~\"$namespace\"})",
"hide": false,
@@ -3281,7 +3729,10 @@ data:
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fill": 2,
"fillGradient": 0,
"gridPos": {
@@ -3342,7 +3793,10 @@ data:
"steppedLine": false,
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "sum(container_memory_working_set_bytes{pod=~\"$instances\", namespace=\"$namespace\", container!=\"\", image!=\"\"}) by (pod)",
"format": "time_series",
@@ -3353,7 +3807,10 @@ data:
"step": 10
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "sum(container_memory_working_set_bytes{pod=~\"$instances\", namespace=\"$namespace\", container!=\"\", image!=\"\"})",
"hide": false,
@@ -3396,7 +3853,10 @@ data:
}
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -3466,7 +3926,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "sum(cnp_backends_total{namespace=~\"$namespace\",pod=~\"$instances\"}) by (pod)",
"hide": false,
@@ -3475,7 +3938,10 @@ data:
"refId": "B"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "sum(cnp_backends_total{namespace=~\"$namespace\",pod=~\"$instances\"}) by (state, pod)",
"interval": "",
@@ -3487,7 +3953,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -3560,7 +4029,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "sum(rate(cnp_pg_stat_database_xact_commit{namespace=~\"$namespace\",pod=~\"$instances\"}[5m])) by (pod)",
"interval": "",
@@ -3568,7 +4040,10 @@ data:
"refId": "A"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "sum(rate(cnp_pg_stat_database_xact_rollback{namespace=~\"$namespace\",pod=~\"$instances\"}[5m])) by (pod)",
"hide": false,
@@ -3581,7 +4056,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -3656,7 +4134,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "max by (pod) (cnp_backends_max_tx_duration_seconds{namespace=~\"$namespace\",pod=~\"$instances\"})",
"interval": "",
@@ -3668,7 +4149,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -3742,7 +4226,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "rate(cnp_pg_stat_database_deadlocks{datname=\"\",namespace=~\"$namespace\",pod=~\"$instances\"}[5m])",
"hide": false,
@@ -3756,7 +4243,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -3830,7 +4320,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_backends_waiting_total{namespace=~\"$namespace\",pod=~\"$instances\"}",
"interval": "",
@@ -3867,7 +4360,10 @@ data:
"id": 35,
"panels": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -3919,7 +4415,10 @@ data:
"pluginVersion": "9.4.7",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"expr": "max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"$instances\"} / kubelet_volume_stats_capacity_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"$instances\"})",
"format": "time_series",
@@ -3927,6 +4426,19 @@ data:
"legendFormat": "{{persistentvolumeclaim}}",
"range": true,
"refId": "FREE_SPACE"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"} / kubelet_volume_stats_capacity_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"})",
+ "format": "time_series",
+ "interval": "",
+ "legendFormat": "{{persistentvolumeclaim}}",
+ "range": true,
+ "refId": "FREE_SPACE_WAL"
}
],
"title": "Volume Space Usage",
@@ -3934,7 +4446,10 @@ data:
"type": "gauge"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -3986,7 +4501,10 @@ data:
"pluginVersion": "9.4.7",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_inodes_used{namespace=\"$namespace\", persistentvolumeclaim=~\"$instances\"} / kubelet_volume_stats_inodes{namespace=\"$namespace\", persistentvolumeclaim=~\"$instances\"})",
"format": "time_series",
@@ -3994,6 +4512,19 @@ data:
"legendFormat": "{{persistentvolumeclaim}}",
"range": true,
"refId": "FREE_INODES"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_inodes_used{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"} / kubelet_volume_stats_inodes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"})",
+ "format": "time_series",
+ "interval": "",
+ "legendFormat": "{{persistentvolumeclaim}}",
+ "range": true,
+ "refId": "FREE_INODES_WAL"
}
],
"title": "Volume Inode Usage",
@@ -4001,7 +4532,10 @@ data:
"type": "gauge"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -4074,7 +4608,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "sum(rate(cnp_pg_stat_database_tup_deleted{datname=\"\",namespace=~\"$namespace\",pod=~\"$instances\"}[5m]))",
@@ -4084,7 +4621,10 @@ data:
"refId": "A"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "sum(rate(cnp_pg_stat_database_tup_inserted{datname=\"\",namespace=~\"$namespace\",pod=~\"$instances\"}[5m]))",
@@ -4095,7 +4635,10 @@ data:
"refId": "B"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "sum(rate(cnp_pg_stat_database_tup_fetched{datname=\"\",namespace=~\"$namespace\",pod=~\"$instances\"}[5m]))",
@@ -4106,7 +4649,10 @@ data:
"refId": "C"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "sum(rate(cnp_pg_stat_database_tup_returned{datname=\"\",namespace=~\"$namespace\",pod=~\"$instances\"}[5m]))",
@@ -4117,7 +4663,10 @@ data:
"refId": "D"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "sum(rate(cnp_pg_stat_database_tup_updated{datname=\"\",namespace=~\"$namespace\",pod=~\"$instances\"}[5m]))",
@@ -4132,7 +4681,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -4205,7 +4757,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "rate(cnp_pg_stat_database_blks_hit{datname=\"\",namespace=~\"$namespace\",pod=~\"$instances\"}[5m])",
@@ -4215,7 +4770,10 @@ data:
"refId": "A"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "rate(cnp_pg_stat_database_blks_read{datname=\"\",namespace=~\"$namespace\",pod=~\"$instances\"}[5m])",
@@ -4230,7 +4788,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -4301,7 +4862,10 @@ data:
"pluginVersion": "8.0.5",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editorMode": "code",
"exemplar": true,
"expr": "max by (datname) (cnp_pg_database_size_bytes{datname!~\"template.*\",datname!=\"postgres\",namespace=~\"$namespace\",pod=~\"$instances\"})",
@@ -4315,7 +4879,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -4389,7 +4956,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "rate(cnp_pg_stat_database_temp_bytes{datname=\"\",namespace=~\"$namespace\",pod=~\"$instances\"}[5m])",
"instant": false,
@@ -4427,7 +4997,10 @@ data:
"id": 37,
"panels": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -4500,7 +5073,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_collector_pg_wal_archive_status{value=\"ready\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"interval": "",
@@ -4508,7 +5084,10 @@ data:
"refId": "A"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_collector_pg_wal_archive_status{value=\"done\",namespace=~\"$namespace\",pod=~\"$instances\"}",
"hide": false,
@@ -4521,7 +5100,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -4594,7 +5176,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "rate(cnp_pg_stat_archiver_archived_count{namespace=~\"$namespace\",pod=~\"$instances\"}[5m])",
"interval": "",
@@ -4602,7 +5187,10 @@ data:
"refId": "A"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "rate(cnp_pg_stat_archiver_failed_count{namespace=~\"$namespace\",pod=~\"$instances\"}[5m])",
"hide": false,
@@ -4615,7 +5203,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -4690,7 +5281,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_stat_archiver_seconds_since_last_archival{namespace=~\"$namespace\",pod=~\"$instances\"}",
"interval": "",
@@ -4727,7 +5321,10 @@ data:
"id": 18,
"panels": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -4805,7 +5402,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_replication_lag{namespace=~\"$namespace\",pod=~\"$instances\"}",
"instant": false,
@@ -4818,7 +5418,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -4892,7 +5495,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_stat_replication_write_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}",
"instant": false,
@@ -4905,7 +5511,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -4979,7 +5588,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_stat_replication_flush_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}",
"instant": false,
@@ -4992,7 +5604,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -5067,7 +5682,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_stat_replication_replay_lag_seconds{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}",
"interval": "",
@@ -5113,7 +5731,10 @@ data:
"mode": "spectrum"
},
"dataFormat": "timeseries",
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"custom": {
@@ -5183,7 +5804,10 @@ data:
"reverseYBuckets": false,
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_collector_collection_duration_seconds{namespace=~\"$namespace\",pod=~\"$instances\"}",
"interval": "",
@@ -5208,7 +5832,10 @@ data:
"yBucketBound": "auto"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -5281,7 +5908,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_collector_last_collection_error{namespace=~\"$namespace\",pod=~\"$instances\"}",
"interval": "",
@@ -5318,7 +5948,10 @@ data:
"id": 239,
"panels": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
@@ -5392,7 +6025,10 @@ data:
},
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_collector_first_recoverability_point{namespace=~\"$namespace\",pod=~\"$instances\"}*1000 > 0",
"format": "time_series",
@@ -5430,7 +6066,10 @@ data:
"id": 293,
"panels": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -5506,7 +6145,10 @@ data:
"pluginVersion": "8.2.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_stat_bgwriter_checkpoints_req{namespace=~\"$namespace\",pod=~\"$instances\"}",
"format": "time_series",
@@ -5518,7 +6160,10 @@ data:
"refId": "B"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_stat_bgwriter_checkpoints_timed{namespace=~\"$namespace\",pod=~\"$instances\"}",
"format": "time_series",
@@ -5532,7 +6177,10 @@ data:
"type": "timeseries"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "",
"fieldConfig": {
"defaults": {
@@ -5608,7 +6256,10 @@ data:
"pluginVersion": "8.2.1",
"targets": [
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_stat_bgwriter_checkpoint_write_time{namespace=~\"$namespace\",pod=~\"$instances\"}",
"format": "time_series",
@@ -5620,7 +6271,10 @@ data:
"refId": "B"
},
{
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"exemplar": true,
"expr": "cnp_pg_stat_bgwriter_checkpoint_sync_time{namespace=~\"$namespace\",pod=~\"$instances\"}",
"format": "time_series",
@@ -5675,11 +6329,7 @@ data:
"type": "datasource"
},
{
- "current": {
- "selected": false,
- "text": "database",
- "value": "database"
- },
+ "current": {},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
@@ -5701,11 +6351,7 @@ data:
"type": "query"
},
{
- "current": {
- "selected": false,
- "text": "database-clustermarket-database",
- "value": "database-clustermarket-database"
- },
+ "current": {},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
@@ -5727,15 +6373,7 @@ data:
"type": "query"
},
{
- "current": {
- "selected": true,
- "text": [
- "All"
- ],
- "value": [
- "$__all"
- ]
- },
+ "current": {},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
@@ -5767,7 +6405,7 @@ data:
},
"timezone": "",
"title": "EDB Postgres for Kubernetes",
- "uid": "z7FCA4Nnk",
- "version": 9,
+ "uid": "cloudnative-pg",
+ "version": 1,
"weekStart": ""
}
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-dashboard.json b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-dashboard.json
index f389574ea43..faa3846ede5 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-dashboard.json
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/grafana-dashboard.json
@@ -135,8 +135,8 @@
},
"id": 334,
"options": {
- "alertInstanceLabelFilter": "",
- "alertName": "Database",
+ "alertInstanceLabelFilter": "{namespace=~\"$namespace\",pod=~\"$cluster-[0-9]+$\"}",
+ "alertName": "",
"dashboardAlerts": false,
"folder": "",
"groupBy": [],
@@ -579,7 +579,7 @@
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
- "expr": "cnp_pg_replication_lag{namespace=~\"$namespace\",pod=~\"$instances\"}",
+ "expr": "max(cnp_pg_replication_lag{namespace=~\"$namespace\",pod=~\"$instances\"})",
"legendFormat": "__auto",
"range": true,
"refId": "A"
@@ -726,9 +726,22 @@
"expr": "max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"$instances\"} / kubelet_volume_stats_capacity_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"$instances\"}))",
"format": "time_series",
"interval": "",
- "legendFormat": "{{persistentvolumeclaim}}",
+ "legendFormat": "DATA",
"range": true,
- "refId": "FREE_SPACE"
+ "refId": "DATA"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"} / kubelet_volume_stats_capacity_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"}))",
+ "format": "time_series",
+ "interval": "",
+ "legendFormat": "WAL",
+ "range": true,
+ "refId": "WAL"
}
],
"title": "Volume Space Usage",
@@ -757,6 +770,18 @@
"to": 1e+42
},
"type": "range"
+ },
+ {
+ "options": {
+ "from": -2147483648,
+ "result": {
+ "color": "red",
+ "index": 1,
+ "text": "N/A"
+ },
+ "to": -1577847600
+ },
+ "type": "range"
}
],
"thresholds": {
@@ -904,7 +929,7 @@
},
"editorMode": "code",
"exemplar": true,
- "expr": "(1 - cnp_pg_replication_in_recovery{namespace=~\"$namespace\",pod=~\"$instances\"} + cnp_pg_replication_is_wal_receiver_up{namespace=~\"$namespace\",pod=~\"$instances\"}) * (time() - timestamp(cnp_pg_stat_archiver_seconds_since_last_archival{namespace=~\"$namespace\",pod=~\"$instances\"}) -\ncnp_pg_stat_archiver_seconds_since_last_archival{namespace=~\"$namespace\",pod=~\"$instances\"})",
+ "expr": "min((1 - cnp_pg_replication_in_recovery{namespace=~\"$namespace\",pod=~\"$instances\"} + cnp_pg_replication_is_wal_receiver_up{namespace=~\"$namespace\",pod=~\"$instances\"}) * (time() - timestamp(cnp_pg_stat_archiver_seconds_since_last_archival{namespace=~\"$namespace\",pod=~\"$instances\"}) -\ncnp_pg_stat_archiver_seconds_since_last_archival{namespace=~\"$namespace\",pod=~\"$instances\"}))",
"format": "time_series",
"interval": "",
"legendFormat": "__auto",
@@ -1390,6 +1415,16 @@
"mode": "thresholds"
},
"mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "red",
+ "index": 1,
+ "text": "N/A"
+ }
+ },
+ "type": "value"
+ },
{
"options": {
"match": "null",
@@ -2217,6 +2252,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3153,7 +3189,7 @@
"uid": "${DS_PROMETHEUS}"
},
"exemplar": true,
- "expr": "cnp_pg_settings_setting{name=\"effective_cache_size\",namespace=~\"$namespace\",pod=~\"$instances\"}",
+ "expr": "max by (pod) (cnp_pg_settings_setting{name=\"effective_cache_size\",namespace=~\"$namespace\",pod=~\"$instances\"}) * max by (pod) (cnp_pg_settings_setting{name=\"block_size\",namespace=~\"$namespace\",pod=~\"$instances\"})",
"instant": true,
"interval": "",
"legendFormat": "{{pod}}",
@@ -4366,6 +4402,19 @@
"legendFormat": "{{persistentvolumeclaim}}",
"range": true,
"refId": "FREE_SPACE"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"} / kubelet_volume_stats_capacity_bytes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"})",
+ "format": "time_series",
+ "interval": "",
+ "legendFormat": "{{persistentvolumeclaim}}",
+ "range": true,
+ "refId": "FREE_SPACE_WAL"
}
],
"title": "Volume Space Usage",
@@ -4439,6 +4488,19 @@
"legendFormat": "{{persistentvolumeclaim}}",
"range": true,
"refId": "FREE_INODES"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_inodes_used{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"} / kubelet_volume_stats_inodes{namespace=\"$namespace\", persistentvolumeclaim=~\"(${instances})-wal\"})",
+ "format": "time_series",
+ "interval": "",
+ "legendFormat": "{{persistentvolumeclaim}}",
+ "range": true,
+ "refId": "FREE_INODES_WAL"
}
],
"title": "Volume Inode Usage",
@@ -6319,7 +6381,7 @@
},
"timezone": "",
"title": "EDB Postgres for Kubernetes",
- "uid": "z7FCA4Nnk",
- "version": 9,
+ "uid": "cloudnative-pg",
+ "version": 1,
"weekStart": ""
}
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/prometheusrule.yaml b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/prometheusrule.yaml
index 2eaaec978cd..ed877e922b1 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/prometheusrule.yaml
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples/monitoring/prometheusrule.yaml
@@ -60,3 +60,12 @@ spec:
for: 1m
labels:
severity: warning
+ - alert: ReplicaFailingReplication
+ annotations:
+ description: Replica {{ $labels.pod }} is failing to replicate
+ summary: Checks if the replica is failing to replicate
+ expr: |-
+ cnp_pg_replication_in_recovery > cnp_pg_replication_is_wal_receiver_up
+ for: 1m
+ labels:
+ severity: warning
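Both metrics in the new ReplicaFailingReplication expression are per-pod boolean
gauges, so the comparison can only fire on an instance that is in recovery but
whose WAL receiver is down. A minimal sketch of how the expression evaluates,
with the per-pod states below as illustrative assumptions rather than output
from this patch:

    # Hypothetical per-pod samples annotating the expression added above:
    #   primary:         in_recovery=0, wal_receiver_up=0  ->  0 > 0, no alert
    #   healthy replica: in_recovery=1, wal_receiver_up=1  ->  1 > 1, no alert
    #   failing replica: in_recovery=1, wal_receiver_up=0  ->  1 > 0, alert fires
    expr: |-
      cnp_pg_replication_in_recovery > cnp_pg_replication_is_wal_receiver_up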
diff --git a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
index df12107675f..aee1552fdb6 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
@@ -14,7 +14,7 @@ the best node possible, based on several criteria.
anti-affinity, node selectors, and so on.
You can control how the EDB Postgres for Kubernetes cluster's instances should be
-scheduled through the [`affinity`](api_reference.md#AffinityConfiguration)
+scheduled through the [`affinity`](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-AffinityConfiguration)
section in the definition of the cluster, which supports:
- pod affinity/anti-affinity
@@ -61,7 +61,7 @@ metadata:
name: cluster-example
spec:
instances: 3
- imageName: quay.io/enterprisedb/postgresql:15.3
+ imageName: quay.io/enterprisedb/postgresql:16.0
affinity:
enablePodAntiAffinity: true #default value
diff --git a/product_docs/docs/postgres_for_kubernetes/1/security.mdx b/product_docs/docs/postgres_for_kubernetes/1/security.mdx
index dad9841372d..1c2478ba653 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/security.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/security.mdx
@@ -324,9 +324,19 @@ levels, as listed in the table below:
### PostgreSQL
The current implementation of EDB Postgres for Kubernetes automatically creates
-passwords and `.pgpass` files for the `postgres` superuser and the database owner.
+passwords and `.pgpass` files for the database owner and, only
+if requested by setting `enableSuperuserAccess` to `true`, for the
+`postgres` superuser.
-As far as encryption of password is concerned, EDB Postgres for Kubernetes follows
+!!! Warning
+ Prior to EDB Postgres for Kubernetes 1.21, `enableSuperuserAccess` was set to `true` by
+ default. This change has been implemented to improve the security-by-default
+ posture of the operator, fostering a microservice approach where changes to
+ PostgreSQL are performed in a declarative way through the `spec` of the
+ `Cluster` resource, while providing developers with full powers inside the
+ database through the database owner user.
+
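+For example, this minimal sketch of a `Cluster` explicitly re-enables
+superuser access, and with it the `postgres` user secret (the name and
+storage size below are placeholders):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  enableSuperuserAccess: true
+  storage:
+    size: 1Gi
+```
+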
+As far as password encryption is concerned, EDB Postgres for Kubernetes follows
the default behavior of PostgreSQL: starting from PostgreSQL 14,
`password_encryption` is by default set to `scram-sha-256`, while on earlier
versions it is set to `md5`.
@@ -335,9 +345,6 @@ versions it is set to `md5`.
Please refer to the ["Password authentication"](https://www.postgresql.org/docs/current/auth-password.html)
section in the PostgreSQL documentation for details.
-You can disable management of the `postgres` user password via secrets by setting
-`enableSuperuserAccess` to `false`.
-
!!! Note
The operator supports toggling the `enableSuperuserAccess` option. When you
disable it on a running cluster, the operator will ignore the content of the secret,
diff --git a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx
index ca9bba80492..8d7bc013078 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx
@@ -166,7 +166,7 @@ Output :
version
--------------------------------------------------------------------------------------
------------------
-PostgreSQL 15.3 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat
+PostgreSQL 16.0 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat
8.3.1-5), 64-bit
(1 row)
```
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
index cf8f18fd6a5..6df381052f0 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
@@ -47,13 +47,19 @@ guarantees higher and more predictable performance.
to know more, please read the
["Custom Pod Controller"](controller.md) document.
+## Backup and recovery
+
+Since EDB Postgres for Kubernetes supports volume snapshots for both backup and recovery,
+we recommend that you also consider this aspect when you choose your storage
+solution, especially if you manage very large databases.
+
## Benchmarking EDB Postgres for Kubernetes
-EDB maintains [cnp-bench](https://github.com/EnterpriseDB/cnp-bench),
-an open source set of guidelines and Helm charts for benchmarking EDB Postgres for Kubernetes
-in a controlled Kubernetes environment, before deploying the database in production.
+We recommend that you benchmark EDB Postgres for Kubernetes in a controlled Kubernetes
+environment before deploying the database in production, by following
+the [guidelines in the "Benchmarking" section](benchmarking.md).
-Briefly, `cnp-bench` is designed to operate at two levels:
+Briefly, our advice is to operate at two levels:
- measuring the performance of the underlying storage using `fio`, with relevant
metrics for database workloads such as throughput for sequential reads, sequential
@@ -436,4 +442,16 @@ You can use a pre-provisioned volume in EDB Postgres for Kubernetes by following
on the affinity rules of your cluster, Postgres pods can be correctly scheduled
by Kubernetes where a pre-provisioned volume exists. Make sure you check
for any pods stuck in `Pending` after you have deployed the cluster, and
- if the condition persists investigate why this is happening.
\ No newline at end of file
+ if the condition persists investigate why this is happening.
+
+## Block storage considerations (Ceph/Longhorn)
+
+Most block storage solutions in Kubernetes recommend having multiple 'replicas' of a volume
+to improve resiliency. This works well for workloads that don't have resiliency built into the
+application. However, EDB Postgres for Kubernetes has this resiliency built directly into the Postgres `Cluster`
+through the number of instances and the persistent volumes that are attached to them.
+
+In these cases it makes sense to define the storage class used by the Postgres clusters
+with a single replica, as shown in the sketch below. By keeping additional replicas in a storage
+solution like Longhorn or Ceph, you might incur write amplification, unnecessarily
+increasing disk I/O and space used.
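+
+As a sketch, a Longhorn storage class configured for a single replica per
+volume might look like the following (parameter names are taken from the
+Longhorn documentation; adapt them to your storage solution):
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: longhorn-single-replica
+provisioner: driver.longhorn.io
+allowVolumeExpansion: true
+parameters:
+  numberOfReplicas: "1"
+  staleReplicaTimeout: "2880"
+```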
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx
index c65e8fd390b..7709366978a 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx
@@ -183,7 +183,7 @@ Cluster in healthy state
Name: cluster-example
Namespace: default
System ID: 7044925089871458324
-PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3-3
+PostgreSQL Image: quay.io/enterprisedb/postgresql:16.0-3
Primary instance: cluster-example-1
Instances: 3
Ready instances: 3
@@ -259,7 +259,7 @@ kubectl describe cluster -n | grep "Image Name"
Output:
```shell
- Image Name: quay.io/enterprisedb/postgresql:15.3-3
+ Image Name: quay.io/enterprisedb/postgresql:16.0-3
```
!!! Note
@@ -547,6 +547,61 @@ allow-prometheus k8s.enterprisedb.io/cluster=cluster-example 47m
default-deny-ingress 57m
```
+## PostgreSQL core dumps
+
+Although rare, PostgreSQL can sometimes crash and generate a core dump
+in the `PGDATA` folder. When that happens, it is normally due to a bug in PostgreSQL
+(and most likely one that has already been solved; this is why it is important
+to always run the latest minor version of PostgreSQL).
+
+EDB Postgres for Kubernetes allows you to control what to include in the core dump through
+the `k8s.enterprisedb.io/coredumpFilter` annotation.
+
+!!! Info
+ Please refer to ["Labels and annotations"](labels_annotations.md)
+ for more details on the standard annotations that EDB Postgres for Kubernetes provides.
+
+By default, the `k8s.enterprisedb.io/coredumpFilter` annotation is set to `0x31`
+to exclude shared memory segments from the dump, as this is the safest
+approach in most cases.
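+
+A minimal sketch of how this annotation can be set in the `Cluster`
+metadata (the value below is the `0x31` default mentioned above):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+  annotations:
+    k8s.enterprisedb.io/coredumpFilter: "0x31"
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+```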
+
+!!! Info
+ Please refer to
+ ["Core dump filtering settings" section of "The `/proc` Filesystem" page of the Linux Kernel documentation](https://docs.kernel.org/filesystems/proc.html#proc-pid-coredump-filter-core-dump-filtering-settings).
+ for more details on how to set the bitmask that controls the core dump filter.
+
+!!! Important
+ Beware that this setting only takes effect during Pod startup and that changing
+ the annotation doesn't trigger an automated rollout of the instances.
+
+Although you might not personally be involved in inspecting core dumps,
+you might be asked to provide them so that a Postgres expert can look
+into them. First, verify that you have a core dump in the `PGDATA`
+directory with the following command (please run it against the
+correct pod where the Postgres instance is running):
+
+```sh
+kubectl exec -ti POD -c postgres \
+ -- find /var/lib/postgresql/data/pgdata -name 'core.*'
+```
+
+Under normal circumstances, this should return an empty set. Suppose, for
+example, that we have a core dump file:
+
+```
+/var/lib/postgresql/data/pgdata/core.14177
+```
+
+Once you have verified that the space on disk is sufficient, you can collect the
+core dump on your machine through `kubectl cp` as follows:
+
+```sh
+kubectl cp POD:/var/lib/postgresql/data/pgdata/core.14177 core.14177
+```
+
+You now have the file. Make sure you free the space on the server by
+removing the core dumps.
+
## Some common issues
### Storage is full
diff --git a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
new file mode 100644
index 00000000000..c3de7ae5f3a
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
@@ -0,0 +1,79 @@
+---
+title: 'WAL archiving'
+originalFilePath: 'src/wal_archiving.md'
+---
+
+WAL archiving is the process that feeds a [WAL archive](backup.md#wal-archive)
+in EDB Postgres for Kubernetes.
+
+!!! Important
+ EDB Postgres for Kubernetes currently only supports WAL archives on object stores. Such
+ WAL archives serve for both object store backups and volume snapshot backups.
+
+The WAL archive is defined in the `.spec.backup.barmanObjectStore` stanza of
+a `Cluster` resource. Please follow the same instructions you find in
+the ["Backup on object stores" section](backup_barmanobjectstore.md) to set up
+the WAL archive.
+
+!!! Info
+ Please refer to [`BarmanObjectStoreConfiguration`](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-barmanobjectstoreconfiguration)
+ in the API reference for a full list of options.
+
+If required, you can choose to compress WAL files as soon as they
+are uploaded and/or encrypt them:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ [...]
+ wal:
+ compression: gzip
+ encryption: AES256
+```
+
+You can configure the encryption directly in your bucket, and the operator
+will use it unless you override it in the cluster configuration.
+
+PostgreSQL implements a sequential archiving scheme, where the
+`archive_command` will be executed sequentially for every WAL
+segment to be archived.
+
+!!! Important
+ By default, EDB Postgres for Kubernetes sets `archive_timeout` to `5min`, ensuring
+ that WAL files, even in case of low workloads, are closed and archived
+ at least every 5 minutes, providing a deterministic time-based value for
+ your Recovery Point Objective (RPO). Even though you can change the value
+ of the [`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT),
+ our experience suggests that the default value set by the operator is
+ suitable for most use cases.
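+
+If your requirements differ, `archive_timeout` can be changed like any other
+PostgreSQL setting. As a sketch, assuming the standard `postgresql.parameters`
+stanza:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+  postgresql:
+    parameters:
+      archive_timeout: "10min"
+```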
+
+When the bandwidth between the PostgreSQL instance and the object
+store allows archiving more than one WAL file in parallel, you
+can use the parallel WAL archiving feature of the instance manager
+like in the following example:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+[...]
+spec:
+ backup:
+ barmanObjectStore:
+ [...]
+ wal:
+ compression: gzip
+ maxParallel: 8
+ encryption: AES256
+```
+
+In the previous example, the instance manager optimizes the WAL
+archiving process by archiving in parallel at most eight ready
+WALs, including the one requested by PostgreSQL.
+
+When PostgreSQL requests the archiving of a WAL file that the
+instance manager has already archived as part of this optimization,
+that archival request is simply dismissed with a positive status.
\ No newline at end of file
diff --git a/scripts/fileProcessor/package-lock.json b/scripts/fileProcessor/package-lock.json
index d7cdd398f6b..6c625df3184 100644
--- a/scripts/fileProcessor/package-lock.json
+++ b/scripts/fileProcessor/package-lock.json
@@ -2491,7 +2491,7 @@
"parse-entities": "^2.0.0",
"repeat-string": "^1.5.4",
"state-toggle": "^1.0.0",
- "trim": ">=0.0.3",
+ "trim": "0.0.1",
"trim-trailing-lines": "^1.0.0",
"unherit": "^1.0.4",
"unist-util-remove-position": "^2.0.0",
@@ -2618,8 +2618,7 @@
}
},
"trim": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/trim/-/trim-1.0.1.tgz",
+ "version": "https://registry.npmjs.org/trim/-/trim-1.0.1.tgz",
"integrity": "sha512-3JVP2YVqITUisXblCDq/Bi4P9457G/sdEamInkyvCsjbTcXLXIiG7XCb4kGMFWh6JGXesS3TKxOPtrncN/xe8w=="
},
"trim-trailing-lines": {
diff --git a/scripts/source/package-lock.json b/scripts/source/package-lock.json
index 0b6eb5a5e6d..56642db519f 100644
--- a/scripts/source/package-lock.json
+++ b/scripts/source/package-lock.json
@@ -3269,7 +3269,7 @@
"parse-entities": "^2.0.0",
"repeat-string": "^1.5.4",
"state-toggle": "^1.0.0",
- "trim": ">=0.0.3",
+ "trim": "0.0.1",
"trim-trailing-lines": "^1.0.0",
"unherit": "^1.0.4",
"unist-util-remove-position": "^2.0.0",
@@ -3417,8 +3417,7 @@
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"trim": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/trim/-/trim-1.0.1.tgz",
+ "version": "https://registry.npmjs.org/trim/-/trim-1.0.1.tgz",
"integrity": "sha512-3JVP2YVqITUisXblCDq/Bi4P9457G/sdEamInkyvCsjbTcXLXIiG7XCb4kGMFWh6JGXesS3TKxOPtrncN/xe8w=="
},
"trim-trailing-lines": {
From c0885e400b3a1d02a4a8e81018a5756e293fc216 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 18 Oct 2023 17:36:03 +0000
Subject: [PATCH 2/8] Revert unintended lockfile changes
---
scripts/fileProcessor/package-lock.json | 5 +++--
scripts/source/dispatch_product.py | 2 +-
scripts/source/package-lock.json | 5 +++--
scripts/source/process-cnp-docs.sh | 2 +-
4 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/scripts/fileProcessor/package-lock.json b/scripts/fileProcessor/package-lock.json
index 6c625df3184..d7cdd398f6b 100644
--- a/scripts/fileProcessor/package-lock.json
+++ b/scripts/fileProcessor/package-lock.json
@@ -2491,7 +2491,7 @@
"parse-entities": "^2.0.0",
"repeat-string": "^1.5.4",
"state-toggle": "^1.0.0",
- "trim": "0.0.1",
+ "trim": ">=0.0.3",
"trim-trailing-lines": "^1.0.0",
"unherit": "^1.0.4",
"unist-util-remove-position": "^2.0.0",
@@ -2618,7 +2618,8 @@
}
},
"trim": {
- "version": "https://registry.npmjs.org/trim/-/trim-1.0.1.tgz",
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/trim/-/trim-1.0.1.tgz",
"integrity": "sha512-3JVP2YVqITUisXblCDq/Bi4P9457G/sdEamInkyvCsjbTcXLXIiG7XCb4kGMFWh6JGXesS3TKxOPtrncN/xe8w=="
},
"trim-trailing-lines": {
diff --git a/scripts/source/dispatch_product.py b/scripts/source/dispatch_product.py
index 9f374788903..3874bdb67ff 100755
--- a/scripts/source/dispatch_product.py
+++ b/scripts/source/dispatch_product.py
@@ -23,7 +23,7 @@
ret = os.system(
f"cd {args.workspace}/destination/scripts/source && \
- npm install --production"
+ npm ci"
)
if ret != 0:
diff --git a/scripts/source/package-lock.json b/scripts/source/package-lock.json
index 56642db519f..0b6eb5a5e6d 100644
--- a/scripts/source/package-lock.json
+++ b/scripts/source/package-lock.json
@@ -3269,7 +3269,7 @@
"parse-entities": "^2.0.0",
"repeat-string": "^1.5.4",
"state-toggle": "^1.0.0",
- "trim": "0.0.1",
+ "trim": ">=0.0.3",
"trim-trailing-lines": "^1.0.0",
"unherit": "^1.0.4",
"unist-util-remove-position": "^2.0.0",
@@ -3417,7 +3417,8 @@
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"trim": {
- "version": "https://registry.npmjs.org/trim/-/trim-1.0.1.tgz",
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/trim/-/trim-1.0.1.tgz",
"integrity": "sha512-3JVP2YVqITUisXblCDq/Bi4P9457G/sdEamInkyvCsjbTcXLXIiG7XCb4kGMFWh6JGXesS3TKxOPtrncN/xe8w=="
},
"trim-trailing-lines": {
diff --git a/scripts/source/process-cnp-docs.sh b/scripts/source/process-cnp-docs.sh
index b5df0948b2e..c45b0851126 100755
--- a/scripts/source/process-cnp-docs.sh
+++ b/scripts/source/process-cnp-docs.sh
@@ -11,7 +11,7 @@ SOURCE_CHECKOUT=`cd $1 && pwd`
DESTINATION_CHECKOUT=`cd $2 && pwd`
cd $DESTINATION_CHECKOUT/scripts/fileProcessor
-npm install --production
+npm ci
cd $DESTINATION_CHECKOUT/product_docs/docs/postgres_for_kubernetes/1/
node $DESTINATION_CHECKOUT/scripts/source/files-to-ignore.mjs \
From c546c59fa2e1f446549a1577d831c00566aa7010 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 18 Oct 2023 18:15:00 +0000
Subject: [PATCH 3/8] Added release notes
---
.../1/rel_notes/1_18_7_rel_notes.mdx | 57 +++++++++++++++++++
.../1/rel_notes/1_19_5_rel_notes.mdx | 12 ++++
.../1/rel_notes/1_20_3_rel_notes.mdx | 12 ++++
.../1/rel_notes/1_21_0_rel_notes.mdx | 12 ++++
.../1/rel_notes/index.mdx | 4 ++
5 files changed, 97 insertions(+)
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_7_rel_notes.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_19_5_rel_notes.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_20_3_rel_notes.mdx
create mode 100644 product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_21_0_rel_notes.mdx
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_7_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_7_rel_notes.mdx
new file mode 100644
index 00000000000..d890387b3da
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_18_7_rel_notes.mdx
@@ -0,0 +1,57 @@
+---
+title: "EDB Postgres for Kubernetes 1.18.7 release notes"
+navTitle: "Version 1.18.7"
+---
+
+Released: 18 Oct 2023
+
+EDB Postgres for Kubernetes version 1.18.7 is an LTS release of EDB Postgres for Kubernetes; there is no corresponding upstream release of CloudNativePG.
+
+## Highlights of EDB Postgres for Kubernetes 1.18.7
+
+- Changed the default value of `stopDelay` to 1800 seconds instead of 30 seconds
+- Introduced a new parameter, called `smartShutdownTimeout`, to control the
+ window of time reserved for the smart shutdown of Postgres to complete; the
+ general formula to compute the overall timeout to stop Postgres is
+ `max(stopDelay - smartShutdownTimeout, 30)` (see the example after this list)
+- Changed the default value of `startDelay` to 3600, instead of 30 seconds
+- Replaced the livenessProbe initial delay with a more proper Kubernetes
+ startup probe to deal with the start of a Postgres server
+- Changed the default value of `switchoverDelay` to 3600 seconds instead of
+ 40000000 seconds
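+
+For example, with the default `stopDelay` of 1800 seconds and a hypothetical
+`smartShutdownTimeout` of 180 seconds, the overall timeout to stop Postgres
+would be `max(1800 - 180, 30) = 1620` seconds.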
+
+Additionally, this release of EDB Postgres for Kubernetes includes the following:
+
+| Type | Description |
+| ------------ | ------------------------------------------------------------------------------------------------------------------------------ |
+| Security fix | Added a default `seccompProfile` to the operator deployment. |
+| Enhancement | Introduced the `k8s.enterprisedb.io/coredumpFilter` annotation to control the content of a core dump generated in the unlikely event of a PostgreSQL crash, by default set to exclude shared memory segments from the dump. |
+| Enhancement | Allowed configuration of ephemeral-storage limits for the shared memory and temporary data ephemeral volumes. |
+| Enhancement | Validation of resource limits and requests through the webhook. |
+| Enhancement | Ensure that PostgreSQL's `shared_buffers` are coherent with the pods' allocated memory resources. |
+| Enhancement | Added `uri` and `jdbc-uri` fields in the credential secrets to facilitate developers when connecting their applications to the database. |
+| Enhancement | Added a new phase, `Waiting for the instances to become active`, for finer control of a cluster's state waiting for the replicas to be ready. |
+| Enhancement | Improved detection of Pod rollout conditions through the `podSpec` annotation. |
+| Enhancement | Added primary timestamp and uptime to the kubectl plugin's `status` command. |
+| Technical enhancement | Replaced `k8s-api-docgen` with `gen-crd-api-reference-docs` to automatically build the API reference documentation. |
+| Bug fix | Ensure that the primary instance is always recreated first by prioritizing ready PVCs with a primary role. |
+| Bug fix | Honor the `k8s.enterprisedb.io/skipEmptyWalArchiveCheck` annotation during recovery to bypass the check for an empty WAL archive. |
+| Bug fix | Prevent a cluster from being stuck when the PostgreSQL server is down but the pod is up on the primary. |
+| Bug fix | Avoid treating the designated primary in a replica cluster as a regular HA replica when replication slots are enabled. |
+| Bug fix | Reconcile services every time the selectors change or when labels/annotations need to be changed. |
+| Bug fix | Default to `app` for both the owner and database during recovery bootstrap. |
+| Bug fix | Avoid write-read concurrency on cached cluster. |
+| Bug fix | Remove empty items, make them unique and sort in the `ResourceName` sections of the generated roles. |
+| Bug fix | Ensure that the `ContinuousArchiving` condition is properly set to 'failed' in case of errors. |
+| Bug fix | Reconcile PodMonitor `labels` and `annotations`. |
+| Bug fix | Fixed backup failure due to missing RBAC `resourceNames` on the `Role` object. |
+| Observability | Added TCP port label to default `pg_stat_replication` metric. |
+| Observability | Fixed the `pg_wal_stat` default metric for Prometheus. |
+| Observability | Improved the `pg_replication` default metric for Prometheus. |
+| Observability | Used `alertInstanceLabelFilter` instead of `alertName` in the provided Grafana dashboard. |
+| Observability | Enforce `standard_conforming_strings` in metric collection. |
+| Change | Set the default operand image to PostgreSQL 16.0. |
+| Change | Fencing now uses PostgreSQL's fast shutdown instead of smart shutdown to halt an instance. |
+| Change | Rename webhooks from kb.io to k8s.enterprisedb.io group. |
+| Change | Added the `k8s.enterprisedb.io/instanceRole` label and deprecated the existing `role` label. |
+
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_19_5_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_19_5_rel_notes.mdx
new file mode 100644
index 00000000000..30879ecdb11
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_19_5_rel_notes.mdx
@@ -0,0 +1,12 @@
+---
+title: "EDB Postgres for Kubernetes 1.19.5 release notes"
+navTitle: "Version 1.19.5"
+---
+
+Released: 18 Oct 2023
+
+This release of EDB Postgres for Kubernetes includes the following:
+
+| Type | Description |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Upstream merge | Merged with community CloudNativePG 1.19.5. See the community [Release Notes](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/). |
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_20_3_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_20_3_rel_notes.mdx
new file mode 100644
index 00000000000..080f8819a5d
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_20_3_rel_notes.mdx
@@ -0,0 +1,12 @@
+---
+title: "EDB Postgres for Kubernetes 1.20.3 release notes"
+navTitle: "Version 1.20.3"
+---
+
+Released: 18 Oct 2023
+
+This release of EDB Postgres for Kubernetes includes the following:
+
+| Type | Description |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Upstream merge | Merged with community CloudNativePG 1.20.3. See the community [Release Notes](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/). |
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_21_0_rel_notes.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_21_0_rel_notes.mdx
new file mode 100644
index 00000000000..067237c3ffa
--- /dev/null
+++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/1_21_0_rel_notes.mdx
@@ -0,0 +1,12 @@
+---
+title: "EDB Postgres for Kubernetes 1.21.0 release notes"
+navTitle: "Version 1.21.0"
+---
+
+Released: 18 Oct 2023
+
+This release of EDB Postgres for Kubernetes includes the following:
+
+| Type | Description |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Upstream merge | Merged with community CloudNativePG 1.21.0. See the community [Release Notes](https://cloudnative-pg.io/documentation/1.21/release_notes/v1.21/). |
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx
index 0dc85308acc..47ee0ff15ff 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx
@@ -4,14 +4,18 @@ navTitle: "Release notes"
redirects:
- ../release_notes
navigation:
+- 1_21_0_rel_notes
+- 1_20_3_rel_notes
- 1_20_2_rel_notes
- 1_20_1_rel_notes
- 1_20_0_rel_notes
+- 1_19_5_rel_notes
- 1_19_4_rel_notes
- 1_19_3_rel_notes
- 1_19_2_rel_notes
- 1_19_1_rel_notes
- 1_19_0_rel_notes
+- 1_18_7_rel_notes
- 1_18_6_rel_notes
- 1_18_5_rel_notes
- 1_18_4_rel_notes
From 0eeaa216cd08f2cfb5efd9fae2d51dd4cb627434 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 18 Oct 2023 19:15:50 +0000
Subject: [PATCH 4/8] Link fixes and updates to the transformation scripts to
automate this in the future
---
.../1/backup_barmanobjectstore.mdx | 2 +-
.../1/before_you_start.mdx | 4 +-
.../1/cloudnative-pg.v1.mdx | 352 +++++++++++++-----
.../1/installation_upgrade.mdx | 4 +-
.../1/interactive_demo.mdx | 4 +-
.../postgres_for_kubernetes/1/monitoring.mdx | 2 +-
.../1/wal_archiving.mdx | 2 +-
.../cnp/rewrite-mdextra-anchors.mjs | 65 ++++
.../processors/cnp/update-links.mjs | 2 +-
.../processors/cnp/update-yaml-links.mjs | 3 +-
scripts/source/process-cnp-docs.sh | 2 +
11 files changed, 342 insertions(+), 100 deletions(-)
create mode 100644 scripts/fileProcessor/processors/cnp/rewrite-mdextra-anchors.mjs
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
index db1a990242a..f56fdff9f6c 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
@@ -32,7 +32,7 @@ A backup is performed from a primary or a designated primary instance in a
`Cluster` (please refer to
[replica clusters](replica_cluster.md)
for more information about designated primary instances), or alternatively
-on a [standby](#backup-from-a-standby).
+on a [standby](backup/#backup-from-a-standby).
## Common object stores
diff --git a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx
index d67cd01cddd..bb6e683175b 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx
@@ -75,8 +75,8 @@ specific to Kubernetes and PostgreSQL.
[`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/)
: `kubectl` is the command-line tool used to manage a Kubernetes cluster.
-EDB Postgres for Kubernetes requires a Kubernetes version supported by the community. Please refer to the
-["Supported releases"](supported_releases.md) page for details.
+EDB Postgres for Kubernetes requires a Kubernetes version supported by EDB. Please refer to the
+["Platform Compatibility"](https://www.enterprisedb.com/resources/platform-compatibility#pgk8s) page for details.
## PostgreSQL terminology
diff --git a/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
index b04fc1dd9b4..c0d0e581954 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
@@ -12,7 +12,9 @@ originalFilePath: 'src/cloudnative-pg.v1.md'
- [Pooler](#postgresql-k8s-enterprisedb-io-v1-Pooler)
- [ScheduledBackup](#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup)
-## Backup {#postgresql-k8s-enterprisedb-io-v1-Backup}
+
+
+## Backup
Backup is the Schema for the backups API
@@ -47,7 +49,9 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## Cluster {#postgresql-k8s-enterprisedb-io-v1-Cluster}
+
+
+## Cluster
Cluster is the Schema for the PostgreSQL API
@@ -82,7 +86,9 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## Pooler {#postgresql-k8s-enterprisedb-io-v1-Pooler}
+
+
+## Pooler
Pooler is the Schema for the poolers API
@@ -117,7 +123,9 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## ScheduledBackup {#postgresql-k8s-enterprisedb-io-v1-ScheduledBackup}
+
+
+## ScheduledBackup
ScheduledBackup is the Schema for the scheduledbackups API
@@ -152,7 +160,9 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## AffinityConfiguration {#postgresql-k8s-enterprisedb-io-v1-AffinityConfiguration}
+
+
+## AffinityConfiguration
**Appears in:**
@@ -236,7 +246,9 @@ by the operator if EnablePodAntiAffinity is set to true (default) or to be used
-## AzureCredentials {#postgresql-k8s-enterprisedb-io-v1-AzureCredentials}
+
+
+## AzureCredentials
**Appears in:**
@@ -301,7 +313,9 @@ the storage account name
-## BackupConfiguration {#postgresql-k8s-enterprisedb-io-v1-BackupConfiguration}
+
+
+## BackupConfiguration
**Appears in:**
@@ -353,7 +367,9 @@ to have backups run preferably on the most updated standby, if available.
-## BackupMethod {#postgresql-k8s-enterprisedb-io-v1-BackupMethod}
+
+
+## BackupMethod
(Alias of `string`)
@@ -368,7 +384,9 @@ to have backups run preferably on the most updated standby, if available.
BackupMethod defines the way of executing the physical base backups of
the selected PostgreSQL instance
-## BackupPhase {#postgresql-k8s-enterprisedb-io-v1-BackupPhase}
+
+
+## BackupPhase
(Alias of `string`)
@@ -378,7 +396,9 @@ the selected PostgreSQL instance
BackupPhase is the phase of the backup
-## BackupSnapshotElementStatus {#postgresql-k8s-enterprisedb-io-v1-BackupSnapshotElementStatus}
+
+
+## BackupSnapshotElementStatus
**Appears in:**
@@ -406,7 +426,9 @@ the selected PostgreSQL instance
-## BackupSnapshotStatus {#postgresql-k8s-enterprisedb-io-v1-BackupSnapshotStatus}
+
+
+## BackupSnapshotStatus
**Appears in:**
@@ -427,7 +449,9 @@ the selected PostgreSQL instance
-## BackupSource {#postgresql-k8s-enterprisedb-io-v1-BackupSource}
+
+
+## BackupSource
**Appears in:**
@@ -457,7 +481,9 @@ errors with certificate issuer and barman-cloud-wal-archive.
-## BackupSpec {#postgresql-k8s-enterprisedb-io-v1-BackupSpec}
+
+
+## BackupSpec
**Appears in:**
@@ -498,7 +524,9 @@ and volumeSnapshot
. Defaults to: barmanObjectStore
.
-## BackupStatus {#postgresql-k8s-enterprisedb-io-v1-BackupStatus}
+
+
+## BackupStatus
**Appears in:**
@@ -665,7 +693,9 @@ parameter is omitted
-## BackupTarget {#postgresql-k8s-enterprisedb-io-v1-BackupTarget}
+
+
+## BackupTarget
(Alias of `string`)
@@ -679,7 +709,9 @@ parameter is omitted
BackupTarget describes the preferred targets for a backup
-## BarmanCredentials {#postgresql-k8s-enterprisedb-io-v1-BarmanCredentials}
+
+
+## BarmanCredentials
**Appears in:**
@@ -716,7 +748,9 @@ parameter is omitted
-## BarmanObjectStoreConfiguration {#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration}
+
+
+## BarmanObjectStoreConfiguration
**Appears in:**
@@ -809,7 +843,9 @@ Barman --history-tags option.
-## BootstrapConfiguration {#postgresql-k8s-enterprisedb-io-v1-BootstrapConfiguration}
+
+
+## BootstrapConfiguration
**Appears in:**
@@ -849,7 +885,9 @@ PostgreSQL instance
-## BootstrapInitDB {#postgresql-k8s-enterprisedb-io-v1-BootstrapInitDB}
+
+
+## BootstrapInitDB
**Appears in:**
@@ -990,7 +1028,9 @@ the implementation order is same as the order of each array
-## BootstrapPgBaseBackup {#postgresql-k8s-enterprisedb-io-v1-BootstrapPgBaseBackup}
+
+
+## BootstrapPgBaseBackup
**Appears in:**
@@ -1036,7 +1076,9 @@ created from scratch
-## BootstrapRecovery {#postgresql-k8s-enterprisedb-io-v1-BootstrapRecovery}
+
+
+## BootstrapRecovery
**Appears in:**
@@ -1129,7 +1171,9 @@ created from scratch
-## CertificatesConfiguration {#postgresql-k8s-enterprisedb-io-v1-CertificatesConfiguration}
+
+
+## CertificatesConfiguration
**Appears in:**
@@ -1206,7 +1250,9 @@ this can be omitted.
-## CertificatesStatus {#postgresql-k8s-enterprisedb-io-v1-CertificatesStatus}
+
+
+## CertificatesStatus
**Appears in:**
@@ -1234,7 +1280,9 @@ this can be omitted.
-## ClusterSpec {#postgresql-k8s-enterprisedb-io-v1-ClusterSpec}
+
+
+## ClusterSpec
**Appears in:**
@@ -1612,7 +1660,9 @@ Defaults to: RuntimeDefault
-## ClusterStatus {#postgresql-k8s-enterprisedb-io-v1-ClusterStatus}
+
+
+## ClusterStatus
**Appears in:**
@@ -1901,7 +1951,9 @@ This field is reported when spec.failoverDelay is populated or during online upg
-## CompressionType {#postgresql-k8s-enterprisedb-io-v1-CompressionType}
+
+
+## CompressionType
(Alias of `string`)
@@ -1913,7 +1965,9 @@ This field is reported when spec.failoverDelay is populated or during online upg
CompressionType encapsulates the available types of compression
-## ConfigMapKeySelector {#postgresql-k8s-enterprisedb-io-v1-ConfigMapKeySelector}
+
+
+## ConfigMapKeySelector
**Appears in:**
@@ -1944,7 +1998,9 @@ the key of a ConfigMap
-## ConfigMapResourceVersion {#postgresql-k8s-enterprisedb-io-v1-ConfigMapResourceVersion}
+
+
+## ConfigMapResourceVersion
**Appears in:**
@@ -1967,7 +2023,9 @@ Map keys are the config map names, map values are the versions
-## DataBackupConfiguration {#postgresql-k8s-enterprisedb-io-v1-DataBackupConfiguration}
+
+
+## DataBackupConfiguration
**Appears in:**
@@ -2020,7 +2078,9 @@ possible. false
by default.
-## DataSource {#postgresql-k8s-enterprisedb-io-v1-DataSource}
+
+
+## DataSource
**Appears in:**
@@ -2049,7 +2109,9 @@ PostgreSQL cluster from an existing storage
-## EPASConfiguration {#postgresql-k8s-enterprisedb-io-v1-EPASConfiguration}
+
+
+## EPASConfiguration
**Appears in:**
@@ -2077,7 +2139,9 @@ PostgreSQL cluster from an existing storage
-## EmbeddedObjectMetadata {#postgresql-k8s-enterprisedb-io-v1-EmbeddedObjectMetadata}
+
+
+## EmbeddedObjectMetadata
**Appears in:**
@@ -2103,7 +2167,9 @@ PostgreSQL cluster from an existing storage
-## EncryptionType {#postgresql-k8s-enterprisedb-io-v1-EncryptionType}
+
+
+## EncryptionType
(Alias of `string`)
@@ -2115,7 +2181,9 @@ PostgreSQL cluster from an existing storage
EncryptionType encapsulated the available types of encryption
-## EnsureOption {#postgresql-k8s-enterprisedb-io-v1-EnsureOption}
+
+
+## EnsureOption
(Alias of `string`)
@@ -2126,7 +2194,9 @@ PostgreSQL cluster from an existing storage
EnsureOption represents whether we should enforce the presence or absence of
a Role in a PostgreSQL instance
-## EphemeralVolumesSizeLimitConfiguration {#postgresql-k8s-enterprisedb-io-v1-EphemeralVolumesSizeLimitConfiguration}
+
+
+## EphemeralVolumesSizeLimitConfiguration
**Appears in:**
@@ -2155,7 +2225,9 @@ storage
-## ExternalCluster {#postgresql-k8s-enterprisedb-io-v1-ExternalCluster}
+
+
+## ExternalCluster
**Appears in:**
@@ -2222,7 +2294,9 @@ instance
-## GoogleCredentials {#postgresql-k8s-enterprisedb-io-v1-GoogleCredentials}
+
+
+## GoogleCredentials
**Appears in:**
@@ -2252,7 +2326,9 @@ default to false.
-## Import {#postgresql-k8s-enterprisedb-io-v1-Import}
+
+
+## Import
**Appears in:**
@@ -2311,7 +2387,9 @@ database right after is imported - to be used with extreme care
-## ImportSource {#postgresql-k8s-enterprisedb-io-v1-ImportSource}
+
+
+## ImportSource
**Appears in:**
@@ -2332,7 +2410,9 @@ database right after is imported - to be used with extreme care
-## InstanceID {#postgresql-k8s-enterprisedb-io-v1-InstanceID}
+
+
+## InstanceID
**Appears in:**
@@ -2360,7 +2440,9 @@ database right after is imported - to be used with extreme care
-## InstanceReportedState {#postgresql-k8s-enterprisedb-io-v1-InstanceReportedState}
+
+
+## InstanceReportedState
**Appears in:**
@@ -2388,7 +2470,9 @@ database right after is imported - to be used with extreme care
-## LDAPBindAsAuth {#postgresql-k8s-enterprisedb-io-v1-LDAPBindAsAuth}
+
+
+## LDAPBindAsAuth
**Appears in:**
@@ -2417,7 +2501,9 @@ bind authentication for LDAP
-## LDAPBindSearchAuth {#postgresql-k8s-enterprisedb-io-v1-LDAPBindSearchAuth}
+
+
+## LDAPBindSearchAuth
**Appears in:**
@@ -2467,7 +2553,9 @@ the bind+search LDAP authentication process
-## LDAPConfig {#postgresql-k8s-enterprisedb-io-v1-LDAPConfig}
+
+
+## LDAPConfig
**Appears in:**
@@ -2523,7 +2611,9 @@ the bind+search LDAP authentication process
-## LDAPScheme {#postgresql-k8s-enterprisedb-io-v1-LDAPScheme}
+
+
+## LDAPScheme
(Alias of `string`)
@@ -2533,7 +2623,9 @@ the bind+search LDAP authentication process
LDAPScheme defines the possible schemes for LDAP
-## LocalObjectReference {#postgresql-k8s-enterprisedb-io-v1-LocalObjectReference}
+
+
+## LocalObjectReference
**Appears in:**
@@ -2577,7 +2669,9 @@ local object with a known type inside the same namespace
-## ManagedConfiguration {#postgresql-k8s-enterprisedb-io-v1-ManagedConfiguration}
+
+
+## ManagedConfiguration
**Appears in:**
@@ -2599,7 +2693,9 @@ by the instance manager
-## ManagedRoles {#postgresql-k8s-enterprisedb-io-v1-ManagedRoles}
+
+
+## ManagedRoles
**Appears in:**
@@ -2635,7 +2731,9 @@ with an explanation of the cause
-## Metadata {#postgresql-k8s-enterprisedb-io-v1-Metadata}
+
+
+## Metadata
**Appears in:**
@@ -2674,7 +2772,9 @@ More info: http://kubernetes.io/docs/user-guide/annotations
-## MonitoringConfiguration {#postgresql-k8s-enterprisedb-io-v1-MonitoringConfiguration}
+
+
+## MonitoringConfiguration
**Appears in:**
@@ -2719,7 +2819,9 @@ Default: false.
-## NodeMaintenanceWindow {#postgresql-k8s-enterprisedb-io-v1-NodeMaintenanceWindow}
+
+
+## NodeMaintenanceWindow
**Appears in:**
@@ -2751,7 +2853,9 @@ up again) or not (recreate it elsewhere - when instances
>1)
-## PasswordState {#postgresql-k8s-enterprisedb-io-v1-PasswordState}
+
+
+## PasswordState
**Appears in:**
@@ -2779,7 +2883,9 @@ up again) or not (recreate it elsewhere - when instances
>1)
-## PgBouncerIntegrationStatus {#postgresql-k8s-enterprisedb-io-v1-PgBouncerIntegrationStatus}
+
+
+## PgBouncerIntegrationStatus
**Appears in:**
@@ -2799,7 +2905,9 @@ up again) or not (recreate it elsewhere - when instances
>1)
-## PgBouncerPoolMode {#postgresql-k8s-enterprisedb-io-v1-PgBouncerPoolMode}
+
+
+## PgBouncerPoolMode
(Alias of `string`)
@@ -2809,7 +2917,9 @@ up again) or not (recreate it elsewhere - when instances
>1)
PgBouncerPoolMode is the mode of PgBouncer
-## PgBouncerSecrets {#postgresql-k8s-enterprisedb-io-v1-PgBouncerSecrets}
+
+
+## PgBouncerSecrets
**Appears in:**
@@ -2831,7 +2941,9 @@ by pgbouncer
-## PgBouncerSpec {#postgresql-k8s-enterprisedb-io-v1-PgBouncerSpec}
+
+
+## PgBouncerSpec
**Appears in:**
@@ -2898,7 +3010,9 @@ the operator calls PgBouncer's PAUSE
and RESUME
comman
-## PodTemplateSpec {#postgresql-k8s-enterprisedb-io-v1-PodTemplateSpec}
+
+
+## PodTemplateSpec
**Appears in:**
@@ -2936,7 +3050,9 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## PodTopologyLabels {#postgresql-k8s-enterprisedb-io-v1-PodTopologyLabels}
+
+
+## PodTopologyLabels
(Alias of `map[string]string`)
@@ -2946,7 +3062,9 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
PodTopologyLabels represent the topology of a Pod. map[labelName]labelValue
-## PoolerIntegrations {#postgresql-k8s-enterprisedb-io-v1-PoolerIntegrations}
+
+
+## PoolerIntegrations
**Appears in:**
@@ -2966,7 +3084,9 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## PoolerMonitoringConfiguration {#postgresql-k8s-enterprisedb-io-v1-PoolerMonitoringConfiguration}
+
+
+## PoolerMonitoringConfiguration
**Appears in:**
@@ -2990,7 +3110,9 @@ part for now.
-## PoolerSecrets {#postgresql-k8s-enterprisedb-io-v1-PoolerSecrets}
+
+
+## PoolerSecrets
**Appears in:**
@@ -3032,7 +3154,9 @@ part for now.
-## PoolerSpec {#postgresql-k8s-enterprisedb-io-v1-PoolerSpec}
+
+
+## PoolerSpec
**Appears in:**
@@ -3096,7 +3220,9 @@ Pooler name should never match with any cluster name within the same namespace.<
-## PoolerStatus {#postgresql-k8s-enterprisedb-io-v1-PoolerStatus}
+
+
+## PoolerStatus
**Appears in:**
@@ -3124,7 +3250,9 @@ Pooler name should never match with any cluster name within the same namespace.<
-## PoolerType {#postgresql-k8s-enterprisedb-io-v1-PoolerType}
+
+
+## PoolerType
(Alias of `string`)
@@ -3135,7 +3263,9 @@ Pooler name should never match with any cluster name within the same namespace.<
PoolerType is the type of the connection pool, meaning the service
we are targeting. Allowed values are rw
and ro
.
-## PostInitApplicationSQLRefs {#postgresql-k8s-enterprisedb-io-v1-PostInitApplicationSQLRefs}
+
+
+## PostInitApplicationSQLRefs
**Appears in:**
@@ -3166,7 +3296,9 @@ the implementation order is same as the order of each array
-## PostgresConfiguration {#postgresql-k8s-enterprisedb-io-v1-PostgresConfiguration}
+
+
+## PostgresConfiguration
**Appears in:**
@@ -3233,7 +3365,9 @@ big enough to simulate an infinite timeout
-## PrimaryUpdateMethod {#postgresql-k8s-enterprisedb-io-v1-PrimaryUpdateMethod}
+
+
+## PrimaryUpdateMethod
(Alias of `string`)
@@ -3244,7 +3378,9 @@ big enough to simulate an infinite timeout
PrimaryUpdateMethod contains the method to use when upgrading
the primary server of the cluster as part of rolling updates
-## PrimaryUpdateStrategy {#postgresql-k8s-enterprisedb-io-v1-PrimaryUpdateStrategy}
+
+
+## PrimaryUpdateStrategy
(Alias of `string`)
@@ -3255,7 +3391,9 @@ the primary server of the cluster as part of rolling updates
PrimaryUpdateStrategy contains the strategy to follow when upgrading
the primary server of the cluster as part of rolling updates
-## RecoveryTarget {#postgresql-k8s-enterprisedb-io-v1-RecoveryTarget}
+
+
+## RecoveryTarget
**Appears in:**
@@ -3331,7 +3469,9 @@ in Postgres, recovery_target_inclusive
will be true
-## ReplicaClusterConfiguration {#postgresql-k8s-enterprisedb-io-v1-ReplicaClusterConfiguration}
+
+
+## ReplicaClusterConfiguration
**Appears in:**
@@ -3363,7 +3503,9 @@ Refer to the Replica clusters page of the documentation for more information.
-## ReplicationSlotsConfiguration {#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsConfiguration}
+
+
+## ReplicationSlotsConfiguration
**Appears in:**
@@ -3393,7 +3535,9 @@ every updateInterval
seconds (default 30).
-## ReplicationSlotsHAConfiguration {#postgresql-k8s-enterprisedb-io-v1-ReplicationSlotsHAConfiguration}
+
+
+## ReplicationSlotsHAConfiguration
**Appears in:**
@@ -3436,7 +3580,9 @@ This can only be set at creation time. By default set to _cnp_
.
-## RoleConfiguration {#postgresql-k8s-enterprisedb-io-v1-RoleConfiguration}
+
+
+## RoleConfiguration
**Appears in:**
@@ -3581,7 +3727,9 @@ Default is false
.
-## S3Credentials {#postgresql-k8s-enterprisedb-io-v1-S3Credentials}
+
+
+## S3Credentials
**Appears in:**
@@ -3639,7 +3787,9 @@ files to S3. It can be provided in two alternative ways:
-## ScheduledBackupSpec {#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupSpec}
+
+
+## ScheduledBackupSpec
**Appears in:**
@@ -3715,7 +3865,9 @@ and volumeSnapshot
. Defaults to: barmanObjectStore
.
-## ScheduledBackupStatus {#postgresql-k8s-enterprisedb-io-v1-ScheduledBackupStatus}
+
+
+## ScheduledBackupStatus
**Appears in:**
@@ -3750,7 +3902,9 @@ and volumeSnapshot
. Defaults to: barmanObjectStore
.
-## SecretKeySelector {#postgresql-k8s-enterprisedb-io-v1-SecretKeySelector}
+
+
+## SecretKeySelector
**Appears in:**
@@ -3793,7 +3947,9 @@ the key of a Secret
-## SecretVersion {#postgresql-k8s-enterprisedb-io-v1-SecretVersion}
+
+
+## SecretVersion
**Appears in:**
@@ -3823,7 +3979,9 @@ the key of a Secret
-## SecretsResourceVersion {#postgresql-k8s-enterprisedb-io-v1-SecretsResourceVersion}
+
+
+## SecretsResourceVersion
**Appears in:**
@@ -3909,7 +4067,9 @@ Map keys are the secret names, map values are the versions
-## ServiceAccountTemplate {#postgresql-k8s-enterprisedb-io-v1-ServiceAccountTemplate}
+
+
+## ServiceAccountTemplate
**Appears in:**
@@ -3931,7 +4091,9 @@ service account
-## SnapshotOwnerReference {#postgresql-k8s-enterprisedb-io-v1-SnapshotOwnerReference}
+
+
+## SnapshotOwnerReference
(Alias of `string`)
@@ -3942,7 +4104,9 @@ service account
SnapshotOwnerReference defines the reference type for the owner of the snapshot.
This specifies which owner the processed resources should relate to.
-## SnapshotType {#postgresql-k8s-enterprisedb-io-v1-SnapshotType}
+
+
+## SnapshotType
(Alias of `string`)
@@ -3952,7 +4116,9 @@ This specifies which owner the processed resources should relate to.
SnapshotType is a type of allowed import
-## StorageConfiguration {#postgresql-k8s-enterprisedb-io-v1-StorageConfiguration}
+
+
+## StorageConfiguration
**Appears in:**
@@ -3999,7 +4165,9 @@ Size cannot be decreased.
-## SyncReplicaElectionConstraints {#postgresql-k8s-enterprisedb-io-v1-SyncReplicaElectionConstraints}
+
+
+## SyncReplicaElectionConstraints
**Appears in:**
@@ -4030,7 +4198,9 @@ if all the labels values match.
-## TDEConfiguration {#postgresql-k8s-enterprisedb-io-v1-TDEConfiguration}
+
+
+## TDEConfiguration
**Appears in:**
@@ -4080,7 +4250,9 @@ passed to the OpenSSL command to encrypt and decrypt
-## Topology {#postgresql-k8s-enterprisedb-io-v1-Topology}
+
+
+## Topology
**Appears in:**
@@ -4120,7 +4292,9 @@ in synchronous replica election in case of failures
-## VolumeSnapshotConfiguration {#postgresql-k8s-enterprisedb-io-v1-VolumeSnapshotConfiguration}
+
+
+## VolumeSnapshotConfiguration
**Appears in:**
@@ -4170,7 +4344,9 @@ It is the default class for the other types if no specific class is present
-## WalBackupConfiguration {#postgresql-k8s-enterprisedb-io-v1-WalBackupConfiguration}
+
+
+## WalBackupConfiguration
**Appears in:**
diff --git a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
index 1374e5c1ef5..515377e9248 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
@@ -155,7 +155,7 @@ plane for self-managed Kubernetes installations).
## Upgrades
!!! Important
- Please carefully read the [release notes](release_notes.md)
+ Please carefully read the [release notes](rel_notes)
before performing an upgrade as some versions might require
extra steps.
@@ -242,7 +242,7 @@ least monthly. If you are unable to apply updates as each version becomes
available, we recommend upgrading through each version in sequential order to
come current periodically and not skipping versions.
-The [release notes](release_notes.md) page contains a detailed list of the
+The [release notes](rel_notes) page contains a detailed list of the
changes introduced in every released version of EDB Postgres for Kubernetes,
and it must be read before upgrading to a newer version of the software.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx b/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx
index 31ebaa7366d..f2f31679ba6 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx
@@ -136,7 +136,7 @@ EOF
!!! Note "There's more"
For more detailed information about the available options, please refer
- to the ["API Reference" section](api_reference.md).
+ to the ["API Reference" section](cloudnative-pg.v1.md).
In order to create the 3-node PostgreSQL cluster, you need to run the following command:
@@ -358,7 +358,7 @@ status:
By default, the operator will install the latest available minor version
of the latest major version of PostgreSQL when the operator was released.
You can override this by setting [the `imageName` key in the `spec` section of
- the `Cluster` definition](api_reference/#clusterspec).
+ the `Cluster` definition](cloudnative-pg.v1/#clusterspec).
!!! Important
The immutable infrastructure paradigm requires that you always
diff --git a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
index 80daa8fc175..2229b81fe41 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
@@ -442,7 +442,7 @@ data:
```
A list of basic monitoring queries can be found in the
-[`default-monitoring.yaml` file](default-monitoring.yaml)
+[`default-monitoring.yaml` file](../default-monitoring.yaml)
that is already installed in your EDB Postgres for Kubernetes deployment (see ["Default set of metrics"](#default-set-of-metrics)).
#### Example of a user defined metric running on multiple databases
diff --git a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
index c3de7ae5f3a..92feb23166c 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
@@ -16,7 +16,7 @@ the ["Backup on object stores" section](backup_barmanobjectstore.md) to set up
the WAL archive.
!!! Info
- Please refer to [`BarmanObjectStoreConfiguration`](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-barmanobjectstoreconfiguration)
+ Please refer to [`BarmanObjectStoreConfiguration`](cloudnative-pg.v1.md#postgresql-k8s-enterprisedb-io-v1-BarmanObjectStoreConfiguration)
in the API reference for a full list of options.
If required, you can choose to compress WAL files as soon as they
diff --git a/scripts/fileProcessor/processors/cnp/rewrite-mdextra-anchors.mjs b/scripts/fileProcessor/processors/cnp/rewrite-mdextra-anchors.mjs
new file mode 100644
index 00000000000..c88a709a18c
--- /dev/null
+++ b/scripts/fileProcessor/processors/cnp/rewrite-mdextra-anchors.mjs
@@ -0,0 +1,65 @@
+// Rewrites MDExtra headings with embedded fragment identifiers, e.g.
+// ## A heading {#with-an-identifier-not-based-on-title}
+// into something slightly more compatible with GFM / MDX:
+//
+// ## A heading
+
+import toVFile from "to-vfile";
+import remarkParse from "remark-parse";
+import mdx from "remark-mdx";
+import unified from "unified";
+import remarkFrontmatter from "remark-frontmatter";
+import remarkStringify from "remark-stringify";
+import admonitions from "remark-admonitions";
+import visit from "unist-util-visit";
+import mdast2string from "mdast-util-to-string";
+
+export const process = async (filename, content) => {
+ const processor = unified()
+ .use(remarkParse)
+ .use(remarkStringify, { emphasis: "*", bullet: "-", fences: true })
+ .use(admonitions, {
+ tag: "!!!",
+ icons: "none",
+ infima: true,
+ customTypes: {
+ seealso: "note",
+ hint: "tip",
+ interactive: "interactive",
+ },
+ })
+ .use(remarkFrontmatter)
+ .use(mdx)
+ .use(headingRewriter);
+
+ const output = await processor.process(
+ toVFile({ path: filename, contents: content }),
+ );
+
+ return {
+ newFilename: filename,
+ newContent: output.contents.toString(),
+ };
+};
+
+function headingRewriter() {
+ const anchorRE = /{#([^}]+)}/;
+ return (tree) => {
+ // heading rewriter:
+ // - strip MDExtra-style {#anchor} identifiers from headings and re-insert
+ //   them as explicit anchor elements ahead of the heading
+ visit(tree, "heading", (node, index, parent) => {
+ let text = mdast2string(node);
+ let anchor = text.match(anchorRE);
+ if (!anchor) return;
+
+ // remove the anchor syntax from this heading
+ text = text.replace(anchorRE, "");
+ node.children = [{ type: "text", value: text }];
+
+ // ...and insert it as an HTML (JSX) literal
+ anchor = { type: "jsx", value: `<a id="${anchor[1]}"></a>` };
+ parent.children.splice(index, 0, anchor);
+ });
+ };
+}
+
diff --git a/scripts/fileProcessor/processors/cnp/update-links.mjs b/scripts/fileProcessor/processors/cnp/update-links.mjs
index 17c8fb3455c..bb26ae5161a 100644
--- a/scripts/fileProcessor/processors/cnp/update-links.mjs
+++ b/scripts/fileProcessor/processors/cnp/update-links.mjs
@@ -43,7 +43,7 @@ function linkRewriter() {
visit(tree, "link", (node) => {
if (node.url === "supported_releases.md")
node.url = "/resources/platform-compatibility#pgk8s";
- else if (node.url === "release_nodes.md")
+ else if (node.url === "release_notes.md")
node.url = "rel_notes";
});
};
diff --git a/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs b/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs
index f307fb71989..582513f4d08 100644
--- a/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs
+++ b/scripts/fileProcessor/processors/cnp/update-yaml-links.mjs
@@ -39,12 +39,11 @@ export const process = async (filename, content) => {
function linkRewriter() {
return (tree) => {
// link rewriter:
- // - only links to .yaml files in samples dir
// - make relative to parent (because gatsby URL paths are always directories)
visit(tree, "link", (node) => {
if (isAbsoluteUrl(node.url) || node.url[0] === "/") return;
if (!node.url.includes(".yaml")) return;
- node.url = node.url.replace(/^(?:\.\/)?samples\//, "../samples/");
+ node.url = node.url.replace(/^\/?/, "../");
});
};
}
diff --git a/scripts/source/process-cnp-docs.sh b/scripts/source/process-cnp-docs.sh
index c45b0851126..4bb2c7ca99e 100755
--- a/scripts/source/process-cnp-docs.sh
+++ b/scripts/source/process-cnp-docs.sh
@@ -30,7 +30,9 @@ node $DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs \
node $DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs \
-f "src/**/*.md" \
-p "cnp/replace-github-urls" \
+ -p "cnp/update-links" \
-p "cnp/update-yaml-links" \
+ -p "cnp/rewrite-mdextra-anchors" \
-p "cnp/add-frontmatters" \
-p "cnp/rename-to-mdx"
From c128b8827d3807729c0e707820a0f2ad209cf33b Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 18 Oct 2023 19:40:37 +0000
Subject: [PATCH 5/8] Strip HTML comments - they cause issues with JSX in some
situations
---
.../docs/postgres_for_kubernetes/1/backup.mdx | 2 +-
.../1/backup_volumesnapshot.mdx | 2 +-
.../1/cloudnative-pg.v1.mdx | 208 +++++++++---------
.../docs/postgres_for_kubernetes/1/faq.mdx | 108 +--------
.../postgres_for_kubernetes/1/logging.mdx | 2 +-
.../postgres_for_kubernetes/1/monitoring.mdx | 2 +-
.../postgres_for_kubernetes/1/quickstart.mdx | 5 +-
.../postgres_for_kubernetes/1/recovery.mdx | 4 +-
.../postgres_for_kubernetes/1/scheduling.mdx | 2 +-
.../postgres_for_kubernetes/1/storage.mdx | 2 +-
.../processors/cnp/strip-html-comments.mjs | 51 +++++
scripts/source/process-cnp-docs.sh | 1 +
12 files changed, 168 insertions(+), 221 deletions(-)
create mode 100644 scripts/fileProcessor/processors/cnp/strip-html-comments.mjs
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
index 0df10cb2fe3..0a4bf30fc3a 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
@@ -314,7 +314,7 @@ Events:
## Backup from a standby
-
+
Taking a base backup requires to scrape the whole data content of the
PostgreSQL instance on disk, possibly resulting in I/O contention with the
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
index 69e31328380..5540145a441 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
@@ -174,7 +174,7 @@ volume snapshot class.
!!! Important
If you are interested in testing the example, please read
- ["Volume Snapshots" for the Amazon Elastic Block Store (EBS) CSI driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/tree/master/examples/kubernetes/snapshot)
+ ["Volume Snapshots" for the Amazon Elastic Block Store (EBS) CSI driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/tree/master/examples/kubernetes/snapshot)
for detailed instructions on the installation process for the storage class and the snapshot class.
The following manifest creates a `Cluster` that is ready to be used for volume
diff --git a/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
index c0d0e581954..4f568ff413b 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
@@ -14,7 +14,7 @@ originalFilePath: 'src/cloudnative-pg.v1.md'
-## Backup
+## Backup
Backup is the Schema for the backups API
@@ -51,7 +51,7 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## Cluster
+## Cluster
Cluster is the Schema for the PostgreSQL API
@@ -88,7 +88,7 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## Pooler
+## Pooler
Pooler is the Schema for the poolers API
@@ -125,7 +125,7 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## ScheduledBackup
+## ScheduledBackup
ScheduledBackup is the Schema for the scheduledbackups API
@@ -162,7 +162,7 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## AffinityConfiguration
+## AffinityConfiguration
**Appears in:**
@@ -248,7 +248,7 @@ by the operator if EnablePodAntiAffinity is set to true (default) or to be used
-## AzureCredentials
+## AzureCredentials
**Appears in:**
@@ -315,7 +315,7 @@ the storage account name
-## BackupConfiguration
+## BackupConfiguration
**Appears in:**
@@ -369,7 +369,7 @@ to have backups run preferably on the most updated standby, if available.
-## BackupMethod
+## BackupMethod
(Alias of `string`)
@@ -386,7 +386,7 @@ the selected PostgreSQL instance
-## BackupPhase
+## BackupPhase
(Alias of `string`)
@@ -398,7 +398,7 @@ the selected PostgreSQL instance
-## BackupSnapshotElementStatus
+## BackupSnapshotElementStatus
**Appears in:**
@@ -428,7 +428,7 @@ the selected PostgreSQL instance
-## BackupSnapshotStatus
+## BackupSnapshotStatus
**Appears in:**
@@ -451,7 +451,7 @@ the selected PostgreSQL instance
-## BackupSource
+## BackupSource
**Appears in:**
@@ -483,7 +483,7 @@ errors with certificate issuer and barman-cloud-wal-archive.
-## BackupSpec
+## BackupSpec
**Appears in:**
@@ -526,7 +526,7 @@ and volumeSnapshot
. Defaults to: barmanObjectStore
.
-## BackupStatus
+## BackupStatus
**Appears in:**
@@ -695,7 +695,7 @@ parameter is omitted
-## BackupTarget
+## BackupTarget
(Alias of `string`)
@@ -711,7 +711,7 @@ parameter is omitted
-## BarmanCredentials
+## BarmanCredentials
**Appears in:**
@@ -750,7 +750,7 @@ parameter is omitted
-## BarmanObjectStoreConfiguration
+## BarmanObjectStoreConfiguration
**Appears in:**
@@ -845,7 +845,7 @@ Barman --history-tags option.
-## BootstrapConfiguration
+## BootstrapConfiguration
**Appears in:**
@@ -887,7 +887,7 @@ PostgreSQL instance
-## BootstrapInitDB
+## BootstrapInitDB
**Appears in:**
@@ -1030,7 +1030,7 @@ the implementation order is same as the order of each array
-## BootstrapPgBaseBackup
+## BootstrapPgBaseBackup
**Appears in:**
@@ -1078,7 +1078,7 @@ created from scratch
-## BootstrapRecovery
+## BootstrapRecovery
**Appears in:**
@@ -1173,7 +1173,7 @@ created from scratch
-## CertificatesConfiguration
+## CertificatesConfiguration
**Appears in:**
@@ -1191,15 +1191,15 @@ created from scratch
The secret containing the Server CA certificate. If not defined, a new secret will be created
-with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
-
-Contains:
-
+with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+Contains:
+
ca.crt : CA that should be used to validate the server certificate,
-used as sslrootcert in client connection strings.
+used as sslrootcert in client connection strings.
ca.key : key used to generate Server SSL certs, if ServerTLSSecret is provided,
-this can be omitted.
+this can be omitted.
|
@@ -1228,15 +1228,15 @@ created using the provided CA.
The secret containing the Client CA certificate. If not defined, a new secret will be created
-with a self-signed CA and will be used to generate all the client certificates.
-
-Contains:
-
+with a self-signed CA and will be used to generate all the client certificates.
+
+Contains:
+
ca.crt : CA that should be used to validate the client certificates,
-used as ssl_ca_file of all the instances.
+used as ssl_ca_file of all the instances.
ca.key : key used to generate client certificates, if ReplicationTLSSecret is provided,
-this can be omitted.
+this can be omitted.
|
@@ -1252,7 +1252,7 @@ this can be omitted.
-## CertificatesStatus
+## CertificatesStatus
**Appears in:**
@@ -1282,7 +1282,7 @@ this can be omitted.
-## ClusterSpec
+## ClusterSpec
**Appears in:**
@@ -1662,7 +1662,7 @@ Defaults to: RuntimeDefault
-## ClusterStatus
+## ClusterStatus
**Appears in:**
@@ -1953,7 +1953,7 @@ This field is reported when spec.failoverDelay is populated or during online upg
-## CompressionType
+## CompressionType
(Alias of `string`)
@@ -1967,7 +1967,7 @@ This field is reported when spec.failoverDelay is populated or during online upg
-## ConfigMapKeySelector
+## ConfigMapKeySelector
**Appears in:**
@@ -2000,7 +2000,7 @@ the key of a ConfigMap
-## ConfigMapResourceVersion
+## ConfigMapResourceVersion
**Appears in:**
@@ -2025,7 +2025,7 @@ Map keys are the config map names, map values are the versions
-## DataBackupConfiguration
+## DataBackupConfiguration
**Appears in:**
@@ -2080,7 +2080,7 @@ possible. false
by default.
-## DataSource
+## DataSource
**Appears in:**
@@ -2111,7 +2111,7 @@ PostgreSQL cluster from an existing storage
-## EPASConfiguration
+## EPASConfiguration
**Appears in:**
@@ -2141,7 +2141,7 @@ PostgreSQL cluster from an existing storage
-## EmbeddedObjectMetadata
+## EmbeddedObjectMetadata
**Appears in:**
@@ -2169,7 +2169,7 @@ PostgreSQL cluster from an existing storage
-## EncryptionType
+## EncryptionType
(Alias of `string`)
@@ -2183,7 +2183,7 @@ PostgreSQL cluster from an existing storage
-## EnsureOption
+## EnsureOption
(Alias of `string`)
@@ -2196,7 +2196,7 @@ a Role in a PostgreSQL instance
-## EphemeralVolumesSizeLimitConfiguration
+## EphemeralVolumesSizeLimitConfiguration
**Appears in:**
@@ -2227,7 +2227,7 @@ storage
-## ExternalCluster
+## ExternalCluster
**Appears in:**
@@ -2296,7 +2296,7 @@ instance
-## GoogleCredentials
+## GoogleCredentials
**Appears in:**
@@ -2328,7 +2328,7 @@ default to false.
-## Import
+## Import
**Appears in:**
@@ -2389,7 +2389,7 @@ database right after is imported - to be used with extreme care
-## ImportSource
+## ImportSource
**Appears in:**
@@ -2412,7 +2412,7 @@ database right after is imported - to be used with extreme care
-## InstanceID
+## InstanceID
**Appears in:**
@@ -2442,7 +2442,7 @@ database right after is imported - to be used with extreme care
-## InstanceReportedState
+## InstanceReportedState
**Appears in:**
@@ -2472,7 +2472,7 @@ database right after is imported - to be used with extreme care
-## LDAPBindAsAuth
+## LDAPBindAsAuth
**Appears in:**
@@ -2503,7 +2503,7 @@ bind authentication for LDAP
-## LDAPBindSearchAuth
+## LDAPBindSearchAuth
**Appears in:**
@@ -2555,7 +2555,7 @@ the bind+search LDAP authentication process
-## LDAPConfig
+## LDAPConfig
**Appears in:**
@@ -2613,7 +2613,7 @@ the bind+search LDAP authentication process
-## LDAPScheme
+## LDAPScheme
(Alias of `string`)
@@ -2625,7 +2625,7 @@ the bind+search LDAP authentication process
-## LocalObjectReference
+## LocalObjectReference
**Appears in:**
@@ -2671,7 +2671,7 @@ local object with a known type inside the same namespace
-## ManagedConfiguration
+## ManagedConfiguration
**Appears in:**
@@ -2695,7 +2695,7 @@ by the instance manager
-## ManagedRoles
+## ManagedRoles
**Appears in:**
@@ -2733,7 +2733,7 @@ with an explanation of the cause
-## Metadata
+## Metadata
**Appears in:**
@@ -2774,7 +2774,7 @@ More info: http://kubernetes.io/docs/user-guide/annotations
-## MonitoringConfiguration
+## MonitoringConfiguration
**Appears in:**
@@ -2821,7 +2821,7 @@ Default: false.
-## NodeMaintenanceWindow
+## NodeMaintenanceWindow
**Appears in:**
@@ -2855,7 +2855,7 @@ up again) or not (recreate it elsewhere - when instances
>1)
-## PasswordState
+## PasswordState
**Appears in:**
@@ -2885,7 +2885,7 @@ up again) or not (recreate it elsewhere - when instances
>1)
-## PgBouncerIntegrationStatus
+## PgBouncerIntegrationStatus
**Appears in:**
@@ -2907,7 +2907,7 @@ up again) or not (recreate it elsewhere - when instances
>1)
-## PgBouncerPoolMode
+## PgBouncerPoolMode
(Alias of `string`)
@@ -2919,7 +2919,7 @@ up again) or not (recreate it elsewhere - when instances
>1)
-## PgBouncerSecrets
+## PgBouncerSecrets
**Appears in:**
@@ -2943,7 +2943,7 @@ by pgbouncer
-## PgBouncerSpec
+## PgBouncerSpec
**Appears in:**
@@ -3012,7 +3012,7 @@ the operator calls PgBouncer's PAUSE
and RESUME
commands
-## PodTemplateSpec
+## PodTemplateSpec
**Appears in:**
@@ -3052,7 +3052,7 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## PodTopologyLabels
+## PodTopologyLabels
(Alias of `map[string]string`)
@@ -3064,7 +3064,7 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## PoolerIntegrations
+## PoolerIntegrations
**Appears in:**
@@ -3086,7 +3086,7 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
-## PoolerMonitoringConfiguration
+## PoolerMonitoringConfiguration
**Appears in:**
@@ -3112,7 +3112,7 @@ part for now.
-## PoolerSecrets
+## PoolerSecrets
**Appears in:**
@@ -3156,7 +3156,7 @@ part for now.
-## PoolerSpec
+## PoolerSpec
**Appears in:**
@@ -3222,7 +3222,7 @@ Pooler name should never match with any cluster name within the same namespace.
-## PoolerStatus
+## PoolerStatus
**Appears in:**
@@ -3252,7 +3252,7 @@ Pooler name should never match with any cluster name within the same namespace.
-## PoolerType
+## PoolerType
(Alias of `string`)
@@ -3265,7 +3265,7 @@ we are targeting. Allowed values are rw
and ro
.
-## PostInitApplicationSQLRefs
+## PostInitApplicationSQLRefs
**Appears in:**
@@ -3298,7 +3298,7 @@ the implementation order is same as the order of each array
-## PostgresConfiguration
+## PostgresConfiguration
**Appears in:**
@@ -3367,7 +3367,7 @@ big enough to simulate an infinite timeout
-## PrimaryUpdateMethod
+## PrimaryUpdateMethod
(Alias of `string`)
@@ -3380,7 +3380,7 @@ the primary server of the cluster as part of rolling updates
-## PrimaryUpdateStrategy
+## PrimaryUpdateStrategy
(Alias of `string`)
@@ -3393,7 +3393,7 @@ the primary server of the cluster as part of rolling updates
-## RecoveryTarget
+## RecoveryTarget
**Appears in:**
@@ -3471,7 +3471,7 @@ in Postgres, recovery_target_inclusive
will be true
-## ReplicaClusterConfiguration
+## ReplicaClusterConfiguration
**Appears in:**
@@ -3505,7 +3505,7 @@ Refer to the Replica clusters page of the documentation for more information.
-## ReplicationSlotsConfiguration
+## ReplicationSlotsConfiguration
**Appears in:**
@@ -3537,7 +3537,7 @@ every updateInterval
seconds (default 30).
-## ReplicationSlotsHAConfiguration
+## ReplicationSlotsHAConfiguration
**Appears in:**
@@ -3582,7 +3582,7 @@ This can only be set at creation time. By default set to _cnp_
.
-## RoleConfiguration
+## RoleConfiguration
**Appears in:**
@@ -3729,7 +3729,7 @@ Default is false
.
-## S3Credentials
+## S3Credentials
**Appears in:**
@@ -3789,7 +3789,7 @@ files to S3. It can be provided in two alternative ways:
-## ScheduledBackupSpec
+## ScheduledBackupSpec
**Appears in:**
@@ -3834,11 +3834,11 @@ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
string
- Indicates which ownerReference should be put inside the created backup resources.
+ Indicates which ownerReference should be put inside the created backup resources.
-- none: no owner reference for created backup objects (same behavior as before the field was introduced)
-- self: sets the Scheduled backup object as owner of the backup
-- cluster: set the cluster as owner of the backup
+- none: no owner reference for created backup objects (same behavior as before the field was introduced)
+- self: sets the Scheduled backup object as owner of the backup
+- cluster: set the cluster as owner of the backup
|
@@ -3867,7 +3867,7 @@ and volumeSnapshot
. Defaults to: barmanObjectStore
.
-## ScheduledBackupStatus
+## ScheduledBackupStatus
**Appears in:**
@@ -3904,7 +3904,7 @@ and volumeSnapshot
. Defaults to: barmanObjectStore
.
-## SecretKeySelector
+## SecretKeySelector
**Appears in:**
@@ -3949,7 +3949,7 @@ the key of a Secret
-## SecretVersion
+## SecretVersion
**Appears in:**
@@ -3981,7 +3981,7 @@ the key of a Secret
-## SecretsResourceVersion
+## SecretsResourceVersion
**Appears in:**
@@ -4069,7 +4069,7 @@ Map keys are the secret names, map values are the versions
-## ServiceAccountTemplate
+## ServiceAccountTemplate
**Appears in:**
@@ -4093,7 +4093,7 @@ service account
-## SnapshotOwnerReference
+## SnapshotOwnerReference
(Alias of `string`)
@@ -4106,7 +4106,7 @@ This specifies which owner the processed resources should relate to.
-## SnapshotType
+## SnapshotType
(Alias of `string`)
@@ -4118,7 +4118,7 @@ This specifies which owner the processed resources should relate to.
-## StorageConfiguration
+## StorageConfiguration
**Appears in:**
@@ -4167,7 +4167,7 @@ Size cannot be decreased.
-## SyncReplicaElectionConstraints
+## SyncReplicaElectionConstraints
**Appears in:**
@@ -4200,7 +4200,7 @@ if all the labels values match.
-## TDEConfiguration
+## TDEConfiguration
**Appears in:**
@@ -4252,7 +4252,7 @@ passed to the OpenSSL command to encrypt and decrypt
-## Topology
+## Topology
**Appears in:**
@@ -4294,7 +4294,7 @@ in synchronous replica election in case of failures
-## VolumeSnapshotConfiguration
+## VolumeSnapshotConfiguration
**Appears in:**
@@ -4346,7 +4346,7 @@ It is the default class for the other types if no specific class is present
-## WalBackupConfiguration
+## WalBackupConfiguration
**Appears in:**
diff --git a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
index 45a79ef1a60..1ede2469ff4 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
@@ -236,92 +236,7 @@ failover?**
No. The operator does that automatically for you, and relies on `pg_rewind` to
synchronize the former primary with the new one.
-
## Database management
@@ -485,25 +400,4 @@ access.
Teams can then create another user for read-write operations through the
["Declarative role management"](declarative_role_management.md) feature
-and assign the required `GRANT` to the tables.
-
-
\ No newline at end of file
+and assign the required `GRANT` to the tables.
\ No newline at end of file
diff --git a/product_docs/docs/postgres_for_kubernetes/1/logging.mdx b/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
index 7848ed30eb7..2df5ae5d1ad 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
@@ -176,7 +176,7 @@ See the example below:
```
Please refer to the
-[PGAudit documentation](https://github.com/pgaudit/pgaudit/blob/master/README.md#format)
+[PGAudit documentation](https://github.com/pgaudit/pgaudit/blob/master/README.md#format)
for more details about each field in a record.
## EDB Audit logs
diff --git a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
index 2229b81fe41..864ebae1d00 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
@@ -544,7 +544,7 @@ Here is a short description of all the available fields:
- `<name>`: the name of the Prometheus metric
- `query`: the SQL query to run on the target database to generate the metrics
- `primary`: whether to run the query only on the primary instance
- - `master`: same as `primary` (for compatibility with the Prometheus PostgreSQL exporter's syntax - deprecated)
+ - `master`: same as `primary` (for compatibility with the Prometheus PostgreSQL exporter's syntax - deprecated)
- `runonserver`: a semantic version range to limit the versions of PostgreSQL the query should run on
(e.g. `">=11.0.0"` or `">=12.0.0 <=15.0.0"`)
- `target_databases`: a list of databases to run the `query` against,
diff --git a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
index 3046c0ccb02..5efb3e3c3a3 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
@@ -7,14 +7,14 @@ This section describes how to test a PostgreSQL cluster on your laptop/computer
using EDB Postgres for Kubernetes on a local Kubernetes cluster in [Kind](https://kind.sigs.k8s.io/) or
[Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/).
-
+
!!! Tip "Live demonstration"
Don't want to install anything locally just yet? Try a demonstration directly in your browser:
[EDB Postgres for Kubernetes Operator Interactive Quickstart](interactive_demo)
-
+
Red Hat OpenShift Container Platform users can test the certified operator for
EDB Postgres for Kubernetes on [Red Hat OpenShift Local](https://developers.redhat.com/products/openshift-local/overview) (formerly Red Hat CodeReady Containers).
@@ -99,6 +99,7 @@ kind create cluster --name pg
The `crc start` output will explain how to proceed.
+
3. Execute the output of the `crc oc-env` command.
4. Log in as `kubeadmin` with the printed `oc login`
diff --git a/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
index c2578e9e2ba..79edae0bde6 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
@@ -511,7 +511,7 @@ With the above configuration, the following will happen after recovery is comple
## How recovery works under the hood
-
+
You can use the data uploaded to the object storage to *bootstrap* a
new cluster from a previously taken backup.
@@ -561,7 +561,7 @@ manager running in the Pods.
## Restoring into a cluster with a backup section
-
+
A manifest for a cluster restore may include a `backup` section.
This means that the new cluster, after recovery, will start archiving WAL's and
diff --git a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
index aee1552fdb6..84cbe9d8874 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
@@ -82,7 +82,7 @@ The aforementioned default behavior can be changed by tweaking the above setting
`preferredDuringSchedulingIgnoredDuringExecution`. Please be aware that such a
strong requirement might result in pending instances in case resources are not
available (which is an expected condition when using
-[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler)
+[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler)
for automated horizontal scaling of a Kubernetes cluster).
!!! Seealso "Inter-pod affinity and anti-affinity"
diff --git a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
index 6df381052f0..1ab33c76d11 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
@@ -269,7 +269,7 @@ cluster-example-3 1/1 Running 0 2m10s
```
An Azure disk can only be expanded while in the "unattached" state, as described in the
-[docs](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/known-issues/sizegrow.md).
+[docs](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/known-issues/sizegrow.md).
This means that, to resize a disk used by a PostgreSQL cluster, you will need to perform a manual rollout,
first cordoning the node that hosts the Pod using the PVC bound to the disk. This will prevent the Operator
from recreating the Pod and immediately reattaching it to its PVC before the background disk resizing has completed.
diff --git a/scripts/fileProcessor/processors/cnp/strip-html-comments.mjs b/scripts/fileProcessor/processors/cnp/strip-html-comments.mjs
new file mode 100644
index 00000000000..41f0ddd6113
--- /dev/null
+++ b/scripts/fileProcessor/processors/cnp/strip-html-comments.mjs
@@ -0,0 +1,51 @@
+// HTML comments (<!-- ... -->) are not valid in MDX
+// strip them out completely
+
+import toVFile from "to-vfile";
+import remarkParse from "remark-parse";
+import mdx from "remark-mdx";
+import unified from "unified";
+import remarkFrontmatter from "remark-frontmatter";
+import remarkStringify from "remark-stringify";
+import admonitions from "remark-admonitions";
+import visit from "unist-util-visit";
+
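+// Parse the file as MDX (preserving frontmatter and admonitions),
+// drop HTML comment nodes, and re-serialize under the same filename.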
+export const process = async (filename, content) => {
+ const processor = unified()
+ .use(remarkParse)
+ .use(remarkStringify, { emphasis: "*", bullet: "-", fences: true })
+ .use(admonitions, {
+ tag: "!!!",
+ icons: "none",
+ infima: true,
+ customTypes: {
+ seealso: "note",
+ hint: "tip",
+ interactive: "interactive",
+ },
+ })
+ .use(remarkFrontmatter)
+ .use(mdx)
+ .use(stripComments);
+
+ const output = await processor.process(
+ toVFile({ path: filename, contents: content }),
+ );
+
+ return {
+ newFilename: filename,
+ newContent: output.contents.toString(),
+ };
+};
+
+function stripComments() {
+ return (tree) => {
+ visit(tree, "jsx", (node) => {
+ // todo: use HAST parser here - this is not reliable
+
+ // strip (potentially NON-EMPTY) HTML comments - these are not valid in JSX
+      node.value = node.value.replace(/<!--[\s\S]*?-->/g, "");
+ });
+ };
+}
+
diff --git a/scripts/source/process-cnp-docs.sh b/scripts/source/process-cnp-docs.sh
index 4bb2c7ca99e..77bd254a921 100755
--- a/scripts/source/process-cnp-docs.sh
+++ b/scripts/source/process-cnp-docs.sh
@@ -33,6 +33,7 @@ node $DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs \
-p "cnp/update-links" \
-p "cnp/update-yaml-links" \
-p "cnp/rewrite-mdextra-anchors" \
+ -p "cnp/strip-html-comments" \
-p "cnp/add-frontmatters" \
-p "cnp/rename-to-mdx"
From cf100f834d0c29c9d40d8cdda1712e343339f5e8 Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 18 Oct 2023 20:50:44 +0000
Subject: [PATCH 6/8] More link fixes, redirects and demo update
---
.../docs/postgres_for_kubernetes/1/addons.mdx | 2 +-
.../1/applications.mdx | 2 +-
.../1/architecture.mdx | 2 +-
.../docs/postgres_for_kubernetes/1/backup.mdx | 2 +-
.../1/backup_barmanobjectstore.mdx | 12 +-
.../1/backup_recovery.mdx | 2 +-
.../1/backup_volumesnapshot.mdx | 4 +-
.../1/before_you_start.mdx | 2 +-
.../1/benchmarking.mdx | 2 +-
.../postgres_for_kubernetes/1/bootstrap.mdx | 2 +-
.../1/certificates.mdx | 2 +-
.../1/cloudnative-pg.v1.mdx | 4 +-
.../1/cluster_conf.mdx | 2 +-
.../1/connection_pooling.mdx | 2 +-
.../1/container_images.mdx | 2 +-
.../postgres_for_kubernetes/1/controller.mdx | 2 +-
.../1/database_import.mdx | 2 +-
.../1/declarative_hibernation.mdx | 2 +-
.../1/declarative_role_management.mdx | 2 +-
.../postgres_for_kubernetes/1/evaluation.mdx | 2 +-
.../1/expose_pg_services.mdx | 2 +-
.../postgres_for_kubernetes/1/failover.mdx | 2 +-
.../1/failure_modes.mdx | 2 +-
.../docs/postgres_for_kubernetes/1/faq.mdx | 3 +-
.../postgres_for_kubernetes/1/fencing.mdx | 2 +-
.../docs/postgres_for_kubernetes/1/index.mdx | 2 +
.../1/installation_upgrade.mdx | 2 +-
.../1/instance_manager.mdx | 2 +-
.../1/interactive_demo.mdx | 240 +++++++-----------
.../1/kubectl-plugin.mdx | 2 +-
.../1/kubernetes_upgrade.mdx | 2 +-
.../1/labels_annotations.mdx | 2 +-
.../1/license_keys.mdx | 2 +-
.../postgres_for_kubernetes/1/logging.mdx | 2 +-
.../postgres_for_kubernetes/1/monitoring.mdx | 2 +-
.../postgres_for_kubernetes/1/networking.mdx | 2 +-
.../1/{appendixes => }/object_stores.mdx | 4 +-
.../postgres_for_kubernetes/1/openshift.mdx | 2 +-
.../1/operator_capability_levels.mdx | 2 +-
.../1/operator_conf.mdx | 2 +-
.../postgres_for_kubernetes/1/postgis.mdx | 2 +-
.../1/postgresql_conf.mdx | 2 +-
.../postgres_for_kubernetes/1/quickstart.mdx | 2 +-
.../postgres_for_kubernetes/1/recovery.mdx | 2 +-
.../1/replica_cluster.mdx | 2 +-
.../postgres_for_kubernetes/1/replication.mdx | 2 +-
.../1/resource_management.mdx | 2 +-
.../1/rolling_update.mdx | 2 +-
.../postgres_for_kubernetes/1/samples.mdx | 2 +-
.../postgres_for_kubernetes/1/scheduling.mdx | 2 +-
.../postgres_for_kubernetes/1/security.mdx | 2 +-
.../1/ssl_connections.mdx | 2 +-
.../postgres_for_kubernetes/1/storage.mdx | 2 +-
.../docs/postgres_for_kubernetes/1/tde.mdx | 2 +-
.../1/troubleshooting.mdx | 2 +-
.../postgres_for_kubernetes/1/use_cases.mdx | 2 +-
.../1/wal_archiving.mdx | 2 +-
.../processors/cnp/flatten-appendices.mjs | 8 +
.../processors/cnp/update-links.mjs | 23 +-
scripts/source/process-cnp-docs.sh | 3 +-
src/components/link.js | 2 +-
61 files changed, 186 insertions(+), 219 deletions(-)
rename product_docs/docs/postgres_for_kubernetes/1/{appendixes => }/object_stores.mdx (99%)
create mode 100644 scripts/fileProcessor/processors/cnp/flatten-appendices.mjs
diff --git a/product_docs/docs/postgres_for_kubernetes/1/addons.mdx b/product_docs/docs/postgres_for_kubernetes/1/addons.mdx
index 2e1439c0d08..3d957d977b3 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/addons.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/addons.mdx
@@ -491,4 +491,4 @@ command to restore from a backup created with the above parameters would be:
velero create restore myrestore \
--from-backup mybackup \
-n velero-install-namespace
-```
\ No newline at end of file
+```
diff --git a/product_docs/docs/postgres_for_kubernetes/1/applications.mdx b/product_docs/docs/postgres_for_kubernetes/1/applications.mdx
index 7fa1fb7615e..5c3a02df751 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/applications.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/applications.mdx
@@ -84,4 +84,4 @@ connecting to the PostgreSQL cluster, and correspond to the user *owning* the
database.
The `-superuser` ones are supposed to be used only for administrative purposes,
-and correspond to the `postgres` user.
\ No newline at end of file
+and correspond to the `postgres` user.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx b/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx
index 38e694fe184..415d7d7ca22 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/architecture.mdx
@@ -300,4 +300,4 @@ increase this number when the cluster is promoted to primary.
Please refer to the ["Replica Clusters" section](replica_cluster.md) for more
information about how physical replica clusters work and how you can configure
read-only clusters in different Kubernetes clusters to improve your global
- disaster recovery and HA strategy.
\ No newline at end of file
+ disaster recovery and HA strategy.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
index 0a4bf30fc3a..67bc5c8dedc 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup.mdx
@@ -379,4 +379,4 @@ spec:
```
In the previous example, EDB Postgres for Kubernetes will invariably choose the primary
-instance even if the `Cluster` is set to prefer replicas.
\ No newline at end of file
+instance even if the `Cluster` is set to prefer replicas.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
index f56fdff9f6c..0a0e316d77b 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_barmanobjectstore.mdx
@@ -37,11 +37,11 @@ on a [standby](backup/#backup-from-a-standby).
## Common object stores
If you are looking for a specific object store such as
-[AWS S3](appendixes/object_stores.md#aws-s3),
-[Microsoft Azure Blob Storage](appendixes/object_stores.md#azure-blob-storage),
-[Google Cloud Storage](appendixes/object_stores.md#google-cloud-storage), or
-[MinIO Gateway](appendixes/object_stores.md#minio-gateway), or a compatible
-provider, please refer to [Appendix A - Common object stores](appendixes/object_stores.md).
+[AWS S3](object_stores.md#aws-s3),
+[Microsoft Azure Blob Storage](object_stores.md#azure-blob-storage),
+[Google Cloud Storage](object_stores.md#google-cloud-storage), or
+[MinIO Gateway](object_stores.md#minio-gateway), or a compatible
+provider, please refer to [Appendix A - Common object stores](object_stores.md).
## Retention policies
@@ -149,4 +149,4 @@ spec:
backupRetentionPolicy: "expire"
historyTags:
backupRetentionPolicy: "keep"
-```
\ No newline at end of file
+```
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx
index 42d7a0d33fb..8080d7e0fae 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_recovery.mdx
@@ -11,4 +11,4 @@ Version 1.21 introduces support for the Kubernetes `VolumeSnapshot` API,
providing more possibilities for the end user.
As a result, [backup](backup.md) and [recovery](recovery.md) are now in two
-separate sections.
\ No newline at end of file
+separate sections.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
index 5540145a441..2ed2c61cb58 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/backup_volumesnapshot.mdx
@@ -179,7 +179,7 @@ volume snapshot class.
The following manifest creates a `Cluster` that is ready to be used for volume
snapshots and that stores the WAL archive in an S3 bucket via IAM role for the
-Service Account (IRSA, see [AWS S3](appendixes/object_stores.md#aws-s3)):
+Service Account (IRSA, see [AWS S3](object_stores.md#aws-s3)):
```yaml
apiVersion: postgresql.k8s.enterprisedb.io/v1
@@ -226,4 +226,4 @@ spec:
```
The last resource defines daily volume snapshot backups at midnight, requesting
-one immediately after the cluster is created.
\ No newline at end of file
+one immediately after the cluster is created.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx
index bb6e683175b..4f70234c7d5 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/before_you_start.mdx
@@ -144,4 +144,4 @@ Zone
Now that you have familiarized yourself with the terminology, you can decide to
[test EDB Postgres for Kubernetes on your laptop using a local cluster](quickstart.md) before
-deploying the operator in your selected cloud environment.
\ No newline at end of file
+deploying the operator in your selected cloud environment.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx b/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx
index 0884e13a29e..27d5daa558c 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/benchmarking.mdx
@@ -193,4 +193,4 @@ After all testing is done, fio deployment and resources can be deleted by:
kubectl cnp fio --dry-run | kubectl delete -f -
```
-make sure use the same name which was used to create the fio deployment and add namespace if applicable.
\ No newline at end of file
+Make sure to use the same name that was used to create the fio deployment, and add the namespace if applicable.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx
index 690b336aaa9..67864d50050 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/bootstrap.mdx
@@ -666,4 +666,4 @@ before migrating to the target database in Kubernetes.
Before you attempt a migration, you must test both the procedure
and the applications. In particular, it is fundamental that you run the migration
procedure as many times as needed to systematically measure the downtime of your
- applications in production. Feel free to contact EDB for assistance.
\ No newline at end of file
+ applications in production. Feel free to contact EDB for assistance.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx b/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx
index c35c528534c..d928ca3112f 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/certificates.mdx
@@ -288,4 +288,4 @@ spec:
```
You can find a complete example using cert-manager to manage both server and client CA and certificates in
-the [cluster-example-cert-manager.yaml](../samples/cluster-example-cert-manager.yaml) deployment manifest.
\ No newline at end of file
+the [cluster-example-cert-manager.yaml](../samples/cluster-example-cert-manager.yaml) deployment manifest.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
index 4f568ff413b..c9cb12e4e9d 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
@@ -1,6 +1,8 @@
---
title: 'API Reference'
originalFilePath: 'src/cloudnative-pg.v1.md'
+redirects:
+- api_reference
---
Package v1 contains API Schema definitions for the postgresql v1 API group
@@ -4389,4 +4391,4 @@ value - with 1 being the minimum accepted value.
-
\ No newline at end of file
+
diff --git a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx
index 0529567f01e..807fe05e6d3 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/cluster_conf.mdx
@@ -119,4 +119,4 @@ update of the PostgreSQL Pods.
If the `env` or the `envFrom` section refers to a Secret or a ConfigMap, the
operator will not detect any changes in them and will not trigger a rollout.
The Kubelet uses the same behavior with Pods, and the user is supposed to
-trigger the Pod rollout manually.
\ No newline at end of file
+trigger the Pod rollout manually.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx
index 103c6fb197f..7663ae519b6 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/connection_pooling.mdx
@@ -610,4 +610,4 @@ users.
We have reasons to believe that the adopted solution addresses the majority of
use cases, while leaving room for the future implementation of a separate
operator for PgBouncer to complete the gamut with more advanced and customized
- scenarios.
\ No newline at end of file
+ scenarios.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx
index 03a3c6c80ed..5fad160a6d6 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/container_images.mdx
@@ -63,4 +63,4 @@ Examples of accepted image tags:
- `15.0`
!!! Warning
- `latest` is not considered a valid tag for the image.
\ No newline at end of file
+ `latest` is not considered a valid tag for the image.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/controller.mdx b/product_docs/docs/postgres_for_kubernetes/1/controller.mdx
index b8119e92116..f8b7ae9016e 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/controller.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/controller.mdx
@@ -123,4 +123,4 @@ user can select the preferred behavior at the cluster level (read the
["Kubernetes upgrade"](kubernetes_upgrade.md) section for details).
Being generic, the `StatefulSet` doesn't allow this level of
-customization.
\ No newline at end of file
+customization.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx b/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx
index 2533b18ddf7..3f4d576d270 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/database_import.mdx
@@ -242,4 +242,4 @@ There are a few things you need to be aware of when using the `monolith` type:
and those databases not allowing connections
- After the clone procedure is done, `ANALYZE VERBOSE` is executed for every
database.
-- `postImportApplicationSQL` field is not supported
\ No newline at end of file
+- `postImportApplicationSQL` field is not supported
diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx
index 5dacb1ae67f..e09d829b8f8 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_hibernation.mdx
@@ -88,4 +88,4 @@ Or, just unset it altogether:
$ kubectl annotate cluster k8s.enterprisedb.io/hibernation-
```
-The Pods will be recreated and the cluster will resume operation.
\ No newline at end of file
+The Pods will be recreated and the cluster will resume operation.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx
index 2d963716e9f..9c195b11934 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/declarative_role_management.mdx
@@ -223,4 +223,4 @@ the database instances.
In terms of backward compatibility, declarative role management is designed
to ignore roles that exist in the database but are not included in the spec.
The lifecycle of these roles will continue to be managed within PostgreSQL,
- allowing EDB Postgres for Kubernetes users to adopt this feature at their convenience.
\ No newline at end of file
+ allowing EDB Postgres for Kubernetes users to adopt this feature at their convenience.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx b/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx
index f30c78cf359..96aee4bcaff 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/evaluation.mdx
@@ -37,4 +37,4 @@ To see how `imageName` and `licenseKey` are set, refer to the [cluster-full-examp
## Further Information
-Refer to [License and License keys](license_keys.md) for terms and more details.
\ No newline at end of file
+Refer to [License and License keys](license_keys.md) for terms and more details.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/expose_pg_services.mdx b/product_docs/docs/postgres_for_kubernetes/1/expose_pg_services.mdx
index 67e9b7ef208..b111a67f282 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/expose_pg_services.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/expose_pg_services.mdx
@@ -134,4 +134,4 @@ You can access the primary from your machine running:
```sh
psql -h $(minikube ip) -p 5432 -U postgres
-```
\ No newline at end of file
+```
diff --git a/product_docs/docs/postgres_for_kubernetes/1/failover.mdx b/product_docs/docs/postgres_for_kubernetes/1/failover.mdx
index 1779ddf5239..f7c5e73f79a 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/failover.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/failover.mdx
@@ -92,4 +92,4 @@ subscribers) or when the time to perform the failover is longer than the
expected outage.
Enabling a new configuration option to delay failover provides a mechanism to
-prevent premature failover for short-lived network or node instability.
\ No newline at end of file
+prevent premature failover for short-lived network or node instability.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx
index a193b6a86d3..a1aab1641cf 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/failure_modes.mdx
@@ -198,4 +198,4 @@ and for the sole duration of the extraordinary/emergency operation.
Please make sure that you use this annotation only for a limited period of
time and you remove it when the emergency has finished. Leaving this annotation
in a cluster will prevent the operator from issuing any self-healing operation,
- such as a failover.
\ No newline at end of file
+ such as a failover.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
index 1ede2469ff4..9ff148266ea 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/faq.mdx
@@ -400,4 +400,5 @@ access.
Teams can then create another user for read-write operations through the
["Declarative role management"](declarative_role_management.md) feature
-and assign the required `GRANT` to the tables.
\ No newline at end of file
+and assign the required `GRANT` to the tables.
+
diff --git a/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx b/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx
index 73525f1d39e..c35e05abb82 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/fencing.mdx
@@ -106,4 +106,4 @@ This consists of an initial fast shutdown with a timeout set to
If a fenced instance is deleted, the pod will be recreated normally, but the
postmaster won't be started. This can be extremely helpful when instances
-are `Crashlooping`.
\ No newline at end of file
+are `Crashlooping`.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/index.mdx
index 554cedde64e..3345db5e28a 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/index.mdx
@@ -63,6 +63,8 @@ navigation:
- faq
- troubleshooting
- api_reference
+ - '#Appendix'
+ - object_stores
---
diff --git a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
index 515377e9248..5efd28a4af9 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/installation_upgrade.mdx
@@ -544,4 +544,4 @@ spec:
replicationSlots:
highAvailability:
enabled: false
-```
\ No newline at end of file
+```
diff --git a/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx b/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx
index 7b164a6fa50..e7f3e5b159e 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/instance_manager.mdx
@@ -96,4 +96,4 @@ the WAL files. By default it is set to `3600` (1 hour).
## Failover
In case of primary pod failure, the cluster will go into failover mode.
-Please refer to the ["Failover" section](failover.md) for details.
\ No newline at end of file
+Please refer to the ["Failover" section](failover.md) for details.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx b/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx
index f2f31679ba6..72f4151074c 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/interactive_demo.mdx
@@ -39,22 +39,22 @@ INFO[0000] Prep: Network
INFO[0000] Created network 'k3d-k3s-default'
INFO[0000] Created image volume k3d-k3s-default-images
INFO[0000] Starting new tools node...
-INFO[0001] Pulling image 'ghcr.io/k3d-io/k3d-tools:5.5.1'
+INFO[0001] Pulling image 'ghcr.io/k3d-io/k3d-tools:5.6.0'
INFO[0001] Creating node 'k3d-k3s-default-server-0'
-INFO[0002] Pulling image 'docker.io/rancher/k3s:v1.26.4-k3s1'
+INFO[0001] Pulling image 'docker.io/rancher/k3s:v1.27.4-k3s1'
INFO[0003] Starting Node 'k3d-k3s-default-tools'
-INFO[0006] Creating LoadBalancer 'k3d-k3s-default-serverlb'
-INFO[0007] Pulling image 'ghcr.io/k3d-io/k3d-proxy:5.5.1'
-INFO[0010] Using the k3d-tools node to gather environment information
-INFO[0010] HostIP: using network gateway 172.17.0.1 address
-INFO[0010] Starting cluster 'k3s-default'
-INFO[0010] Starting servers...
-INFO[0010] Starting Node 'k3d-k3s-default-server-0'
-INFO[0015] All agents already running.
-INFO[0015] Starting helpers...
-INFO[0015] Starting Node 'k3d-k3s-default-serverlb'
-INFO[0022] Injecting records for hostAliases (incl. host.k3d.internal) and for 2 network members into CoreDNS configmap...
-INFO[0024] Cluster 'k3s-default' created successfully!
+INFO[0005] Creating LoadBalancer 'k3d-k3s-default-serverlb'
+INFO[0006] Pulling image 'ghcr.io/k3d-io/k3d-proxy:5.6.0'
+INFO[0011] Using the k3d-tools node to gather environment information
+INFO[0011] HostIP: using network gateway 172.17.0.1 address
+INFO[0011] Starting cluster 'k3s-default'
+INFO[0011] Starting servers...
+INFO[0011] Starting Node 'k3d-k3s-default-server-0'
+INFO[0016] All agents already running.
+INFO[0016] Starting helpers...
+INFO[0016] Starting Node 'k3d-k3s-default-serverlb'
+INFO[0023] Injecting records for hostAliases (incl. host.k3d.internal) and for 2 network members into CoreDNS configmap...
+INFO[0025] Cluster 'k3s-default' created successfully!
INFO[0025] You can now use it like this:
kubectl cluster-info
```
@@ -66,7 +66,7 @@ Verify that it works with the following command:
kubectl get nodes
__OUTPUT__
NAME STATUS ROLES AGE VERSION
-k3d-k3s-default-server-0 Ready control-plane,master 17s v1.26.4+k3s1
+k3d-k3s-default-server-0 Ready control-plane,master 17s v1.27.4+k3s1
```
You will see one node called `k3d-k3s-default-server-0`. If the status isn't yet "Ready", wait for a few seconds and run the command above again.
@@ -76,7 +76,7 @@ You will see one node called `k3d-k3s-default-server-0`. If the status isn't yet
Now that the Kubernetes cluster is running, you can proceed with EDB Postgres for Kubernetes installation as described in the ["Installation and upgrades"](installation_upgrade.md) section:
```shell
-kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.20.2.yaml
+kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.21.0.yaml
__OUTPUT__
namespace/postgresql-operator-system created
customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created
@@ -179,12 +179,12 @@ metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"postgresql.k8s.enterprisedb.io/v1","kind":"Cluster","metadata":{"annotations":{},"name":"cluster-example","namespace":"default"},"spec":{"instances":3,"primaryUpdateStrategy":"unsupervised","storage":{"size":"1Gi"}}}
- creationTimestamp: "2023-07-28T16:14:08Z"
+ creationTimestamp: "2023-10-18T19:53:06Z"
generation: 1
name: cluster-example
namespace: default
- resourceVersion: "1115"
- uid: 70e054ae-b487-41e3-941b-b7c969f950be
+ resourceVersion: "1201"
+ uid: 9d712b83-f2ea-4835-8de1-c2cee75bd3c7
spec:
affinity:
podAntiAffinityType: preferred
@@ -246,9 +246,9 @@ status:
certificates:
clientCASecret: cluster-example-ca
expirations:
- cluster-example-ca: 2023-10-26 16:09:09 +0000 UTC
- cluster-example-replication: 2023-10-26 16:09:09 +0000 UTC
- cluster-example-server: 2023-10-26 16:09:09 +0000 UTC
+ cluster-example-ca: 2024-01-16 19:48:06 +0000 UTC
+ cluster-example-replication: 2024-01-16 19:48:06 +0000 UTC
+ cluster-example-server: 2024-01-16 19:48:06 +0000 UTC
replicationTLSSecret: cluster-example-replication
serverAltDNSNames:
- cluster-example-rw
@@ -265,36 +265,36 @@ status:
cloudNativePostgresqlCommitHash: c42ca1c2
cloudNativePostgresqlOperatorHash: 1d51c15adffb02c81dbc4e8752ddb68f709699c78d9c3384ed9292188685971b
conditions:
- - lastTransitionTime: "2023-07-28T16:15:29Z"
+ - lastTransitionTime: "2023-10-18T19:54:30Z"
message: Cluster is Ready
reason: ClusterIsReady
status: "True"
type: Ready
- - lastTransitionTime: "2023-07-28T16:15:29Z"
+ - lastTransitionTime: "2023-10-18T19:54:30Z"
message: velero addon is disabled
reason: Disabled
status: "False"
type: k8s.enterprisedb.io/velero
- - lastTransitionTime: "2023-07-28T16:15:29Z"
+ - lastTransitionTime: "2023-10-18T19:54:30Z"
message: external-backup-adapter addon is disabled
reason: Disabled
status: "False"
type: k8s.enterprisedb.io/externalBackupAdapter
- - lastTransitionTime: "2023-07-28T16:15:30Z"
+ - lastTransitionTime: "2023-10-18T19:54:30Z"
message: external-backup-adapter-cluster addon is disabled
reason: Disabled
status: "False"
type: k8s.enterprisedb.io/externalBackupAdapterCluster
- - lastTransitionTime: "2023-07-28T16:15:30Z"
+ - lastTransitionTime: "2023-10-18T19:54:31Z"
message: kasten addon is disabled
reason: Disabled
status: "False"
type: k8s.enterprisedb.io/kasten
configMapResourceVersion:
metrics:
- postgresql-operator-default-monitoring: "788"
+ postgresql-operator-default-monitoring: "860"
currentPrimary: cluster-example-1
- currentPrimaryTimestamp: "2023-07-28T16:14:48.609086Z"
+ currentPrimaryTimestamp: "2023-10-18T19:53:49.065241Z"
healthyPVC:
- cluster-example-1
- cluster-example-2
@@ -323,7 +323,7 @@ status:
licenseStatus:
isImplicit: true
isTrial: true
- licenseExpiration: "2023-08-27T16:14:08Z"
+ licenseExpiration: "2023-11-17T19:53:06Z"
licenseStatus: Implicit trial license
repositoryAccess: false
valid: true
@@ -335,14 +335,14 @@ status:
readService: cluster-example-r
readyInstances: 3
secretsResourceVersion:
- applicationSecretVersion: "760"
- clientCaSecretVersion: "756"
- replicationSecretVersion: "758"
- serverCaSecretVersion: "756"
- serverSecretVersion: "757"
- superuserSecretVersion: "759"
+ applicationSecretVersion: "832"
+ clientCaSecretVersion: "828"
+ replicationSecretVersion: "830"
+ serverCaSecretVersion: "828"
+ serverSecretVersion: "829"
+ superuserSecretVersion: "831"
targetPrimary: cluster-example-1
- targetPrimaryTimestamp: "2023-07-28T16:14:09.501164Z"
+ targetPrimaryTimestamp: "2023-10-18T19:53:06.981792Z"
timelineID: 1
topology:
instances:
@@ -377,7 +377,7 @@ curl -sSfL \
sudo sh -s -- -b /usr/local/bin
__OUTPUT__
EnterpriseDB/kubectl-cnp info checking GitHub for latest tag
-EnterpriseDB/kubectl-cnp info found version: 1.20.2 for v1.20.2/linux/x86_64
+EnterpriseDB/kubectl-cnp info found version: 1.21.0 for v1.21.0/linux/x86_64
EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp
```
@@ -387,22 +387,23 @@ The `cnp` command is now available in kubectl:
kubectl cnp status cluster-example
__OUTPUT__
Cluster Summary
-Name: cluster-example
-Namespace: default
-System ID: 7260903692491026447
-PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3
-Primary instance: cluster-example-1
-Status: Cluster in healthy state
-Instances: 3
-Ready instances: 3
-Current Write LSN: 0/6054B60 (Timeline: 1 - WAL File: 000000010000000000000006)
+Name: cluster-example
+Namespace: default
+System ID: 7291389121501601807
+PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3
+Primary instance: cluster-example-1
+Primary start time: 2023-10-18 19:53:49 +0000 UTC (uptime 2m32s)
+Status: Cluster in healthy state
+Instances: 3
+Ready instances: 3
+Current Write LSN: 0/6054B60 (Timeline: 1 - WAL File: 000000010000000000000006)
Certificates Status
Certificate Name Expiration Date Days Left Until Expiration
---------------- --------------- --------------------------
-cluster-example-ca 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-replication 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-server 2023-10-26 16:09:09 +0000 UTC 89.99
+cluster-example-ca 2024-01-16 19:48:06 +0000 UTC 89.99
+cluster-example-replication 2024-01-16 19:48:06 +0000 UTC 89.99
+cluster-example-server 2024-01-16 19:48:06 +0000 UTC 89.99
Continuous Backup status
Not configured
@@ -444,22 +445,23 @@ Now if we check the status...
kubectl cnp status cluster-example
__OUTPUT__
Cluster Summary
-Name: cluster-example
-Namespace: default
-System ID: 7260903692491026447
-PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3
-Primary instance: cluster-example-2
-Status: Failing over Failing over from cluster-example-1 to cluster-example-2
-Instances: 3
-Ready instances: 2
-Current Write LSN: 0/7001000 (Timeline: 2 - WAL File: 000000020000000000000007)
+Name: cluster-example
+Namespace: default
+System ID: 7291389121501601807
+PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3
+Primary instance: cluster-example-2
+Primary start time: 2023-10-18 19:57:07 +0000 UTC (uptime 5s)
+Status: Failing over Failing over from cluster-example-1 to cluster-example-2
+Instances: 3
+Ready instances: 2
+Current Write LSN: 0/7001000 (Timeline: 2 - WAL File: 000000020000000000000007)
Certificates Status
Certificate Name Expiration Date Days Left Until Expiration
---------------- --------------- --------------------------
-cluster-example-ca 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-replication 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-server 2023-10-26 16:09:09 +0000 UTC 89.99
+cluster-example-ca 2024-01-16 19:48:06 +0000 UTC 89.99
+cluster-example-replication 2024-01-16 19:48:06 +0000 UTC 89.99
+cluster-example-server 2024-01-16 19:48:06 +0000 UTC 89.99
Continuous Backup status
Not configured
@@ -471,10 +473,11 @@ Unmanaged Replication Slot Status
No unmanaged replication slots found
Instances status
-Name Database Size Current LSN Replication role Status QoS Manager Version Node
----- ------------- ----------- ---------------- ------ --- --------------- ----
-cluster-example-2 29 MB 0/7001000 Primary OK BestEffort 1.20.2 k3d-k3s-default-server-0
-cluster-example-3 29 MB 0/70000A0 Standby (file based) OK BestEffort 1.20.2 k3d-k3s-default-server-0
+Name Database Size Current LSN Replication role Status QoS Manager Version Node
+---- ------------- ----------- ---------------- ------ --- --------------- ----
+cluster-example-2 29 MB 0/7001000 Primary OK BestEffort 1.20.2 k3d-k3s-default-server-0
+cluster-example-3 29 MB 0/70000A0 Standby (file based) OK BestEffort 1.20.2 k3d-k3s-default-server-0
+cluster-example-1 - - - pod not available BestEffort - k3d-k3s-default-server-0
```
...the failover process has begun, with the second pod promoted to primary. Once the failed pod has restarted, it will become a replica of the new primary:
@@ -483,91 +486,23 @@ cluster-example-3 29 MB 0/70000A0 Standby (file based) OK Bes
kubectl cnp status cluster-example
__OUTPUT__
Cluster Summary
-Name: cluster-example
-Namespace: default
-System ID: 7260903692491026447
-PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3
-Primary instance: cluster-example-2
-Status: Failing over Failing over from cluster-example-1 to cluster-example-2
-Instances: 3
-Ready instances: 2
-Current Write LSN: 0/7001000 (Timeline: 2 - WAL File: 000000020000000000000007)
+Name: cluster-example
+Namespace: default
+System ID: 7291389121501601807
+PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3
+Primary instance: cluster-example-2
+Primary start time: 2023-10-18 19:57:07 +0000 UTC (uptime 1m14s)
+Status: Cluster in healthy state
+Instances: 3
+Ready instances: 3
+Current Write LSN: 0/7004D98 (Timeline: 2 - WAL File: 000000020000000000000007)
Certificates Status
Certificate Name Expiration Date Days Left Until Expiration
---------------- --------------- --------------------------
-cluster-example-ca 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-replication 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-server 2023-10-26 16:09:09 +0000 UTC 89.99
-
-Continuous Backup status
-Not configured
-
-Streaming Replication status
-Not available yet
-
-Unmanaged Replication Slot Status
-No unmanaged replication slots found
-
-Instances status
-Name Database Size Current LSN Replication role Status QoS Manager Version Node
----- ------------- ----------- ---------------- ------ --- --------------- ----
-cluster-example-2 29 MB 0/7001000 Primary OK BestEffort 1.20.2 k3d-k3s-default-server-0
-cluster-example-3 29 MB 0/70000A0 Standby (file based) OK BestEffort 1.20.2 k3d-k3s-default-server-0
-$ kubectl cnp status cluster-example
-Cluster Summary
-Name: cluster-example
-Namespace: default
-System ID: 7260903692491026447
-PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3
-Primary instance: cluster-example-2
-Status: Cluster in healthy state
-Instances: 3
-Ready instances: 3
-Current Write LSN: 0/7004D60 (Timeline: 2 - WAL File: 000000020000000000000007)
-
-Certificates Status
-Certificate Name Expiration Date Days Left Until Expiration
----------------- --------------- --------------------------
-cluster-example-ca 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-replication 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-server 2023-10-26 16:09:09 +0000 UTC 89.99
-
-Continuous Backup status
-Not configured
-
-Streaming Replication status
-Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority
----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- -------------
-cluster-example-1 0/7004D60 0/7004D60 0/7004D60 0/7004D60 00:00:00 00:00:00 00:00:00 streaming async 0
-
-Unmanaged Replication Slot Status
-No unmanaged replication slots found
-
-Instances status
-Name Database Size Current LSN Replication role Status QoS Manager Version Node
----- ------------- ----------- ---------------- ------ --- --------------- ----
-cluster-example-2 29 MB 0/7004D60 Primary OK BestEffort 1.20.2 k3d-k3s-default-server-0
-cluster-example-1 29 MB 0/7004D60 Standby (async) OK BestEffort 1.20.2 k3d-k3s-default-server-0
-cluster-example-3 29 MB 0/70000A0 Standby (file based) OK BestEffort 1.20.2 k3d-k3s-default-server-0
-$ kubectl cnp status cluster-example
-Cluster Summary
-Name: cluster-example
-Namespace: default
-System ID: 7260903692491026447
-PostgreSQL Image: quay.io/enterprisedb/postgresql:15.3
-Primary instance: cluster-example-2
-Status: Cluster in healthy state
-Instances: 3
-Ready instances: 3
-Current Write LSN: 0/7004D98 (Timeline: 2 - WAL File: 000000020000000000000007)
-
-Certificates Status
-Certificate Name Expiration Date Days Left Until Expiration
----------------- --------------- --------------------------
-cluster-example-ca 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-replication 2023-10-26 16:09:09 +0000 UTC 89.99
-cluster-example-server 2023-10-26 16:09:09 +0000 UTC 89.99
+cluster-example-ca 2024-01-16 19:48:06 +0000 UTC 89.99
+cluster-example-replication 2024-01-16 19:48:06 +0000 UTC 89.99
+cluster-example-server 2024-01-16 19:48:06 +0000 UTC 89.99
Continuous Backup status
Not configured
@@ -576,16 +511,17 @@ Streaming Replication status
Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority
---- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- -------------
cluster-example-1 0/7004D98 0/7004D98 0/7004D98 0/7004D98 00:00:00 00:00:00 00:00:00 streaming async 0
+cluster-example-3 0/7004D98 0/7004D98 0/7004D98 0/7004D98 00:00:00 00:00:00 00:00:00 streaming async 0
Unmanaged Replication Slot Status
No unmanaged replication slots found
Instances status
-Name Database Size Current LSN Replication role Status QoS Manager Version Node
----- ------------- ----------- ---------------- ------ --- --------------- ----
-cluster-example-2 29 MB 0/7004D98 Primary OK BestEffort 1.20.2 k3d-k3s-default-server-0
-cluster-example-1 29 MB 0/7004D98 Standby (async) OK BestEffort 1.20.2 k3d-k3s-default-server-0
-cluster-example-3 29 MB 0/70000A0 Standby (file based) OK BestEffort 1.20.2 k3d-k3s-default-server-0
+Name Database Size Current LSN Replication role Status QoS Manager Version Node
+---- ------------- ----------- ---------------- ------ --- --------------- ----
+cluster-example-2 29 MB 0/7004D98 Primary OK BestEffort 1.20.2 k3d-k3s-default-server-0
+cluster-example-1 29 MB 0/7004D98 Standby (async) OK BestEffort 1.20.2 k3d-k3s-default-server-0
+cluster-example-3 29 MB 0/7004D98 Standby (async) OK BestEffort 1.20.2 k3d-k3s-default-server-0
```
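To follow the failed pod's recovery from a terminal while the status above evolves, you can watch the instance pods directly. A minimal sketch, assuming the operator applies a `k8s.enterprisedb.io/cluster` label to instance pods (verify the label used by your installation):

```shell
# Watch the cluster's instance pods until cluster-example-1 is Running again
kubectl get pods -l k8s.enterprisedb.io/cluster=cluster-example --watch
```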
diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
index afd83edb238..e1982245b0a 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/kubectl-plugin.mdx
@@ -941,4 +941,4 @@ A specific `VolumeSnapshotClass` can be requested via the `-c` option:
```shell
kubectl cnp snapshot cluster-example -c longhorn
-```
\ No newline at end of file
+```
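Before requesting a specific class with `-c`, it can help to list which `VolumeSnapshotClass` resources exist in the cluster. A minimal sketch, assuming the external snapshotter CRDs are installed:

```shell
# List the available snapshot classes and the CSI driver backing each one
kubectl get volumesnapshotclass -o custom-columns=NAME:.metadata.name,DRIVER:.driver
```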
diff --git a/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx b/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx
index 8088b9e5369..321a8dd6b29 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/kubernetes_upgrade.mdx
@@ -125,4 +125,4 @@ A possible approach could be:
perform a switchover given that the current primary is running on a cordoned node.
4. Scale the cluster back down to a single instance; this will delete the old instance
5. The old primary's node can now be drained successfully, while leaving the new primary
- running on a new node.
\ No newline at end of file
+ running on a new node.
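As an illustration of the switchover step above, the plugin can promote a specific instance once the current primary's node is cordoned. A sketch, assuming the plugin's `promote` subcommand is available in your version, with a cluster `cluster-example`, target instance `cluster-example-2`, and node `worker-1` (all hypothetical):

```shell
# Keep new pods off the node being upgraded, then switch the primary over
kubectl cordon worker-1
kubectl cnp promote cluster-example cluster-example-2
```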
diff --git a/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx b/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx
index af93f0754fa..100e3b6ecfb 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/labels_annotations.mdx
@@ -232,4 +232,4 @@ kubectl get pods --show-labels
Currently, EDB Postgres for Kubernetes does not automatically propagate label or
annotation deletions. Therefore, when a label or annotation that was previously
propagated to the underlying pods is removed from a Cluster, the operator
-will not automatically remove it from the associated resources.
\ No newline at end of file
+will not automatically remove it from the associated resources.
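Until deletions are propagated automatically, stale labels have to be removed from the pods by hand. A sketch, assuming a label key `environment` (hypothetical) that was dropped from the Cluster, and the `k8s.enterprisedb.io/cluster` selector mentioned earlier:

```shell
# A trailing dash on the key removes the label from the selected pods
kubectl label pods -l k8s.enterprisedb.io/cluster=cluster-example environment-
```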
diff --git a/product_docs/docs/postgres_for_kubernetes/1/license_keys.mdx b/product_docs/docs/postgres_for_kubernetes/1/license_keys.mdx
index 617c4e775f1..f8e700d28f0 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/license_keys.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/license_keys.mdx
attempt on the cluster, effectively ceasing to manage its status. This also
includes any self-healing and high availability capabilities, such as automated
failover and switchovers.
-The pods and the data will still be available.
\ No newline at end of file
+The pods and the data will still be available.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/logging.mdx b/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
index 2df5ae5d1ad..0a128ad3841 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/logging.mdx
@@ -289,4 +289,4 @@ Therefore, all the possible `logger` values are the following ones:
Except for `postgres` and `edb_audit`, which have the structures described above,
all other possible values simply have `msg` set to the escaped message that is
-logged.
\ No newline at end of file
+logged.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
index 864ebae1d00..af8038b1368 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/monitoring.mdx
@@ -816,4 +816,4 @@ spec:
!!! Note
We currently don’t use `ServiceMonitor` because our service doesn’t define
a port pointing to the metrics. If we added a metrics port, this could expose
- sensitive data.
\ No newline at end of file
+ sensitive data.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/networking.mdx b/product_docs/docs/postgres_for_kubernetes/1/networking.mdx
index 98bee802a9d..0df3fb93072 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/networking.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/networking.mdx
@@ -49,4 +49,4 @@ While [bootstrapping](bootstrap.md) from another cluster or when using the `exte
ensure connectivity among all clusters, object stores, and namespaces involved.
Again, we refer you to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/)
-for setup information.
\ No newline at end of file
+for setup information.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/appendixes/object_stores.mdx b/product_docs/docs/postgres_for_kubernetes/1/object_stores.mdx
similarity index 99%
rename from product_docs/docs/postgres_for_kubernetes/1/appendixes/object_stores.mdx
rename to product_docs/docs/postgres_for_kubernetes/1/object_stores.mdx
index 6f473de7c0d..f692a3fe6be 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/appendixes/object_stores.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/object_stores.mdx
@@ -3,7 +3,7 @@ title: 'Appendix A - Common object stores for backups'
originalFilePath: 'src/appendixes/object_stores.md'
---
-You can store the [backup](../backup.md) files in any service that is supported
+You can store the [backup](backup.md) files in any service that is supported
by the Barman Cloud infrastructure. That is:
- [Amazon S3](#aws-s3)
@@ -444,4 +444,4 @@ spec:
```
Verify the presence of archived WAL files in `s3://BUCKET_NAME/` before
-proceeding with a backup.
\ No newline at end of file
+proceeding with a backup.
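One way to run that verification from a workstation is with the AWS CLI. A minimal sketch, assuming Barman Cloud's default layout, which places WAL segments under a `wals/` folder named after the server (`cluster-example` here is hypothetical):

```shell
# Confirm WAL archiving is working before taking the first backup
aws s3 ls s3://BUCKET_NAME/cluster-example/wals/ --recursive | head
```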
diff --git a/product_docs/docs/postgres_for_kubernetes/1/openshift.mdx b/product_docs/docs/postgres_for_kubernetes/1/openshift.mdx
index c376e907e02..096880532f0 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/openshift.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/openshift.mdx
@@ -1013,4 +1013,4 @@ there might be no alerts.
![Prometheus alerts](./images/openshift/alerts-openshift.png)
-Alert routing and notifications are beyond the scope of this guide.
\ No newline at end of file
+Alert routing and notifications are beyond the scope of this guide.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx
index 84a1208849c..4e7975f6b43 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/operator_capability_levels.mdx
@@ -636,4 +636,4 @@ the server and restarting it as a standby.
### Automated recreation of a standby
If the pod hosting a standby has been removed, the operator initiates
-the procedure to recreate a standby server.
\ No newline at end of file
+the procedure to recreate a standby server.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx
index 8097d31b22b..6a5e06b4845 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/operator_conf.mdx
@@ -179,4 +179,4 @@ Once inside execute:
```shell
curl localhost:6060/debug/pprof/
-```
\ No newline at end of file
+```
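If you prefer not to open a shell inside the operator pod, the same endpoint can usually be reached through a port-forward. A sketch, assuming the deployment name and namespace below (both hypothetical; adjust to your installation):

```shell
# Forward the pprof port locally, then query it from the workstation
kubectl port-forward -n postgresql-operator-system deploy/postgresql-operator-controller-manager 6060 &
curl localhost:6060/debug/pprof/
```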
diff --git a/product_docs/docs/postgres_for_kubernetes/1/postgis.mdx b/product_docs/docs/postgres_for_kubernetes/1/postgis.mdx
index 9cdc8f7ff83..b3839ecbacd 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/postgis.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/postgis.mdx
@@ -152,4 +152,4 @@ app=# SELECT postgis_full_version();
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
POSTGIS="3.2.2 628da50" [EXTENSION] PGSQL="140" GEOS="3.9.0-CAPI-1.16.2" PROJ="7.2.1" LIBXML="2.9.10" LIBJSON="0.15" LIBPROTOBUF="1.3.3" WAGYU="0.5.0 (Internal)" TOPOLOGY
(1 row)
-```
\ No newline at end of file
+```
diff --git a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx
index dc2f708cdbb..2afc1f8d7ee 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/postgresql_conf.mdx
@@ -574,4 +574,4 @@ Users are not allowed to set the following configuration parameters in the
- `unix_socket_group`
- `unix_socket_permissions`
- `wal_level`
-- `wal_log_hints`
\ No newline at end of file
+- `wal_log_hints`
diff --git a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
index 5efb3e3c3a3..54c1e31aa74 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/quickstart.mdx
@@ -375,4 +375,4 @@ see the `EDB Postgres for Kubernetes` dashboard.
![local grafana](images/grafana-local.png)
Note that in our example setup, both Prometheus and Grafana will pick up
-any other EDB Postgres for Kubernetes clusters deployed with Monitoring activated.
\ No newline at end of file
+any other EDB Postgres for Kubernetes clusters deployed with Monitoring activated.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
index 79edae0bde6..2f7b156f2f0 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/recovery.mdx
@@ -612,4 +612,4 @@ in the storage buckets could be overwritten by the new cluster.
the recovered cluster, you can skip the above check. This is not recommended,
as the check works fine for the general use case. Please don't do
this unless you are familiar with the PostgreSQL recovery system, as this can lead
- you to severe data loss.
\ No newline at end of file
+ you to severe data loss.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx
index 1372ffb7b86..2373f4f4cf0 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/replica_cluster.mdx
@@ -231,4 +231,4 @@ kubectl cnp -n status cluster-replica-example
Disabling replication is an **irreversible** operation: once replication is
disabled and the **designated primary** is promoted to **primary**, the
replica cluster and the source cluster will permanently become two independent
- clusters.
\ No newline at end of file
+ clusters.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx
index c1e606d617f..50170e760e7 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/replication.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/replication.mdx
@@ -329,4 +329,4 @@ when replication slots support is enabled. For example:
parameters:
max_slot_wal_keep_size: "10GB"
# ...
-```
\ No newline at end of file
+```
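To check how close the retained WAL is to that limit, you can inspect the slots from the primary. A sketch, assuming a primary pod named `cluster-example-1` and PostgreSQL 13 or later (where the `wal_status` column is available):

```shell
# Inspect replication slot health directly on the primary instance
kubectl exec -ti cluster-example-1 -- psql -c \
  "SELECT slot_name, active, wal_status FROM pg_replication_slots;"
```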
diff --git a/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx b/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx
index 704ef4eb44c..aa354aede0e 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/resource_management.mdx
@@ -97,4 +97,4 @@ section in the PostgreSQL documentation.
!!! Seealso "Managing Compute Resources for Containers"
For more details on resource management, please refer to the
["Managing Compute Resources for Containers"](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
- page from the Kubernetes documentation.
\ No newline at end of file
+ page from the Kubernetes documentation.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx
index c6f79ab0085..e68d22cae34 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/rolling_update.mdx
@@ -93,4 +93,4 @@ You can trigger a restart with:
kubectl cnp restart [cluster] [current_primary]
```
-You can find more information in the [`cnp` plugin page](kubectl-plugin.md).
\ No newline at end of file
+You can find more information in the [`cnp` plugin page](kubectl-plugin.md).
diff --git a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx
index 7add2d6e076..e993654f699 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/samples.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/samples.mdx
@@ -99,4 +99,4 @@ Cluster with TDE enabled
an EPAS 15 cluster with TDE. Note that you will need access credentials
to download the image used.
-For a list of available options, please refer to the ["API Reference" page](cloudnative-pg.v1.md).
\ No newline at end of file
+For a list of available options, please refer to the ["API Reference" page](cloudnative-pg.v1.md).
diff --git a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
index 84cbe9d8874..fe164b17126 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/scheduling.mdx
@@ -147,4 +147,4 @@ for tolerations.
!!! Seealso "Taints and Tolerations"
More information on taints and tolerations can be found in the
- [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
\ No newline at end of file
+ [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
diff --git a/product_docs/docs/postgres_for_kubernetes/1/security.mdx b/product_docs/docs/postgres_for_kubernetes/1/security.mdx
index 1c2478ba653..fe00f55b4d4 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/security.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/security.mdx
@@ -376,4 +376,4 @@ For further detail on how `pg_hba.conf` is managed by the operator, see the
EDB Postgres for Kubernetes delegates encryption at rest to the underlying storage class. For
data protection in production environments, we highly recommend that you choose
-a storage class that supports encryption at rest.
\ No newline at end of file
+a storage class that supports encryption at rest.
diff --git a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx
index 8d7bc013078..e437cc8950d 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/ssl_connections.mdx
@@ -169,4 +169,4 @@ Output :
PostgreSQL 16.0 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat
8.3.1-5), 64-bit
(1 row)
-```
\ No newline at end of file
+```
diff --git a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
index 1ab33c76d11..eec486ff6a0 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/storage.mdx
@@ -454,4 +454,4 @@ through the number of instances and the persistent volumes that are attached to
In these cases it makes sense to configure the storage class used by the Postgres clusters
with a single replica. By having additional replicas defined in a storage solution like
Longhorn or Ceph, you might incur the issue known as write amplification, unnecessarily
-increasing disk I/O and space used.
\ No newline at end of file
+increasing disk I/O and space used.
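With Longhorn, for example, this is expressed through the `numberOfReplicas` storage class parameter. A sketch, assuming Longhorn's CSI driver (`driver.longhorn.io`) is installed; parameter names follow Longhorn's documented storage class options:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: longhorn-single-replica
provisioner: driver.longhorn.io
parameters:
  # Let PostgreSQL replication provide redundancy; keep one replica at the storage layer
  numberOfReplicas: "1"
```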
diff --git a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx
index e23e7f943d5..22c2f28d9b1 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/tde.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/tde.mdx
@@ -157,4 +157,4 @@ The custom commands need to obey the following conventions:
For example:
- wrap command: `openssl enc -aes-128-cbc -pass pass:temp-pass -e -out %p`
-- unwrap command: `openssl enc -aes-128-cbc -pass pass:temp-pass -d -in %p`
\ No newline at end of file
+- unwrap command: `openssl enc -aes-128-cbc -pass pass:temp-pass -d -in %p`
diff --git a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx
index 7709366978a..06bf2d3f0e9 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/troubleshooting.mdx
@@ -747,4 +747,4 @@ For example:
Please remember that you must have enough hugepages memory available to schedule
every Pod in the Cluster (in the example above, at least 512MiB per Pod must be
-free).
\ No newline at end of file
+free).
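To verify how much hugepages capacity the nodes actually advertise before sizing the Cluster, a quick check is:

```shell
# Show the hugepages resources reported by each node
kubectl describe nodes | grep -i hugepages
```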
diff --git a/product_docs/docs/postgres_for_kubernetes/1/use_cases.mdx b/product_docs/docs/postgres_for_kubernetes/1/use_cases.mdx
index 52a8617cdf6..3b20172c3e4 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/use_cases.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/use_cases.mdx
@@ -46,4 +46,4 @@ resource in Kubernetes.
The application can still benefit from a TLS connection to PostgreSQL.
-![Application outside Kubernetes](./images/apps-outside-k8s.png)
\ No newline at end of file
+![Application outside Kubernetes](./images/apps-outside-k8s.png)
diff --git a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
index 92feb23166c..1900d9bec12 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/wal_archiving.mdx
@@ -76,4 +76,4 @@ WALs, including the one requested by PostgreSQL.
When PostgreSQL requests the archiving of a WAL that has
already been archived by the instance manager as an optimization,
-that archival request is simply dismissed with a positive status.
\ No newline at end of file
+that archival request is simply dismissed with a positive status.
diff --git a/scripts/fileProcessor/processors/cnp/flatten-appendices.mjs b/scripts/fileProcessor/processors/cnp/flatten-appendices.mjs
new file mode 100644
index 00000000000..9eaed2e81ad
--- /dev/null
+++ b/scripts/fileProcessor/processors/cnp/flatten-appendices.mjs
@@ -0,0 +1,8 @@
+export const process = (filename, content) => {
+ if (filename.includes("/appendixes/"))
+ return {
+ newFilename: filename.replace(/\/appendixes\//, "/"),
+ newContent: content,
+ };
+ return {newFilename: filename, newContent: content};
+};
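For reference, this is how the new processor behaves on the appendix relocated by this patch; a hypothetical usage sketch (the import path is illustrative):

```js
import { process } from "./flatten-appendices.mjs";

// Appendix files are moved up one level:
process("product_docs/docs/postgres_for_kubernetes/1/appendixes/object_stores.mdx", "...");
// -> newFilename: "product_docs/docs/postgres_for_kubernetes/1/object_stores.mdx"

// Everything else passes through untouched:
process("product_docs/docs/postgres_for_kubernetes/1/backup.mdx", "...");
// -> newFilename unchanged
```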
diff --git a/scripts/fileProcessor/processors/cnp/update-links.mjs b/scripts/fileProcessor/processors/cnp/update-links.mjs
index bb26ae5161a..36ef8e0336c 100644
--- a/scripts/fileProcessor/processors/cnp/update-links.mjs
+++ b/scripts/fileProcessor/processors/cnp/update-links.mjs
@@ -6,7 +6,7 @@ import remarkFrontmatter from "remark-frontmatter";
import remarkStringify from "remark-stringify";
import admonitions from "remark-admonitions";
import visit from "unist-util-visit";
-import isAbsoluteUrl from "is-absolute-url";
+import yaml from "js-yaml";
export const process = async (filename, content) => {
const processor = unified()
@@ -38,13 +38,30 @@ export const process = async (filename, content) => {
function linkRewriter() {
return (tree) => {
+ let fileMetadata = {};
// link rewriter:
// - update links to supported_releases.md to point to /resources/platform-compatibility#pgk8s
- visit(tree, "link", (node) => {
- if (node.url === "supported_releases.md")
+ // - update links to release_notes to rel_notes
+ // - update links to appendixes/* to /*
+ // - update links *from* appendixes/* to /*
+ visit(tree, ["link", "yaml"], (node) => {
+ if (node.type === "yaml")
+ {
+ fileMetadata = yaml.load(node.value);
+ return;
+ }
+
+ if (fileMetadata.originalFilePath?.startsWith("src/appendixes"))
+ node.url = node.url.replace(/^\.\.\//, "");
+
+ if (node.url.startsWith("appendixes"))
+ node.url = node.url.replace("appendixes/", "");
+ else if (node.url === "supported_releases.md")
node.url = "/resources/platform-compatibility#pgk8s";
else if (node.url === "release_notes.md")
node.url = "rel_notes";
});
};
}
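Concretely, the branches above produce rewrites like the following; a sketch of expected input/output pairs derived from the code:

```js
// Links *to* an appendix lose the folder prefix:
//   "appendixes/object_stores.md" -> "object_stores.md"
// Links *from* a file whose originalFilePath starts with src/appendixes
// drop the leading "../":
//   "../backup.md" -> "backup.md"
// Special-cased pages keep their existing rewrites:
//   "supported_releases.md" -> "/resources/platform-compatibility#pgk8s"
//   "release_notes.md" -> "rel_notes"
```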
diff --git a/scripts/source/process-cnp-docs.sh b/scripts/source/process-cnp-docs.sh
index 77bd254a921..02403e0407b 100755
--- a/scripts/source/process-cnp-docs.sh
+++ b/scripts/source/process-cnp-docs.sh
@@ -29,12 +29,13 @@ node $DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs \
node $DESTINATION_CHECKOUT/scripts/fileProcessor/main.mjs \
-f "src/**/*.md" \
+ -p "cnp/add-frontmatters" \
+ -p "cnp/flatten-appendices" \
-p "cnp/replace-github-urls" \
-p "cnp/update-links" \
-p "cnp/update-yaml-links" \
-p "cnp/rewrite-mdextra-anchors" \
-p "cnp/strip-html-comments" \
- -p "cnp/add-frontmatters" \
-p "cnp/rename-to-mdx"
node $DESTINATION_CHECKOUT/scripts/source/merge-indexes.mjs \
diff --git a/src/components/link.js b/src/components/link.js
index c1d071d7270..239e9c479c3 100644
--- a/src/components/link.js
+++ b/src/components/link.js
@@ -21,7 +21,7 @@ const stripPathPrefix = (path, pathPrefix) => {
};
const stripMarkdownExtension = (path) => {
- return path.replace(/\.mdx?(?=$|\?|#)/, "");
+ return path.replace(/\.mdx?\/?(?=$|\?|#)/, "");
};
const isAbsoluteOrProtocolRelativeUrl = (url) => {
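The added `\/?` lets the helper also strip a trailing slash left over from directory-style links; a quick sketch of the new behavior:

```js
const stripMarkdownExtension = (path) => {
  return path.replace(/\.mdx?\/?(?=$|\?|#)/, "");
};

stripMarkdownExtension("backup.mdx/");    // -> "backup"
stripMarkdownExtension("backup.md#pitr"); // -> "backup#pitr"
```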
From 1ff245e699802ad69ad6aa56b4160194446dce5e Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 18 Oct 2023 21:07:22 +0000
Subject: [PATCH 7/8] do redirect properly, fix index navigation
---
.../docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx | 2 +-
product_docs/docs/postgres_for_kubernetes/1/index.mdx | 9 +++++++--
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
index c9cb12e4e9d..5adfd4f53a5 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/cloudnative-pg.v1.mdx
@@ -2,7 +2,7 @@
title: 'API Reference'
originalFilePath: 'src/cloudnative-pg.v1.md'
redirects:
-- api_reference
+- ../api_reference
---
Package v1 contains API Schema definitions for the postgresql v1 API group
diff --git a/product_docs/docs/postgres_for_kubernetes/1/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/index.mdx
index 3345db5e28a..25d53231219 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/index.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/index.mdx
@@ -30,7 +30,11 @@ navigation:
- failure_modes
- rolling_update
- replication
- - backup_recovery
+ - backup
+ - recovery
+ - backup_volumesnapshot
+ - backup_barmanobjectstore
+ - wal_archiving
- declarative_role_management
- storage
- labels_annotations
@@ -62,7 +66,8 @@ navigation:
- operator_capability_levels
- faq
- troubleshooting
- - api_reference
+ - cloudnative-pg.v1
+ - backup_recovery
- '#Appendix'
- object_stores
From e0c462f5e4223a1927a910ee33c473de3a08a3be Mon Sep 17 00:00:00 2001
From: Josh Heyer
Date: Wed, 18 Oct 2023 21:19:02 +0000
Subject: [PATCH 8/8] Add links to table as well
---
.../docs/postgres_for_kubernetes/1/rel_notes/index.mdx | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx
index 47ee0ff15ff..8771282f438 100644
--- a/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx
+++ b/product_docs/docs/postgres_for_kubernetes/1/rel_notes/index.mdx
@@ -67,14 +67,18 @@ The EDB Postgres for Kubernetes documentation describes the major version of EDB
| Version | Release date | Upstream merges |
| -------------------------- | ------------ | ------------------------------------------------------------------------------------------- |
+| [1.21.0](1_21_0_rel_notes) | 18 Oct 2023 | Upstream [1.21.0](https://cloudnative-pg.io/documentation/1.21/release_notes/v1.21/) |
+| [1.20.3](1_20_3_rel_notes) | 18 Oct 2023 | Upstream [1.20.3](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
| [1.20.2](1_20_2_rel_notes) | 27 Jul 2023 | Upstream [1.20.2](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
| [1.20.1](1_20_1_rel_notes) | 13 Jun 2023 | Upstream [1.20.1](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
| [1.20.0](1_20_0_rel_notes) | 27 Apr 2023 | Upstream [1.20.0](https://cloudnative-pg.io/documentation/1.20/release_notes/v1.20/) |
+| [1.19.5](1_19_5_rel_notes) | 18 Oct 2023 | Upstream [1.19.5](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
| [1.19.4](1_19_4_rel_notes) | 27 Jul 2023 | Upstream [1.19.4](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
| [1.19.3](1_19_3_rel_notes) | 13 Jun 2023 | Upstream [1.19.3](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
| [1.19.2](1_19_2_rel_notes) | 27 Apr 2023 | Upstream [1.19.2](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
| [1.19.1](1_19_1_rel_notes) | 20 Mar 2023 | Upstream [1.19.1](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
| [1.19.0](1_19_0_rel_notes) | 14 Feb 2023 | Upstream [1.19.0](https://cloudnative-pg.io/documentation/1.19/release_notes/v1.19/) |
+| [1.18.7](1_18_7_rel_notes) | 18 Oct 2023 | None |
| [1.18.6](1_18_6_rel_notes) | 27 Jul 2023 | None |
| [1.18.5](1_18_5_rel_notes) | 13 Jun 2023 | Upstream [1.18.5](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |
| [1.18.4](1_18_4_rel_notes) | 27 Apr 2023 | Upstream [1.18.4](https://cloudnative-pg.io/documentation/1.18/release_notes/v1.18/) |