Skip to content

Commit

Permalink
refactor: use getters for main fields in postgres.Instance (cloudnative-pg#5685)
Browse files Browse the repository at this point in the history

Closes: cloudnative-pg#5544

Signed-off-by: Jaime Silvela <[email protected]>
Signed-off-by: Gabriele Quaresima <[email protected]>
Signed-off-by: Armando Ruocco <[email protected]>
Co-authored-by: Gabriele Quaresima <[email protected]>
Co-authored-by: Armando Ruocco <[email protected]>
  • Loading branch information
3 people authored Oct 7, 2024
1 parent cd5a706 commit 90bc4ff
Show file tree
Hide file tree
Showing 22 changed files with 127 additions and 102 deletions.
13 changes: 6 additions & 7 deletions internal/cmd/manager/instance/join/cmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,14 +53,13 @@ func NewCmd() *cobra.Command {
},
RunE: func(cmd *cobra.Command, _ []string) error {
ctx := cmd.Context()
instance := postgres.NewInstance()

// The following are needed to correctly
// The fields in the instance are needed to correctly
// download the secret containing the TLS
// certificates
instance.Namespace = namespace
instance.PodName = podName
instance.ClusterName = clusterName
instance := postgres.NewInstance().
WithNamespace(namespace).
WithPodName(podName).
WithClusterName(clusterName)

info := postgres.InitInfo{
PgData: pgData,
Expand Down Expand Up @@ -112,7 +111,7 @@ func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postg
// Download the cluster definition from the API server
var cluster apiv1.Cluster
if err := reconciler.GetClient().Get(ctx,
ctrl.ObjectKey{Namespace: instance.Namespace, Name: instance.ClusterName},
ctrl.ObjectKey{Namespace: instance.GetNamespaceName(), Name: instance.GetClusterName()},
&cluster,
); err != nil {
log.Error(err, "Error while getting cluster")
Expand Down
14 changes: 7 additions & 7 deletions internal/cmd/manager/instance/run/cmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,12 +88,12 @@ func NewCmd() *cobra.Command {
},
RunE: func(cmd *cobra.Command, _ []string) error {
ctx := log.IntoContext(cmd.Context(), log.GetLogger())
instance := postgres.NewInstance()
instance := postgres.NewInstance().
WithPodName(podName).
WithClusterName(clusterName).
WithNamespace(namespace)

instance.PgData = pgData
instance.Namespace = namespace
instance.PodName = podName
instance.ClusterName = clusterName
instance.StatusPortTLS = statusPortTLS
instance.MetricsPortTLS = metricsPortTLS

Expand Down Expand Up @@ -152,14 +152,14 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error {
Cache: cache.Options{
ByObject: map[client.Object]cache.ByObject{
&apiv1.Cluster{}: {
Field: fields.OneTermEqualSelector("metadata.name", instance.ClusterName),
Field: fields.OneTermEqualSelector("metadata.name", instance.GetClusterName()),
Namespaces: map[string]cache.Config{
instance.Namespace: {},
instance.GetNamespaceName(): {},
},
},
&apiv1.Database{}: {
Namespaces: map[string]cache.Config{
instance.Namespace: {},
instance.GetNamespaceName(): {},
},
},
},
Expand Down
2 changes: 1 addition & 1 deletion internal/management/controller/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ func (r *InstanceReconciler) updateCacheFromCluster(ctx context.Context, cluster
}

func (r *InstanceReconciler) updateWALRestoreSettingsCache(ctx context.Context, cluster *apiv1.Cluster) {
_, env, barmanConfiguration, err := walrestore.GetRecoverConfiguration(cluster, r.instance.PodName)
_, env, barmanConfiguration, err := walrestore.GetRecoverConfiguration(cluster, r.instance.GetPodName())
if errors.Is(err, walrestore.ErrNoBackupConfigured) {
cache.Delete(cache.WALRestoreKey)
return
Expand Down
9 changes: 4 additions & 5 deletions internal/management/controller/database_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,11 +86,10 @@ var _ = Describe("Managed Database status", func() {
db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
Expect(err).ToNot(HaveOccurred())

pgInstance := &postgres.Instance{
Namespace: "default",
PodName: "cluster-example-1",
ClusterName: "cluster-example",
}
pgInstance := postgres.NewInstance().
WithNamespace("default").
WithPodName("cluster-example-1").
WithClusterName("cluster-example")

f := fakeInstanceData{
Instance: pgInstance,
Expand Down
4 changes: 2 additions & 2 deletions internal/management/controller/externalservers/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,8 @@ func (r *Reconciler) getCluster(ctx context.Context) (*apiv1.Cluster, error) {
var cluster apiv1.Cluster
err := r.client.Get(ctx,
types.NamespacedName{
Namespace: r.instance.Namespace,
Name: r.instance.ClusterName,
Namespace: r.instance.GetNamespaceName(),
Name: r.instance.GetClusterName(),
},
&cluster)
if err != nil {
Expand Down
48 changes: 27 additions & 21 deletions internal/management/controller/instance_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -239,14 +239,14 @@ func (r *InstanceReconciler) Reconcile(

if result, err := reconciler.ReconcileReplicationSlots(
ctx,
r.instance.PodName,
r.instance.GetPodName(),
infrastructure.NewPostgresManager(r.instance.ConnectionPool()),
cluster,
); err != nil || !result.IsZero() {
return result, err
}

if r.instance.PodName == cluster.Status.CurrentPrimary {
if r.instance.GetPodName() == cluster.Status.CurrentPrimary {
result, err := roles.Reconcile(ctx, r.instance, cluster, r.client)
if err != nil || !result.IsZero() {
return result, err
Expand Down Expand Up @@ -296,7 +296,7 @@ func (r *InstanceReconciler) Reconcile(
}

func (r *InstanceReconciler) configureSlotReplicator(cluster *apiv1.Cluster) {
switch r.instance.PodName {
switch r.instance.GetPodName() {
case cluster.Status.CurrentPrimary, cluster.Status.TargetPrimary:
r.instance.ConfigureSlotReplicator(nil)
default:
Expand All @@ -308,7 +308,7 @@ func (r *InstanceReconciler) restartPrimaryInplaceIfRequested(
ctx context.Context,
cluster *apiv1.Cluster,
) (bool, error) {
isPrimary := cluster.Status.CurrentPrimary == r.instance.PodName
isPrimary := cluster.Status.CurrentPrimary == r.instance.GetPodName()
restartRequested := isPrimary && cluster.Status.Phase == apiv1.PhaseInplacePrimaryRestart
if restartRequested {
if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary {
Expand Down Expand Up @@ -366,7 +366,7 @@ func (r *InstanceReconciler) refreshConfigurationFiles(
}

func (r *InstanceReconciler) reconcileFencing(ctx context.Context, cluster *apiv1.Cluster) *reconcile.Result {
fencingRequired := cluster.IsInstanceFenced(r.instance.PodName)
fencingRequired := cluster.IsInstanceFenced(r.instance.GetPodName())
isFenced := r.instance.IsFenced()
switch {
case !isFenced && fencingRequired:
Expand Down Expand Up @@ -411,7 +411,7 @@ func (r *InstanceReconciler) initialize(ctx context.Context, cluster *apiv1.Clus
return err
}

r.instance.SetFencing(cluster.IsInstanceFenced(r.instance.PodName))
r.instance.SetFencing(cluster.IsInstanceFenced(r.instance.GetPodName()))

return nil
}
Expand All @@ -428,7 +428,8 @@ func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster)

// we use a file as a flag to ensure the pod has been restarted already. I.e. on
// newly created pod we don't need to check the enforced parameters
filename := path.Join(r.instance.PgData, fmt.Sprintf("%s-%s", constants.Startup, r.instance.PodName))
filename := path.Join(r.instance.PgData, fmt.Sprintf("%s-%s",
constants.Startup, r.instance.GetPodName()))
exists, err := fileutils.FileExists(filename)
if err != nil {
return err
Expand Down Expand Up @@ -482,7 +483,7 @@ func (r *InstanceReconciler) reconcileOldPrimary(
) (restarted bool, err error) {
contextLogger := log.FromContext(ctx)

if cluster.Status.TargetPrimary == r.instance.PodName {
if cluster.Status.TargetPrimary == r.instance.GetPodName() {
return false, nil
}

Expand Down Expand Up @@ -744,7 +745,7 @@ func (r *InstanceReconciler) reconcileClusterRoleWithoutDB(
return false, err
}
// Reconcile replica role
if cluster.Status.TargetPrimary != r.instance.PodName {
if cluster.Status.TargetPrimary != r.instance.GetPodName() {
if !isPrimary {
// We need to ensure that this instance is replicating from the correct server
return r.instance.RefreshReplicaConfiguration(ctx, cluster, r.client)
Expand All @@ -767,7 +768,7 @@ func (r *InstanceReconciler) reconcileMetrics(
exporter := r.metricsServerExporter
// We should never reset the SwitchoverRequired metrics as it needs the primary instance restarts,
// however, if the cluster is healthy we make sure it is set to 0.
if cluster.Status.CurrentPrimary == r.instance.PodName {
if cluster.Status.CurrentPrimary == r.instance.GetPodName() {
if cluster.Status.Phase == apiv1.PhaseWaitingForUser {
exporter.Metrics.SwitchoverRequired.Set(1)
} else {
Expand Down Expand Up @@ -814,7 +815,7 @@ func (r *InstanceReconciler) reconcileMonitoringQueries(
var configMap corev1.ConfigMap
err := r.GetClient().Get(
ctx,
client.ObjectKey{Namespace: r.instance.Namespace, Name: reference.Name},
client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: reference.Name},
&configMap)
if err != nil {
contextLogger.Warning("Unable to get configMap containing custom monitoring queries",
Expand All @@ -841,7 +842,12 @@ func (r *InstanceReconciler) reconcileMonitoringQueries(

for _, reference := range cluster.Spec.Monitoring.CustomQueriesSecret {
var secret corev1.Secret
err := r.GetClient().Get(ctx, client.ObjectKey{Namespace: r.instance.Namespace, Name: reference.Name}, &secret)
err := r.GetClient().Get(ctx,
client.ObjectKey{
Namespace: r.instance.GetNamespaceName(),
Name: reference.Name,
},
&secret)
if err != nil {
contextLogger.Warning("Unable to get secret containing custom monitoring queries",
"reference", reference,
Expand Down Expand Up @@ -1177,7 +1183,7 @@ func (r *InstanceReconciler) refreshFileFromSecret(
func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv1.Cluster) error {
contextLogger := log.FromContext(ctx)

if cluster.Status.TargetPrimary != r.instance.PodName || cluster.IsReplica() {
if cluster.Status.TargetPrimary != r.instance.GetPodName() || cluster.IsReplica() {
return nil
}

Expand Down Expand Up @@ -1206,8 +1212,8 @@ func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv
}

// if the currentPrimary doesn't match the PodName we set the correct value.
if cluster.Status.CurrentPrimary != r.instance.PodName {
cluster.Status.CurrentPrimary = r.instance.PodName
if cluster.Status.CurrentPrimary != r.instance.GetPodName() {
cluster.Status.CurrentPrimary = r.instance.GetPodName()
cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp()

if err := r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)); err != nil {
Expand Down Expand Up @@ -1238,7 +1244,7 @@ func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv
func (r *InstanceReconciler) handlePromotion(ctx context.Context, cluster *apiv1.Cluster) error {
contextLogger := log.FromContext(ctx)
contextLogger.Info("I'm the target primary, wait for the wal_receiver to be terminated")
if r.instance.PodName != cluster.Status.CurrentPrimary {
if r.instance.GetPodName() != cluster.Status.CurrentPrimary {
// if the cluster is not replicating it means it's doing a failover and
// we have to wait for wal receivers to be down
err := r.waitForWalReceiverDown()
Expand All @@ -1262,7 +1268,7 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary(
cluster *apiv1.Cluster,
) (changed bool, err error) {
// If I'm already the current designated primary everything is ok.
if cluster.Status.CurrentPrimary == r.instance.PodName && !r.instance.RequiresDesignatedPrimaryTransition {
if cluster.Status.CurrentPrimary == r.instance.GetPodName() && !r.instance.RequiresDesignatedPrimaryTransition {
return false, nil
}

Expand All @@ -1276,7 +1282,7 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary(
log.FromContext(ctx).Info("Setting myself as the current designated primary")

oldCluster := cluster.DeepCopy()
cluster.Status.CurrentPrimary = r.instance.PodName
cluster.Status.CurrentPrimary = r.instance.GetPodName()
cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp()
if r.instance.RequiresDesignatedPrimaryTransition {
externalcluster.SetDesignatedPrimaryTransitionCompleted(cluster)
Expand Down Expand Up @@ -1350,7 +1356,7 @@ func (r *InstanceReconciler) reconcileUser(ctx context.Context, username string,
var secret corev1.Secret
err := r.GetClient().Get(
ctx,
client.ObjectKey{Namespace: r.instance.Namespace, Name: secretName},
client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: secretName},
&secret)
if err != nil {
if apierrors.IsNotFound(err) {
Expand Down Expand Up @@ -1393,7 +1399,7 @@ func (r *InstanceReconciler) refreshPGHBA(ctx context.Context, cluster *apiv1.Cl
err := r.GetClient().Get(ctx,
types.NamespacedName{
Name: ldapSecretName,
Namespace: r.instance.Namespace,
Namespace: r.instance.GetNamespaceName(),
}, &ldapBindPasswordSecret)
if err != nil {
return false, err
Expand Down Expand Up @@ -1454,7 +1460,7 @@ func (r *InstanceReconciler) dropStaleReplicationConnections(
return ctrl.Result{}, nil
}

if cluster.Status.CurrentPrimary == r.instance.PodName {
if cluster.Status.CurrentPrimary == r.instance.GetPodName() {
return ctrl.Result{}, nil
}

Expand Down
20 changes: 10 additions & 10 deletions internal/management/controller/instance_startup.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ func (r *InstanceReconciler) refreshServerCertificateFiles(ctx context.Context,
func() error {
err := r.GetClient().Get(
ctx,
client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ServerTLSSecret},
client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ServerTLSSecret},
&secret)
if err != nil {
contextLogger.Info("Error accessing server TLS Certificate. Retrying with exponential backoff.",
Expand Down Expand Up @@ -86,7 +86,7 @@ func (r *InstanceReconciler) refreshReplicationUserCertificate(
var secret corev1.Secret
err := r.GetClient().Get(
ctx,
client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ReplicationTLSSecret},
client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ReplicationTLSSecret},
&secret)
if err != nil {
return false, err
Expand All @@ -105,7 +105,7 @@ func (r *InstanceReconciler) refreshClientCA(ctx context.Context, cluster *apiv1
var secret corev1.Secret
err := r.GetClient().Get(
ctx,
client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ClientCASecret},
client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ClientCASecret},
&secret)
if err != nil {
return false, err
Expand All @@ -120,7 +120,7 @@ func (r *InstanceReconciler) refreshServerCA(ctx context.Context, cluster *apiv1
var secret corev1.Secret
err := r.GetClient().Get(
ctx,
client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ServerCASecret},
client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ServerCASecret},
&secret)
if err != nil {
return false, err
Expand Down Expand Up @@ -148,7 +148,7 @@ func (r *InstanceReconciler) refreshBarmanEndpointCA(ctx context.Context, cluste
var secret corev1.Secret
err := r.GetClient().Get(
ctx,
client.ObjectKey{Namespace: r.instance.Namespace, Name: secretKeySelector.Name},
client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: secretKeySelector.Name},
&secret)
if err != nil {
return false, err
Expand Down Expand Up @@ -194,7 +194,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context
"of the cluster is resumed, demoting immediately")
return r.instance.Demote(ctx, cluster)

case targetPrimary == r.instance.PodName:
case targetPrimary == r.instance.GetPodName():
if currentPrimary == "" {
// This means that this cluster has been just started up and the
// current primary still need to be written
Expand All @@ -203,7 +203,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context
"targetPrimary", targetPrimary)

oldCluster := cluster.DeepCopy()
cluster.Status.CurrentPrimary = r.instance.PodName
cluster.Status.CurrentPrimary = r.instance.GetPodName()
cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp()
return r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster))
}
Expand Down Expand Up @@ -349,12 +349,12 @@ func (r *InstanceReconciler) ReconcileTablespaces(
mountPoint := specs.MountForTablespace(tbsName)
if tbsMount, err := fileutils.FileExists(mountPoint); err != nil {
contextLogger.Error(err, "while checking for mountpoint", "instance",
r.instance.PodName, "tablespace", tbsName)
r.instance.GetPodName(), "tablespace", tbsName)
return err
} else if !tbsMount {
contextLogger.Error(fmt.Errorf("mountpoint not found"),
"mountpoint for tablespaces is missing",
"instance", r.instance.PodName, "tablespace", tbsName)
"instance", r.instance.GetPodName(), "tablespace", tbsName)
continue
}

Expand All @@ -369,7 +369,7 @@ func (r *InstanceReconciler) ReconcileTablespaces(
if err != nil {
contextLogger.Error(err,
"could not create data dir in tablespace mount",
"instance", r.instance.PodName, "tablespace", tbsName)
"instance", r.instance.GetPodName(), "tablespace", tbsName)
return fmt.Errorf("while creating data dir in tablespace %s: %w", mountPoint, err)
}
}
Expand Down
6 changes: 3 additions & 3 deletions internal/management/controller/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,8 +85,8 @@ func (r *InstanceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, er
var cluster apiv1.Cluster
err := r.GetClient().Get(ctx,
types.NamespacedName{
Namespace: r.instance.Namespace,
Name: r.instance.ClusterName,
Namespace: r.instance.GetNamespaceName(),
Name: r.instance.GetClusterName(),
},
&cluster)
if err != nil {
Expand All @@ -102,7 +102,7 @@ func (r *InstanceReconciler) GetSecret(ctx context.Context, name string) (*corev
err := r.GetClient().Get(ctx,
types.NamespacedName{
Name: name,
Namespace: r.instance.Namespace,
Namespace: r.instance.GetNamespaceName(),
}, &secret)
if err != nil {
return nil, fmt.Errorf("while getting secret: %w", err)
Expand Down
Loading

0 comments on commit 90bc4ff

Please sign in to comment.