diff --git a/README.md b/README.md
index 98d51147e..b548ff100 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
-Quickstart |
+Quickstart | Docs | Cluster API Book
 
 ⭐ Consider leaving a star — it motivates us a lot! ⭐
 
@@ -62,7 +62,7 @@ The best way to get started with CAPH is to spin up a cluster. For that you can
 
 Additional resources from the documentation:
 
-- [**Cluster API Provider Hetzner 15 Minute Tutorial**](https://syself.com/docs/caph/getting-started/quickstart): Set up a bootstrap cluster using Kind and deploy a Kubernetes cluster on Hetzner.
+- [**Cluster API Provider Hetzner 15 Minute Tutorial**](https://syself.com/docs/caph/getting-started/quickstart/prerequisites): Set up a bootstrap cluster using Kind and deploy a Kubernetes cluster on Hetzner.
 - [**Develop and test Kubernetes clusters with Tilt**](https://syself.com/docs/caph/developers/development-guide): Start using Tilt for rapid testing of various cluster flavors, like with/without a private network or bare metal.
 - [**Develop and test your own node-images**](https://syself.com/docs/caph/topics/node-image): Learn how to use your own machine images for production systems.
diff --git a/controllers/hetznerbaremetalhost_controller.go b/controllers/hetznerbaremetalhost_controller.go
index 3b08a72e5..420c76e5e 100644
--- a/controllers/hetznerbaremetalhost_controller.go
+++ b/controllers/hetznerbaremetalhost_controller.go
@@ -274,6 +274,7 @@ func (r *HetznerBareMetalHostReconciler) getSecrets(
 			infrav1.CredentialsAvailableCondition,
 			infrav1.OSSSHSecretMissingReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
 		record.Warnf(bmHost, infrav1.OSSSHSecretMissingReason, msg)
diff --git a/controllers/hetznercluster_controller.go b/controllers/hetznercluster_controller.go
index e7d09692e..4e33e055f 100644
--- a/controllers/hetznercluster_controller.go
+++ b/controllers/hetznercluster_controller.go
@@ -234,6 +234,7 @@ func (r *HetznerClusterReconciler) reconcileNormal(ctx context.Context, clusterS
 			infrav1.TargetClusterSecretReadyCondition,
 			infrav1.TargetSecretSyncFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			reterr.Error(),
 		)
 		return reconcile.Result{}, reterr
@@ -252,7 +253,7 @@ func processControlPlaneEndpoint(hetznerCluster *infrav1.HetznerCluster) {
 	if hetznerCluster.Spec.ControlPlaneLoadBalancer.Enabled {
 		if hetznerCluster.Status.ControlPlaneLoadBalancer.IPv4 != "" {
 			defaultHost := hetznerCluster.Status.ControlPlaneLoadBalancer.IPv4
-			defaultPort := int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port)
+			defaultPort := int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
 
 			if hetznerCluster.Spec.ControlPlaneEndpoint == nil {
 				hetznerCluster.Spec.ControlPlaneEndpoint = &clusterv1.APIEndpoint{
@@ -270,7 +271,7 @@ func processControlPlaneEndpoint(hetznerCluster *infrav1.HetznerCluster) {
 			conditions.MarkTrue(hetznerCluster, infrav1.ControlPlaneEndpointSetCondition)
 			hetznerCluster.Status.Ready = true
 		} else {
-			msg := "enabled LoadBalancer but load balancer not ready yet"
+			const msg = "enabled LoadBalancer but load balancer not ready yet"
 			conditions.MarkFalse(hetznerCluster,
 				infrav1.ControlPlaneEndpointSetCondition,
 				infrav1.ControlPlaneEndpointNotSetReason,
@@ -283,7 +284,7 @@ func processControlPlaneEndpoint(hetznerCluster *infrav1.HetznerCluster) {
 		conditions.MarkTrue(hetznerCluster, infrav1.ControlPlaneEndpointSetCondition)
 		hetznerCluster.Status.Ready = true
 	} else {
-		msg := "disabled LoadBalancer and not yet provided ControlPlane endpoint"
+		const msg = "disabled LoadBalancer and not yet provided ControlPlane endpoint"
 		conditions.MarkFalse(hetznerCluster,
 			infrav1.ControlPlaneEndpointSetCondition,
 			infrav1.ControlPlaneEndpointNotSetReason,
@@ -454,6 +455,7 @@ func hcloudTokenErrorResult(
 		conditionType,
 		infrav1.HCloudCredentialsInvalidReason,
 		clusterv1.ConditionSeverityError,
+		"%s",
 		err.Error(),
 	)
 	return reconcile.Result{}, fmt.Errorf("an unhandled failure occurred with the Hetzner secret: %w", err)
@@ -575,6 +577,7 @@ func (r *HetznerClusterReconciler) reconcileTargetClusterManager(ctx context.Con
 			infrav1.TargetClusterReadyCondition,
 			infrav1.TargetClusterCreateFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			err.Error(),
 		)
diff --git a/controllers/hetznercluster_controller_test.go b/controllers/hetznercluster_controller_test.go
index 79f1b7c98..4365ab068 100644
--- a/controllers/hetznercluster_controller_test.go
+++ b/controllers/hetznercluster_controller_test.go
@@ -1283,8 +1283,8 @@ func TestSetControlPlaneEndpoint(t *testing.T) {
 		t.Fatalf("Wrong value for Host set. Got: %s, Want: %s", hetznerCluster.Spec.ControlPlaneEndpoint.Host, hetznerCluster.Status.ControlPlaneLoadBalancer.IPv4)
 	}
 
-	if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) {
-		t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port))
+	if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) { //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
+		t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port)) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
 	}
 
 	if hetznerCluster.Status.Ready != true {
@@ -1317,8 +1317,8 @@ func TestSetControlPlaneEndpoint(t *testing.T) {
 		t.Fatalf("Wrong value for Host set. Got: %s, Want: %s", hetznerCluster.Spec.ControlPlaneEndpoint.Host, hetznerCluster.Status.ControlPlaneLoadBalancer.IPv4)
 	}
 
-	if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) {
-		t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port))
+	if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) { //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
+		t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port)) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
 	}
 
 	if hetznerCluster.Status.Ready != true {
@@ -1351,8 +1351,8 @@ func TestSetControlPlaneEndpoint(t *testing.T) {
 		t.Fatalf("Wrong value for Host set. Got: %s, Want: 'xyz'", hetznerCluster.Spec.ControlPlaneEndpoint.Host)
 	}
 
-	if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) {
-		t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port))
+	if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) { //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
+		t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port)) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
 	}
 
 	if hetznerCluster.Status.Ready != true {
diff --git a/pkg/scope/cluster.go b/pkg/scope/cluster.go
index 66748ec3a..42c22d54f 100644
--- a/pkg/scope/cluster.go
+++ b/pkg/scope/cluster.go
@@ -143,7 +143,7 @@ func (s *ClusterScope) SetStatusFailureDomain(regions []infrav1.Region) {
 
 // ControlPlaneAPIEndpointPort returns the Port of the Kube-api server.
 func (s *ClusterScope) ControlPlaneAPIEndpointPort() int32 {
-	return int32(s.HetznerCluster.Spec.ControlPlaneLoadBalancer.Port)
+	return int32(s.HetznerCluster.Spec.ControlPlaneLoadBalancer.Port) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
 }
 
 // ClientConfig return a kubernetes client config for the cluster context.
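Note on the `//nolint:gosec` annotations above: they suppress gosec's G115 rule (potential integer overflow when narrowing `int` to `int32`). gosec reasons locally and cannot see that kubebuilder validation markers on the CRD already restrict the port to 1 to 65535 at admission time, so the conversion is provably safe. A minimal standalone sketch of the pattern; `LoadBalancerSpec` and `endpointPort` are illustrative stand-ins, not the provider's real types:

```go
package main

import "fmt"

// LoadBalancerSpec stands in for the real CRD field. On the actual
// HetznerCluster type the range is enforced by the API server via
// kubebuilder markers, e.g.:
//
//	+kubebuilder:validation:Minimum=1
//	+kubebuilder:validation:Maximum=65535
type LoadBalancerSpec struct {
	Port int
}

// endpointPort narrows int to int32. gosec flags every such conversion
// (G115) because it cannot prove the value fits; the nolint directive
// records why the conversion is safe in this code base.
func endpointPort(spec LoadBalancerSpec) int32 {
	return int32(spec.Port) //nolint:gosec // Port is validated to 1..65535 via kubebuilder, so it always fits into int32.
}

func main() {
	fmt.Println(endpointPort(LoadBalancerSpec{Port: 6443})) // prints 6443
}
```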
diff --git a/pkg/services/baremetal/baremetal/baremetal.go b/pkg/services/baremetal/baremetal/baremetal.go
index ba619fa58..fd05bd115 100644
--- a/pkg/services/baremetal/baremetal/baremetal.go
+++ b/pkg/services/baremetal/baremetal/baremetal.go
@@ -215,7 +215,14 @@ func (s *Service) update(ctx context.Context) error {
 	if readyCondition.Status == corev1.ConditionTrue {
 		conditions.MarkTrue(s.scope.BareMetalMachine, infrav1.HostReadyCondition)
 	} else if readyCondition.Status == corev1.ConditionFalse {
-		conditions.MarkFalse(s.scope.BareMetalMachine, infrav1.HostReadyCondition, readyCondition.Reason, readyCondition.Severity, readyCondition.Message)
+		conditions.MarkFalse(
+			s.scope.BareMetalMachine,
+			infrav1.HostReadyCondition,
+			readyCondition.Reason,
+			readyCondition.Severity,
+			"%s",
+			readyCondition.Message,
+		)
 	}
 }
 
@@ -297,6 +304,7 @@ func (s *Service) associate(ctx context.Context) error {
 			infrav1.HostAssociateSucceededCondition,
 			infrav1.NoAvailableHostReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			fmt.Sprintf("no available host (%s)", reason),
 		)
 		return &scope.RequeueAfterError{RequeueAfter: requeueAfter}
@@ -318,6 +326,7 @@ func (s *Service) associate(ctx context.Context) error {
 			infrav1.HostAssociateSucceededCondition,
 			infrav1.HostAssociateFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			reterr.Error(),
 		)
 		return reterr
diff --git a/pkg/services/baremetal/host/host.go b/pkg/services/baremetal/host/host.go
index feed86e82..9a9187f71 100644
--- a/pkg/services/baremetal/host/host.go
+++ b/pkg/services/baremetal/host/host.go
@@ -182,6 +182,7 @@ func (s *Service) actionPreparing(_ context.Context) actionResult {
 			infrav1.ProvisionSucceededCondition,
 			infrav1.ServerNotFoundReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
 		record.Warnf(s.scope.HetznerBareMetalHost, infrav1.ServerNotFoundReason, msg)
@@ -224,6 +225,7 @@ func (s *Service) actionPreparing(_ context.Context) actionResult {
 			infrav1.ProvisionSucceededCondition,
 			infrav1.RescueSystemUnavailableReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			errMsg,
 		)
 		record.Warnf(s.scope.HetznerBareMetalHost, "NoRescueSystemAvailable", errMsg)
@@ -336,6 +338,7 @@ func (s *Service) ensureSSHKey(sshSecretRef infrav1.SSHSecretRef, sshSecret *cor
 			infrav1.CredentialsAvailableCondition,
 			infrav1.SSHKeyAlreadyExistsReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
 		record.Warnf(s.scope.HetznerBareMetalHost, infrav1.SSHKeyAlreadyExistsReason, msg)
@@ -367,6 +370,7 @@ func (s *Service) handleIncompleteBoot(isRebootIntoRescue, isTimeout, isConnecti
 			infrav1.ProvisionSucceededCondition,
 			infrav1.SSHConnectionRefusedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
 		record.Warnf(s.scope.HetznerBareMetalHost, "SSHConnectionError", msg)
@@ -538,6 +542,7 @@ func (s *Service) handleErrorTypeHardwareRebootFailed(isSSHTimeoutError, wantsRe
 			infrav1.ProvisionSucceededCondition,
 			infrav1.RebootTimedOutReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
@@ -653,6 +658,7 @@ func (s *Service) actionRegistering(_ context.Context) actionResult {
 			infrav1.RootDeviceHintsValidatedCondition,
 			infrav1.ValidationFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			errMsg,
 		)
 		return s.recordActionFailure(infrav1.RegistrationError, errMsg)
@@ -665,6 +671,7 @@ func (s *Service) actionRegistering(_ context.Context) actionResult {
 			infrav1.RootDeviceHintsValidatedCondition,
 			infrav1.ValidationFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			err.Error(),
 		)
 		return s.recordActionFailure(infrav1.RegistrationError, err.Error())
@@ -691,6 +698,7 @@ func (s *Service) actionRegistering(_ context.Context) actionResult {
 			infrav1.RootDeviceHintsValidatedCondition,
 			infrav1.ValidationFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
 		return s.recordActionFailure(infrav1.FatalError, msg)
@@ -1112,6 +1120,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
 			infrav1.ProvisionSucceededCondition,
 			infrav1.CheckDiskFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
 		record.Warn(s.scope.HetznerBareMetalHost, infrav1.CheckDiskFailedReason, msg)
@@ -1143,6 +1152,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
 			infrav1.ProvisionSucceededCondition,
 			infrav1.WipeDiskFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
 		record.Warn(s.scope.HetznerBareMetalHost, infrav1.WipeDiskFailedReason, msg)
@@ -1157,6 +1167,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
 			infrav1.ProvisionSucceededCondition,
 			infrav1.WipeDiskFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			msg,
 		)
 		record.Warn(s.scope.HetznerBareMetalHost, infrav1.WipeDiskFailedReason, msg)
@@ -1184,6 +1195,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
 			infrav1.ProvisionSucceededCondition,
 			infrav1.LinuxOnOtherDiskFoundReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
 		record.Warn(s.scope.HetznerBareMetalHost, infrav1.LinuxOnOtherDiskFoundReason, msg)
@@ -1200,6 +1212,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
 			infrav1.ProvisionSucceededCondition,
 			infrav1.SSHToRescueSystemFailedReason,
 			clusterv1.ConditionSeverityInfo,
+			"%s",
 			msg,
 		)
 		record.Event(s.scope.HetznerBareMetalHost, infrav1.SSHToRescueSystemFailedReason, msg)
@@ -1331,6 +1344,7 @@ func (s *Service) createAutoSetupInput(sshClient sshclient.Client) (autoSetupInp
 			infrav1.ProvisionSucceededCondition,
 			infrav1.ImageSpecInvalidReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			errorMessage,
 		)
 		return autoSetupInput{}, s.recordActionFailure(infrav1.ProvisioningError, errorMessage)
@@ -1344,6 +1358,7 @@ func (s *Service) createAutoSetupInput(sshClient sshclient.Client) (autoSetupInp
 			infrav1.ProvisionSucceededCondition,
 			infrav1.ImageDownloadFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			err.Error(),
 		)
 		return autoSetupInput{}, actionError{err: err}
@@ -1367,6 +1382,7 @@ func (s *Service) createAutoSetupInput(sshClient sshclient.Client) (autoSetupInp
 			infrav1.ProvisionSucceededCondition,
 			infrav1.NoStorageDeviceFoundReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			msg,
 		)
 		return autoSetupInput{}, s.recordActionFailure(infrav1.ProvisioningError, msg)
@@ -1817,6 +1833,7 @@ func (s *Service) handleRobotRateLimitExceeded(err error, functionName string) {
 			infrav1.HetznerAPIReachableCondition,
 			infrav1.RateLimitExceededReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			msg,
 		)
 		record.Warnf(s.scope.HetznerBareMetalHost, "RateLimitExceeded", msg)
diff --git a/pkg/services/baremetal/host/state_machine.go b/pkg/services/baremetal/host/state_machine.go
index cc336a169..bd46f633d 100644
--- a/pkg/services/baremetal/host/state_machine.go
+++ b/pkg/services/baremetal/host/state_machine.go
@@ -172,7 +172,15 @@ func (hsm *hostStateMachine) updateOSSSHStatusAndValidateKey(osSSHSecret *corev1
 	}
 	if err := validateSSHKey(osSSHSecret, hsm.host.Spec.Status.SSHSpec.SecretRef); err != nil {
 		msg := fmt.Sprintf("ssh credentials are invalid: %s", err.Error())
-		conditions.MarkFalse(hsm.host, infrav1.CredentialsAvailableCondition, infrav1.SSHCredentialsInSecretInvalidReason, clusterv1.ConditionSeverityError, msg)
+		conditions.MarkFalse(
+			hsm.host,
+			infrav1.CredentialsAvailableCondition,
+			infrav1.SSHCredentialsInSecretInvalidReason,
+			clusterv1.ConditionSeverityError,
+			"%s",
+			msg,
+		)
+		record.Warnf(hsm.host, infrav1.SSHKeyAlreadyExistsReason, msg)
 		return hsm.reconciler.recordActionFailure(infrav1.PreparationError, infrav1.ErrorMessageMissingOrInvalidSecretData)
 	}
@@ -202,7 +210,14 @@ func (hsm *hostStateMachine) updateRescueSSHStatusAndValidateKey(rescueSSHSecret
 	}
 	if err := validateSSHKey(rescueSSHSecret, hsm.reconciler.scope.HetznerCluster.Spec.SSHKeys.RobotRescueSecretRef); err != nil {
 		msg := fmt.Sprintf("ssh credentials for rescue system are invalid: %s", err.Error())
-		conditions.MarkFalse(hsm.host, infrav1.CredentialsAvailableCondition, infrav1.SSHCredentialsInSecretInvalidReason, clusterv1.ConditionSeverityError, msg)
+		conditions.MarkFalse(
+			hsm.host,
+			infrav1.CredentialsAvailableCondition,
+			infrav1.SSHCredentialsInSecretInvalidReason,
+			clusterv1.ConditionSeverityError,
+			"%s",
+			msg,
+		)
 		return hsm.reconciler.recordActionFailure(infrav1.PreparationError, infrav1.ErrorMessageMissingOrInvalidSecretData)
 	}
 	return nil
diff --git a/pkg/services/hcloud/loadbalancer/loadbalancer.go b/pkg/services/hcloud/loadbalancer/loadbalancer.go
index 4548683c0..45512ccf2 100644
--- a/pkg/services/hcloud/loadbalancer/loadbalancer.go
+++ b/pkg/services/hcloud/loadbalancer/loadbalancer.go
@@ -94,6 +94,7 @@ func (s *Service) Reconcile(ctx context.Context) (reconcile.Result, error) {
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.LoadBalancerUpdateFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			err.Error(),
 		)
 		return reconcile.Result{}, fmt.Errorf("failed to reconcile load balancer properties: %w", err)
@@ -109,6 +110,7 @@ func (s *Service) Reconcile(ctx context.Context) (reconcile.Result, error) {
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.LoadBalancerServiceSyncFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			err.Error(),
 		)
 		return reconcile.Result{}, fmt.Errorf("failed to reconcile services: %w", err)
@@ -137,6 +139,7 @@ func (s *Service) reconcileNetworkAttachement(ctx context.Context, lb *hcloud.Lo
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.NetworkAttachFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			err.Error(),
 		)
@@ -165,6 +168,7 @@ func (s *Service) reconcileNetworkAttachement(ctx context.Context, lb *hcloud.Lo
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.NetworkAttachFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			err.Error(),
 		)
 		return err
@@ -293,6 +297,7 @@ func (s *Service) createLoadBalancer(ctx context.Context) (*hcloud.LoadBalancer,
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.LoadBalancerCreateFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			err.Error(),
 		)
 		record.Warnf(s.scope.HetznerCluster, "FailedCreateLoadBalancer", err.Error())
@@ -370,6 +375,7 @@ func (s *Service) Delete(ctx context.Context) (err error) {
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.LoadBalancerUpdateFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			err.Error(),
 		)
 		return err
@@ -394,6 +400,7 @@ func (s *Service) Delete(ctx context.Context) (err error) {
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.LoadBalancerDeleteFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			err.Error(),
 		)
 		return err
@@ -448,6 +455,7 @@ func (s *Service) ownExistingLoadBalancer(ctx context.Context) (*hcloud.LoadBala
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.LoadBalancerFailedToOwnReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			fmt.Sprintf("load balancer %q not found", name),
 		)
 		return nil, ErrNoLoadBalancerAvailable
@@ -462,6 +470,7 @@ func (s *Service) ownExistingLoadBalancer(ctx context.Context) (*hcloud.LoadBala
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.LoadBalancerFailedToOwnReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			fmt.Sprintf("load balancer %q already owned with label %q", name, label),
 		)
 		return nil, ErrNoLoadBalancerAvailable
@@ -485,6 +494,7 @@ func (s *Service) ownExistingLoadBalancer(ctx context.Context) (*hcloud.LoadBala
 			infrav1.LoadBalancerReadyCondition,
 			infrav1.LoadBalancerFailedToOwnReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			err.Error(),
 		)
 		return nil, err
diff --git a/pkg/services/hcloud/network/network.go b/pkg/services/hcloud/network/network.go
index 6d6af928a..3db250f0b 100644
--- a/pkg/services/hcloud/network/network.go
+++ b/pkg/services/hcloud/network/network.go
@@ -62,6 +62,7 @@ func (s *Service) Reconcile(ctx context.Context) (err error) {
 			infrav1.NetworkReadyCondition,
 			infrav1.NetworkReconcileFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			err.Error(),
 		)
 	}
diff --git a/pkg/services/hcloud/placementgroup/placementgroup.go b/pkg/services/hcloud/placementgroup/placementgroup.go
index c33216437..86a115781 100644
--- a/pkg/services/hcloud/placementgroup/placementgroup.go
+++ b/pkg/services/hcloud/placementgroup/placementgroup.go
@@ -55,6 +55,7 @@ func (s *Service) Reconcile(ctx context.Context) (err error) {
 			infrav1.PlacementGroupsSyncedCondition,
 			infrav1.PlacementGroupsSyncFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			err.Error(),
 		)
 	}
diff --git a/pkg/services/hcloud/server/server.go b/pkg/services/hcloud/server/server.go
index b13067d19..8cd3c308f 100644
--- a/pkg/services/hcloud/server/server.go
+++ b/pkg/services/hcloud/server/server.go
@@ -157,6 +157,7 @@ func (s *Service) Reconcile(ctx context.Context) (res reconcile.Result, err erro
 			infrav1.ServerAvailableCondition,
 			infrav1.NetworkAttachFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			reterr.Error(),
 		)
 		return res, reterr
@@ -178,6 +179,7 @@ func (s *Service) Reconcile(ctx context.Context) (res reconcile.Result, err erro
 			infrav1.ServerAvailableCondition,
 			infrav1.LoadBalancerAttachFailedReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			reterr.Error(),
 		)
 		return res, reterr
@@ -445,6 +447,7 @@ func (s *Service) createServer(ctx context.Context) (*hcloud.Server, error) {
 			infrav1.ServerCreateSucceededCondition,
 			infrav1.SSHKeyNotFoundReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			err.Error(),
 		)
 		return nil, errServerCreateNotPossible
@@ -475,6 +478,7 @@ func (s *Service) createServer(ctx context.Context) (*hcloud.Server, error) {
 			infrav1.ServerCreateSucceededCondition,
 			infrav1.ServerCreateFailedReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			err.Error(),
 		)
 		record.Warnf(s.scope.HCloudMachine,
@@ -547,6 +551,7 @@ func (s *Service) getServerImage(ctx context.Context) (*hcloud.Image, error) {
 			infrav1.ServerCreateSucceededCondition,
 			infrav1.ImageAmbiguousReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			err.Error(),
 		)
 		return nil, errServerCreateNotPossible
@@ -558,6 +563,7 @@ func (s *Service) getServerImage(ctx context.Context) (*hcloud.Image, error) {
 			infrav1.ServerCreateSucceededCondition,
 			infrav1.ImageNotFoundReason,
 			clusterv1.ConditionSeverityError,
+			"%s",
 			err.Error(),
 		)
 		return nil, errServerCreateNotPossible
diff --git a/pkg/services/hcloud/util/utils.go b/pkg/services/hcloud/util/utils.go
index 6310b8707..54dc74ada 100644
--- a/pkg/services/hcloud/util/utils.go
+++ b/pkg/services/hcloud/util/utils.go
@@ -78,6 +78,7 @@ func HandleRateLimitExceeded(obj runtimeObjectWithConditions, err error, functio
 			infrav1.HetznerAPIReachableCondition,
 			infrav1.RateLimitExceededReason,
 			clusterv1.ConditionSeverityWarning,
+			"%s",
 			msg,
 		)
 		record.Warnf(obj, "RateLimitExceeded", msg)
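
Note on the recurring `+ "%s",` hunks: they all fix the same class of issue. `conditions.MarkFalse` ends in printf-style parameters (`messageFormat string, messageArgs ...interface{}`), so passing a dynamic message in the format position trips the `go vet` printf check ("non-constant format string", reported since Go 1.24) and would also mangle any message that happens to contain a `%`. Supplying a constant `"%s"` format, or declaring the message as a `const` as done in `processControlPlaneEndpoint`, resolves both. A minimal sketch; `markFalsef` is a hypothetical printf-style wrapper standing in for the real API:

```go
package main

import "fmt"

// markFalsef mimics printf-style APIs such as conditions.MarkFalse:
// the last two parameters form a format string plus its arguments.
// vet infers wrappers like this automatically because the pair is
// forwarded to fmt.Sprintf.
func markFalsef(condition, reason, messageFormat string, messageArgs ...interface{}) {
	fmt.Printf("condition=%s reason=%s message=%s\n",
		condition, reason, fmt.Sprintf(messageFormat, messageArgs...))
}

func main() {
	err := fmt.Errorf("quota exceeded: 100%% of allowed load balancers in use")

	// Rejected by `go vet` in Go 1.24+ ("non-constant format string in
	// call to markFalsef"), and the '%' inside the message would be
	// parsed as a stray formatting verb:
	//
	//	markFalsef("LoadBalancerReady", "CreateFailed", err.Error())

	// The fix applied throughout this diff: a constant "%s" format.
	markFalsef("LoadBalancerReady", "CreateFailed", "%s", err.Error())

	// The variant used in processControlPlaneEndpoint: a const format
	// string is provably free of stray verbs, so vet accepts it as-is.
	const msg = "enabled LoadBalancer but load balancer not ready yet"
	markFalsef("ControlPlaneEndpointSet", "ControlPlaneEndpointNotSet", msg)
}
```

The `const msg` variant suits messages that are genuinely fixed, since it avoids the extra `"%s"` indirection; the `"%s"` form is the general fix for messages built at runtime.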