Fix deprecated, missing error and type cast lint checks #413

Draft · wants to merge 4 commits into base: main
9 changes: 6 additions & 3 deletions cmd/config-reloader/main.go
@@ -29,6 +29,7 @@ import (
 	"github.com/go-kit/log/level"
 	"github.com/oklog/run"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/thanos-io/thanos/pkg/reloader"
 )
@@ -56,8 +57,8 @@ func main() {
 
 	metrics := prometheus.NewRegistry()
 	metrics.MustRegister(
-		prometheus.NewGoCollector(),
-		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
+		collectors.NewGoCollector(),
+		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
 	)
 
 	reloadURL, err := url.Parse(*reloadURLStr)
@@ -154,7 +155,9 @@ func main() {
 			return server.ListenAndServe()
 		}, func(err error) {
 			ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
-			server.Shutdown(ctx)
+			if err := server.Shutdown(ctx); err != nil {
+				level.Error(logger).Log("msg", "unable to shut down server", "err", err)
+			}
 			cancel()
 		})
 	}
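The same two fixes recur across all four binaries: the deprecated `prometheus.NewGoCollector`/`prometheus.NewProcessCollector` constructors give way to their successors in the `collectors` subpackage (available since client_golang v1.12), and the previously discarded `(*http.Server).Shutdown` error is now logged, satisfying `errcheck`. A minimal sketch of both patterns together; the listen address and log output are illustrative, not taken from this PR:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// collectors.NewGoCollector and collectors.NewProcessCollector replace
	// the deprecated prometheus.New* constructors removed in this PR.
	reg.MustRegister(
		collectors.NewGoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
	)

	server := &http.Server{
		Addr:    ":9090",
		Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
	}
	go func() {
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Printf("server error: %v", err)
		}
	}()
	time.Sleep(100 * time.Millisecond) // stand-in for the run-group coordination

	// Shutdown returns an error; checking it is exactly what errcheck demands.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	if err := server.Shutdown(ctx); err != nil {
		log.Printf("unable to shut down server: %v", err)
	}
}
```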
18 changes: 13 additions & 5 deletions cmd/frontend/main.go
@@ -37,6 +37,7 @@ import (
 	"github.com/go-kit/log/level"
 	"github.com/oklog/run"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"google.golang.org/api/option"
 	apihttp "google.golang.org/api/transport/http"
@@ -74,8 +75,8 @@ func main() {
 
 	metrics := prometheus.NewRegistry()
 	metrics.MustRegister(
-		prometheus.NewGoCollector(),
-		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
+		collectors.NewGoCollector(),
+		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
 	)
 
 	if *projectID == "" {
@@ -147,8 +148,11 @@ func main() {
 			level.Info(logger).Log("msg", "Starting web server for metrics", "listen", *listenAddress)
 			return server.ListenAndServe()
 		}, func(err error) {
-			ctx, _ = context.WithTimeout(ctx, time.Minute)
-			server.Shutdown(ctx)
+			ctx, timeoutCancel := context.WithTimeout(ctx, time.Minute)
+			if err := server.Shutdown(ctx); err != nil {
+				level.Error(logger).Log("msg", "unable to shut down server", "err", err)
+			}
+			timeoutCancel()
 			cancel()
 		})
 	}
@@ -192,7 +196,11 @@ func forward(logger log.Logger, target *url.URL, transport http.RoundTripper) ht
 		// /api/v1/series currently not accepting match[] params on POST.
 		if req.URL.Path == "/api/v1/series" {
 			method = "GET"
-			req.ParseForm()
+			if err := req.ParseForm(); err != nil {
+				level.Warn(logger).Log("msg", "parse form failed", "err", err)
+				w.WriteHeader(http.StatusInternalServerError)
+				return
+			}
 			u.RawQuery = req.Form.Encode()
 		} else {
 			u.RawQuery = req.URL.RawQuery
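Two separate findings are addressed in this file beyond the collector migration. First, `ctx, _ = context.WithTimeout(ctx, time.Minute)` discarded the `CancelFunc`, which `go vet`'s `lostcancel` check flags because the timer's resources are never released; the fix binds it to `timeoutCancel` and invokes it after shutdown. Second, the ignored `req.ParseForm()` error now short-circuits the request with a 500. A small sketch of the `lostcancel` shape, with illustrative names:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func doWork(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Before: ctx, _ = context.WithTimeout(...) — vet reports
	// "the cancel function is not used on all paths (possible context leak)".
	ctx, timeoutCancel := context.WithTimeout(context.Background(), time.Minute)
	defer timeoutCancel() // releases the timer even on early return

	fmt.Println(doWork(ctx))
}
```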
8 changes: 3 additions & 5 deletions cmd/operator/main.go
@@ -38,10 +38,6 @@ import (
 	"github.com/GoogleCloudPlatform/prometheus-engine/pkg/operator"
 )
 
-func unstableFlagHelp(help string) string {
-	return help + " (Setting this flag voids any guarantees of proper behavior of the operator.)"
-}
-
 func main() {
 	var (
 		defaultProjectID string
@@ -147,7 +143,9 @@ func main() {
 			return server.ListenAndServe()
 		}, func(err error) {
 			ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
-			server.Shutdown(ctx)
+			if err := server.Shutdown(ctx); err != nil {
+				logger.Error(err, "unable to shut down server")
+			}
 			cancel()
 		})
 	}
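Besides the same `Shutdown` check, this hunk deletes `unstableFlagHelp`, a helper left without callers (dead code that the `unused` linter reports). The error-logging call shape also differs here because the operator uses a `logr.Logger` rather than go-kit's leveled logger; a side-by-side sketch, assuming `go-logr/stdr` as a stand-in logr backend:

```go
package main

import (
	"errors"
	stdlog "log"
	"os"

	kitlog "github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/go-logr/stdr"
)

func main() {
	err := errors.New("context deadline exceeded")

	// go-kit style, as in config-reloader, frontend and rule-evaluator:
	kitLogger := kitlog.NewLogfmtLogger(os.Stderr)
	_ = level.Error(kitLogger).Log("msg", "unable to shut down server", "err", err)

	// logr style, as in the operator:
	logger := stdr.New(stdlog.New(os.Stderr, "", stdlog.LstdFlags))
	logger.Error(err, "unable to shut down server")
}
```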
11 changes: 6 additions & 5 deletions cmd/rule-evaluator/main.go
@@ -33,14 +33,14 @@ import (
 	"github.com/alecthomas/kingpin/v2"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
 	"github.com/oklog/run"
 	"google.golang.org/api/option"
 	apihttp "google.golang.org/api/transport/http"
 
 	"github.com/prometheus/client_golang/api"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/config"
@@ -79,9 +79,8 @@ func main() {
 
 	reg := prometheus.NewRegistry()
 	reg.MustRegister(
-		prometheus.NewGoCollector(),
-		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
-		grpc_prometheus.DefaultClientMetrics,
+		collectors.NewGoCollector(),
+		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
 	)
 
 	// The rule-evaluator version is identical to the export library version for now, so
@@ -359,7 +358,9 @@ func main() {
 			return server.ListenAndServe()
 		}, func(err error) {
 			ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
-			server.Shutdown(ctx)
+			if err := server.Shutdown(ctx); err != nil {
+				level.Error(logger).Log("msg", "unable to shut down server", "err", err)
+			}
 			cancel()
 		})
 	}
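This file also drops the archived `github.com/grpc-ecosystem/go-grpc-prometheus` dependency: `grpc_prometheus.DefaultClientMetrics` is no longer registered and its import disappears. Nothing replaces it in the PR; if gRPC client metrics were still wanted, the maintained successor lives in go-grpc-middleware v2. The following is a pointer-style sketch of that hypothetical replacement, not something this change does:

```go
package metricsutil // hypothetical helper, not part of the PR

import (
	grpcprom "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
	"github.com/prometheus/client_golang/prometheus"
)

func registerGRPCClientMetrics(reg *prometheus.Registry) *grpcprom.ClientMetrics {
	clientMetrics := grpcprom.NewClientMetrics()
	reg.MustRegister(clientMetrics) // ClientMetrics implements prometheus.Collector
	return clientMetrics
}
```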
37 changes: 23 additions & 14 deletions e2e/alertmanager_test.go
@@ -119,9 +119,10 @@ func testAlertmanager(ctx context.Context, t *OperatorContext, spec *monitoringv
 }
 
 func testAlertmanagerDeployed(ctx context.Context, t *OperatorContext, config *monitoringv1.ManagedAlertmanagerSpec) {
-	err := wait.Poll(time.Second, 1*time.Minute, func() (bool, error) {
+	var err error
+	pollErr := wait.PollUntilContextTimeout(ctx, time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
 		var ss appsv1.StatefulSet
-		if err := t.Client().Get(ctx, client.ObjectKey{Namespace: t.namespace, Name: operator.NameAlertmanager}, &ss); err != nil {
+		if err = t.Client().Get(ctx, client.ObjectKey{Namespace: t.namespace, Name: operator.NameAlertmanager}, &ss); err != nil {
 			if apierrors.IsNotFound(err) {
 				return false, nil
 			}
@@ -142,10 +143,11 @@ func testAlertmanagerDeployed(ctx context.Context, t *OperatorContext, config *m
 			return true, nil
 		}
 
+		const containerName = "alertmanager"
 		// TODO(pintohutch): clean-up wantArgs init logic.
 		var wantArgs []string
 		for _, c := range ss.Spec.Template.Spec.Containers {
-			if c.Name != "alertmanager" {
+			if c.Name != containerName {
 				continue
 			}
 			// We're mainly interested in the dynamic flags but checking the entire set including
@@ -155,15 +157,18 @@ func testAlertmanagerDeployed(ctx context.Context, t *OperatorContext, config *m
 			}
 
 			if diff := cmp.Diff(strings.Join(wantArgs, " "), getEnvVar(c.Env, "EXTRA_ARGS")); diff != "" {
-				return false, fmt.Errorf("unexpected flags (-want, +got): %s", diff)
+				err = fmt.Errorf("unexpected flags (-want, +got): %s", diff)
 			}
-			return true, nil
+			return err == nil, nil
 		}
 
-		return true, nil
+		return false, fmt.Errorf("no container with name %q found", containerName)
 	})
-	if err != nil {
-		t.Errorf("unable to get alertmanager statefulset: %s", err)
+	if pollErr != nil {
+		if errors.Is(pollErr, context.DeadlineExceeded) && err != nil {
+			pollErr = err
+		}
+		t.Errorf("unable to get alertmanager statefulset: %s", pollErr)
 	}
 }

@@ -172,9 +177,10 @@ func testAlertmanagerConfig(ctx context.Context, t *OperatorContext, pub *corev1
 		t.Fatalf("unable to create alertmanager config secret: %s", err)
 	}
 
-	if err := wait.Poll(3*time.Second, 3*time.Minute, func() (bool, error) {
+	var err error
+	if pollErr := wait.PollUntilContextTimeout(ctx, 3*time.Second, 3*time.Minute, true, func(ctx context.Context) (bool, error) {
 		var secret corev1.Secret
-		if err := t.Client().Get(ctx, client.ObjectKey{Namespace: t.namespace, Name: operator.AlertmanagerSecretName}, &secret); err != nil {
+		if err = t.Client().Get(ctx, client.ObjectKey{Namespace: t.namespace, Name: operator.AlertmanagerSecretName}, &secret); err != nil {
 			if apierrors.IsNotFound(err) {
 				return false, nil
 			}
@@ -189,10 +195,13 @@ func testAlertmanagerConfig(ctx context.Context, t *OperatorContext, pub *corev1
 		// Grab data from public secret and compare.
 		data := pub.Data[key]
 		if diff := cmp.Diff(data, bytes); diff != "" {
-			return false, fmt.Errorf("unexpected configuration (-want, +got): %s", diff)
+			err = fmt.Errorf("unexpected configuration (-want, +got): %s", diff)
 		}
-		return true, nil
-	}); err != nil {
-		t.Errorf("unable to get alertmanager config: %s", err)
+		return err == nil, nil
+	}); pollErr != nil {
+		if errors.Is(pollErr, context.DeadlineExceeded) && err != nil {
+			pollErr = err
+		}
+		t.Errorf("unable to get alertmanager config: %s", pollErr)
 	}
 }
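The test changes follow one template: the deprecated `wait.Poll` becomes `wait.PollUntilContextTimeout` (added in k8s.io/apimachinery v0.27), the deployment poll timeout grows from one to two minutes, and `err` is hoisted out of the condition closure. The closure records a comparison failure instead of returning it (returning a non-nil error would abort polling rather than retry), and on timeout the recorded error replaces the uninformative `context.DeadlineExceeded`. A distilled sketch, with `check` standing in for the Get-and-compare logic:

```go
package e2eutil // hypothetical; the tests inline this pattern

import (
	"context"
	"errors"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForCondition(ctx context.Context, check func(context.Context) error) error {
	var lastErr error
	pollErr := wait.PollUntilContextTimeout(ctx, 3*time.Second, 3*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			// Record the failure and keep polling; returning it would
			// stop the poll on the first mismatch.
			lastErr = check(ctx)
			return lastErr == nil, nil
		})
	if pollErr != nil && errors.Is(pollErr, context.DeadlineExceeded) && lastErr != nil {
		// Surface the more informative inner error over the bare timeout.
		return lastErr
	}
	return pollErr
}
```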
29 changes: 16 additions & 13 deletions e2e/collector_test.go
@@ -173,9 +173,10 @@ func testCollectorDeployed(ctx context.Context, t *OperatorContext) {
 		},
 	})
 
-	err := wait.Poll(3*time.Second, 3*time.Minute, func() (bool, error) {
+	var err error
+	pollErr := wait.PollUntilContextTimeout(ctx, 3*time.Second, 3*time.Minute, true, func(ctx context.Context) (bool, error) {
 		var ds appsv1.DaemonSet
-		if err := t.Client().Get(ctx, client.ObjectKey{Namespace: t.namespace, Name: operator.NameCollector}, &ds); err != nil {
+		if err = t.Client().Get(ctx, client.ObjectKey{Namespace: t.namespace, Name: operator.NameCollector}, &ds); err != nil {
 			if apierrors.IsNotFound(err) {
 				return false, nil
 			}
@@ -210,7 +211,7 @@
 			"cluster-autoscaler.kubernetes.io/safe-to-evict": "true",
 		}
 		if diff := cmp.Diff(wantedAnnotations, ds.Spec.Template.Annotations); diff != "" {
-			return false, fmt.Errorf("unexpected annotations (-want, +got): %s", diff)
+			err = fmt.Errorf("unexpected annotations (-want, +got): %s", diff)
 		}
 
 		// TODO(pintohutch): clean-up wantArgs init logic.
@@ -234,15 +235,17 @@
 
 			if diff := cmp.Diff(strings.Join(wantArgs, " "), getEnvVar(c.Env, "EXTRA_ARGS")); diff != "" {
 				t.Log(fmt.Errorf("unexpected flags (-want, +got): %s", diff))
-				return false, fmt.Errorf("unexpected flags (-want, +got): %s", diff)
+				err = fmt.Errorf("unexpected flags (-want, +got): %s", diff)
 			}
-			return true, nil
+			return err == nil, nil
 		}
-		t.Log(errors.New("no container with name prometheus found"))
-		return false, errors.New("no container with name prometheus found")
+		return false, fmt.Errorf("no container with name %q found", operator.CollectorPrometheusContainerName)
 	})
-	if err != nil {
-		t.Fatalf("Waiting for DaemonSet deployment failed: %s", err)
+	if pollErr != nil {
+		if errors.Is(pollErr, context.DeadlineExceeded) && err != nil {
+			pollErr = err
+		}
+		t.Fatalf("Waiting for DaemonSet deployment failed: %s", pollErr)
 	}
 }
 
@@ -328,7 +331,7 @@ func validateCollectorUpMetrics(ctx context.Context, t *OperatorContext, job str
 	for _, port := range []string{operator.CollectorPrometheusContainerPortName, operator.CollectorConfigReloaderContainerPortName} {
 		t.Logf("Poll up metric for pod %q and port %q", pod.Name, port)
 
-		err = wait.PollImmediateUntil(3*time.Second, func() (bool, error) {
+		err = wait.PollUntilContextCancel(ctx, 3*time.Second, true, func(ctx context.Context) (bool, error) {
 			now := time.Now()
 
 			// Validate the majority of labels being set correctly by filtering along them.
@@ -369,7 +372,7 @@ func validateCollectorUpMetrics(ctx context.Context, t *OperatorContext, job str
 				return false, fmt.Errorf("expected iterator to be done but got series %v: %w", series, err)
 			}
 			return true, nil
-		}, ctx.Done())
+		})
 		if err != nil {
 			t.Fatalf("Waiting for collector metrics to appear in Cloud Monitoring failed: %s", err)
 		}
}
Expand Down Expand Up @@ -414,7 +417,7 @@ func testCollectorScrapeKubelet(ctx context.Context, t *OperatorContext) {
for _, port := range []string{"metrics", "cadvisor"} {
t.Logf("Poll up metric for kubelet on node %q and port %q", node.Name, port)

err = wait.PollImmediateUntil(3*time.Second, func() (bool, error) {
err = wait.PollUntilContextCancel(ctx, 3*time.Second, true, func(ctx context.Context) (bool, error) {
now := time.Now()

// Validate the majority of labels being set correctly by filtering along them.
@@ -455,7 +458,7 @@
 				return false, fmt.Errorf("expected iterator to be done but got series %v: %w", series, err)
 			}
 			return true, nil
-		}, ctx.Done())
+		})
 		if err != nil {
 			t.Fatalf("Waiting for collector metrics to appear in Cloud Monitoring failed: %s", err)
 		}
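The open-ended polls against Cloud Monitoring move from the deprecated `wait.PollImmediateUntil(interval, cond, stopCh)` to `wait.PollUntilContextCancel(ctx, interval, immediate, cond)`: the `ctx.Done()` stop channel is replaced by the context itself, and the condition now receives that context instead of closing over it. A minimal sketch, with `queryOnce` standing in for the metric query:

```go
package e2eutil // hypothetical; assumes k8s.io/apimachinery v0.27+

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func pollUntilFound(ctx context.Context, queryOnce wait.ConditionWithContextFunc) error {
	// Before: wait.PollImmediateUntil(3*time.Second, cond, ctx.Done())
	// After: the context replaces the stop channel; `true` preserves the
	// immediate first tick that PollImmediateUntil provided.
	return wait.PollUntilContextCancel(ctx, 3*time.Second, true, queryOnce)
}
```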
4 changes: 2 additions & 2 deletions e2e/kubeutil/deployment.go
@@ -58,11 +58,11 @@ func IsDeploymentReady(ctx context.Context, kubeClient client.Client, namespace,
 
 func WaitForDeploymentReady(ctx context.Context, kubeClient client.Client, namespace, name string) error {
 	var err error
-	if waitErr := wait.Poll(3*time.Second, 1*time.Minute, func() (bool, error) {
+	if waitErr := wait.PollUntilContextTimeout(ctx, 3*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
 		err := IsDeploymentReady(ctx, kubeClient, namespace, name)
 		return err == nil, nil
 	}); waitErr != nil {
-		if errors.Is(waitErr, wait.ErrWaitTimeout) {
+		if errors.Is(waitErr, context.DeadlineExceeded) {
 			return err
 		}
 		return waitErr
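With the context-based helpers, a timeout no longer surfaces as the deprecated `wait.ErrWaitTimeout` sentinel but as an error matching `context.DeadlineExceeded`, so the `errors.Is` target changes with it. One caveat worth flagging: the closure above declares `err := IsDeploymentReady(...)` with `:=`, which appears to shadow the outer `err`, leaving the timeout branch to return nil; the pod.go helpers below assign with `=` instead. A hypothetical variant with the shadowing removed:

```go
package kubeutil // sketch only; the PR as shown keeps `err :=` in the closure

import (
	"context"
	"errors"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func waitForDeploymentReadyFixed(ctx context.Context, kubeClient client.Client, namespace, name string) error {
	var err error
	if waitErr := wait.PollUntilContextTimeout(ctx, 3*time.Second, time.Minute, true,
		func(ctx context.Context) (bool, error) {
			err = IsDeploymentReady(ctx, kubeClient, namespace, name) // `=`, not `:=`
			return err == nil, nil
		}); waitErr != nil {
		if errors.Is(waitErr, context.DeadlineExceeded) && err != nil {
			return err
		}
		return waitErr
	}
	return nil
}
```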
18 changes: 10 additions & 8 deletions e2e/kubeutil/pod.go
@@ -37,11 +37,13 @@ import (
 
 func IsPodContainerReady(ctx context.Context, restConfig *rest.Config, pod *corev1.Pod, container string) error {
 	for _, status := range pod.Status.ContainerStatuses {
-		if status.Name == container && !status.Ready {
-			key := client.ObjectKeyFromObject(pod)
-			return fmt.Errorf("pod %s container %s not ready: %s", key, status.Name, containerStateString(&status.State))
+		if status.Name == container {
+			if !status.Ready {
+				key := client.ObjectKeyFromObject(pod)
+				return fmt.Errorf("pod %s container %s not ready: %s", key, status.Name, containerStateString(&status.State))
+			}
+			return nil
 		}
-		return nil
 	}
 	key := client.ObjectKeyFromObject(pod)
 	return fmt.Errorf("no container named %s found in pod %s", container, key)
@@ -54,14 +56,14 @@ func WaitForPodContainerReady(ctx context.Context, t testing.TB, restConfig *res
 		return nil
 	}
 	t.Logf("waiting for pod to be ready: %s", err)
-	if waitErr := wait.Poll(2*time.Second, 30*time.Second, func() (done bool, err error) {
+	if waitErr := wait.PollUntilContextTimeout(ctx, 2*time.Second, 30*time.Second, true, func(ctx context.Context) (done bool, err error) {
 		if err = kubeClient.Get(ctx, client.ObjectKeyFromObject(pod), pod); err != nil {
 			return false, err
 		}
 		err = IsPodContainerReady(ctx, restConfig, pod, container)
 		return err == nil, nil
 	}); waitErr != nil {
-		if errors.Is(waitErr, wait.ErrWaitTimeout) {
+		if errors.Is(waitErr, context.DeadlineExceeded) {
 			return err
 		}
 		return waitErr
@@ -87,14 +89,14 @@ func WaitForPodReady(ctx context.Context, t *testing.T, restConfig *rest.Config,
 		return nil
 	}
 	t.Logf("waiting for pod to be ready: %s", err)
-	if waitErr := wait.Poll(2*time.Second, 30*time.Second, func() (done bool, err error) {
+	if waitErr := wait.PollUntilContextTimeout(ctx, 2*time.Second, 30*time.Second, true, func(ctx context.Context) (done bool, err error) {
 		if err = kubeClient.Get(ctx, client.ObjectKeyFromObject(pod), pod); err != nil {
 			return false, err
 		}
 		err = IsPodReady(ctx, restConfig, pod)
 		return err == nil, nil
 	}); waitErr != nil {
-		if errors.Is(waitErr, wait.ErrWaitTimeout) {
+		if errors.Is(waitErr, context.DeadlineExceeded) {
 			return err
 		}
 		return waitErr
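The `IsPodContainerReady` restructure at the top of this file fixes control flow, not just style: previously `return nil` sat at the bottom of the loop body, so the first non-matching container status ended the scan early, and the trailing "no container named" error was unreachable for most multi-container pods. Moving `return nil` inside the name match lets the loop keep scanning until it actually finds the named container.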