Adding run with condition to thread #1

Open · wants to merge 4 commits into base: igal/leader_new
3 changes: 2 additions & 1 deletion Makefile
@@ -30,6 +30,7 @@ OCM_CLIENT_ID := ${OCM_CLIENT_ID}
OCM_CLIENT_SECRET := ${OCM_CLIENT_SECRET}
ENABLE_AUTH := $(or ${ENABLE_AUTH},False)
DELETE_PVC := $(or ${DELETE_PVC},False)
REPLICAS_COUNT = $(shell if ! [ "${TARGET}" = "minikube" ];then echo 3; else echo $(or ${SERVICE_REPLICAS_COUNT},3);fi)

ifdef INSTALLATION_TIMEOUT
INSTALLATION_TIMEOUT_FLAG = --installation-timeout $(INSTALLATION_TIMEOUT)
@@ -172,7 +173,7 @@ deploy-service-requirements: deploy-namespace deploy-inventory-service-file

deploy-service: deploy-namespace deploy-service-requirements deploy-role
python3 ./tools/deploy_assisted_installer.py $(DEPLOY_TAG_OPTION) --namespace "$(NAMESPACE)" \
--profile "$(PROFILE)" $(TEST_FLAGS) --target "$(TARGET)"
--profile "$(PROFILE)" $(TEST_FLAGS) --target "$(TARGET)" --replicas-count $(REPLICAS_COUNT)
python3 ./tools/wait_for_assisted_service.py --target $(TARGET) --namespace "$(NAMESPACE)" \
--profile "$(PROFILE)" --domain "$(INGRESS_DOMAIN)"

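Note on the new REPLICAS_COUNT logic: non-minikube targets are pinned to 3 replicas, and only minikube honors SERVICE_REPLICAS_COUNT (still defaulting to 3). For example, `make deploy-service TARGET=minikube SERVICE_REPLICAS_COUNT=1` deploys a single replica for local development, while any other target always gets 3.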
60 changes: 43 additions & 17 deletions cmd/main.go
@@ -1,13 +1,21 @@
package main

import (
"context"
"flag"
"fmt"
"log"
"net/http"
"strings"
"time"

"github.com/openshift/assisted-service/pkg/thread"

"github.com/openshift/assisted-service/internal/imgexpirer"

"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"

"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/jinzhu/gorm"
@@ -21,7 +29,6 @@ import (
"github.com/openshift/assisted-service/internal/events"
"github.com/openshift/assisted-service/internal/hardware"
"github.com/openshift/assisted-service/internal/host"
"github.com/openshift/assisted-service/internal/imgexpirer"
"github.com/openshift/assisted-service/internal/metrics"
"github.com/openshift/assisted-service/internal/versions"
"github.com/openshift/assisted-service/models"
@@ -30,10 +37,10 @@ import (
"github.com/openshift/assisted-service/pkg/db"
"github.com/openshift/assisted-service/pkg/generator"
"github.com/openshift/assisted-service/pkg/job"
"github.com/openshift/assisted-service/pkg/leader"
"github.com/openshift/assisted-service/pkg/ocm"
"github.com/openshift/assisted-service/pkg/requestid"
"github.com/openshift/assisted-service/pkg/s3wrapper"
"github.com/openshift/assisted-service/pkg/thread"
"github.com/openshift/assisted-service/restapi"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
@@ -67,6 +74,7 @@ var Options struct {
OCMConfig ocm.Config
HostConfig host.Config
LogLevel string `envconfig:"LOG_LEVEL" default:"info"`
LeaderConfig leader.Config
}

func main() {
@@ -115,6 +123,7 @@ func main() {
}
}

var lead leader.ElectorInterface
authHandler := auth.NewAuthHandler(Options.Auth, ocmClient, log.WithField("pkg", "auth"))
authzHandler := auth.NewAuthzHandler(Options.Auth, ocmClient, log.WithField("pkg", "authz"))
versionHandler := versions.NewHandler(Options.Versions)
@@ -125,19 +134,6 @@ func main() {
instructionApi := host.NewInstructionManager(log.WithField("pkg", "instructions"), db, hwValidator, Options.InstructionConfig, connectivityValidator)
prometheusRegistry := prometheus.DefaultRegisterer
metricsManager := metrics.NewMetricsManager(prometheusRegistry)
hostApi := host.NewManager(log.WithField("pkg", "host-state"), db, eventsHandler, hwValidator, instructionApi, &Options.HWValidatorConfig, metricsManager, &Options.HostConfig)
clusterApi := cluster.NewManager(Options.ClusterConfig, log.WithField("pkg", "cluster-state"), db,
eventsHandler, hostApi, metricsManager)

clusterStateMonitor := thread.New(
log.WithField("pkg", "cluster-monitor"), "Cluster State Monitor", Options.ClusterStateMonitorInterval, clusterApi.ClusterMonitoring)
clusterStateMonitor.Start()
defer clusterStateMonitor.Stop()

hostStateMonitor := thread.New(
log.WithField("pkg", "host-monitor"), "Host State Monitor", Options.HostStateMonitorInterval, hostApi.HostMonitoring)
hostStateMonitor.Start()
defer hostStateMonitor.Stop()

log.Println("DeployTarget: " + Options.DeployTarget)

@@ -171,7 +167,22 @@ func main() {
log.Fatal("failed to create client:", err)
}
generator = job.New(log.WithField("pkg", "k8s-job-wrapper"), kclient, Options.JobConfig)

cfg, cerr := clientcmd.BuildConfigFromFlags("", "")
if cerr != nil {
log.WithError(cerr).Fatalf("Failed to create kubernetes cluster config")
}
k8sClient := kubernetes.NewForConfigOrDie(cfg)
lead = leader.NewElector(k8sClient, Options.LeaderConfig, "assisted-service-leader-election-helper",
log.WithField("pkg", "monitor-runner"))
err = lead.StartLeaderElection(context.Background())
if err != nil {
log.WithError(err).Fatalf("Failed to start leader election")
}

case "onprem":

lead = &leader.DummyElector{}
// in on-prem mode, setup file system s3 driver and use localjob implementation
objectHandler = s3wrapper.NewFSClient("/data", log)
if objectHandler == nil {
@@ -183,6 +194,21 @@ func main() {
log.Fatalf("not supported deploy target %s", Options.DeployTarget)
}

hostApi := host.NewManager(log.WithField("pkg", "host-state"), db, eventsHandler, hwValidator,
instructionApi, &Options.HWValidatorConfig, metricsManager, &Options.HostConfig, lead)
clusterApi := cluster.NewManager(Options.ClusterConfig, log.WithField("pkg", "cluster-state"), db,
eventsHandler, hostApi, metricsManager, lead)

clusterStateMonitor := thread.New(
log.WithField("pkg", "cluster-monitor"), "Cluster State Monitor", Options.ClusterStateMonitorInterval, clusterApi.ClusterMonitoring)
clusterStateMonitor.StartWithCondition(lead.IsLeader)
defer clusterStateMonitor.Stop()

hostStateMonitor := thread.New(
log.WithField("pkg", "host-monitor"), "Host State Monitor", Options.HostStateMonitorInterval, hostApi.HostMonitoring)
hostStateMonitor.StartWithCondition(lead.IsLeader)
defer hostStateMonitor.Stop()

if newUrl, err = s3wrapper.FixEndpointURL(Options.BMConfig.S3EndpointURL); err != nil {
log.WithError(err).Fatalf("failed to create valid bm config S3 endpoint URL from %s", Options.BMConfig.S3EndpointURL)
} else {
@@ -193,10 +219,10 @@ func main() {

events := events.NewApi(eventsHandler, logrus.WithField("pkg", "eventsApi"))

expirer := imgexpirer.NewManager(objectHandler, eventsHandler, Options.BMConfig.ImageExpirationTime)
expirer := imgexpirer.NewManager(objectHandler, eventsHandler, Options.BMConfig.ImageExpirationTime, lead)
imageExpirationMonitor := thread.New(
log.WithField("pkg", "image-expiration-monitor"), "Image Expiration Monitor", Options.ImageExpirationInterval, expirer.ExpirationTask)
imageExpirationMonitor.Start()
imageExpirationMonitor.StartWithCondition(lead.IsLeader)
defer imageExpirationMonitor.Stop()

h, err := restapi.Handler(restapi.Config{
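The pkg/thread change that adds StartWithCondition is not visible in this diff, so below is a minimal sketch of the shape the call sites above imply: the ticker keeps firing on every replica, but the task body runs only while the supplied condition (here lead.IsLeader) holds. The struct layout and ticker loop are assumptions, not the PR's actual code.

```go
// Sketch only: a condition-gated periodic task, assuming pkg/thread
// wraps a time.Ticker. Names mirror the call sites in cmd/main.go.
package thread

import (
	"time"

	"github.com/sirupsen/logrus"
)

type Thread struct {
	log      logrus.FieldLogger
	name     string
	interval time.Duration
	task     func()
	stop     chan struct{}
}

func New(log logrus.FieldLogger, name string, interval time.Duration, task func()) *Thread {
	return &Thread{log: log, name: name, interval: interval, task: task, stop: make(chan struct{})}
}

// StartWithCondition launches the loop unconditionally but skips the
// task on ticks where condition() is false, so a replica that later
// wins the election starts doing work without being restarted.
func (t *Thread) StartWithCondition(condition func() bool) {
	go func() {
		ticker := time.NewTicker(t.interval)
		defer ticker.Stop()
		for {
			select {
			case <-t.stop:
				t.log.Infof("%s stopped", t.name)
				return
			case <-ticker.C:
				if condition() {
					t.task()
				}
			}
		}
	}()
}

func (t *Thread) Stop() {
	close(t.stop)
}
```

Gating each tick rather than refusing to start the thread is what lets all three replicas run the same binary: non-leaders stay idle but ready to take over.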
2 changes: 1 addition & 1 deletion deploy/assisted-service.yaml
@@ -7,7 +7,7 @@ spec:
selector:
matchLabels:
app: assisted-service
replicas: 1
replicas: 3
template:
metadata:
labels:
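Raising replicas from 1 to 3 is the change that motivates the rest of this PR: with several identical pods, the cluster, host, and image-expiration monitors would all run concurrently unless gated on leadership, as done in cmd/main.go above.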
13 changes: 13 additions & 0 deletions deploy/roles/default_role.yaml
@@ -30,6 +30,19 @@ rules:
- batch
resources:
- jobs
- verbs:
- '*'
apiGroups:
- ''
resources:
- configmaps
- verbs:
- '*'
apiGroups:
- coordination.k8s.io
resources:
- leases

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
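The new rules give the service full access to configmaps and to coordination.k8s.io leases, the two resource-lock types client-go's leader-election package can take out. The leader package internals are not part of this diff, so the following is only a sketch of how leader.NewElector might wrap client-go; the Config fields, namespace handling, and timing values are assumptions, not the PR's actual code.

```go
// Sketch only: a leader elector built on client-go, assuming the
// NewElector/StartLeaderElection shape seen in cmd/main.go.
package leader

import (
	"context"
	"os"
	"time"

	"github.com/sirupsen/logrus"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

type Config struct {
	Namespace     string        // assumption: namespace holding the lock object
	LeaseDuration time.Duration // e.g. 15s
	RenewDeadline time.Duration // e.g. 10s
	RetryPeriod   time.Duration // e.g. 2s
}

type Elector struct {
	client   kubernetes.Interface
	config   Config
	lockName string
	log      logrus.FieldLogger
	isLeader bool // sketch only; real code should guard this with sync/atomic
}

func NewElector(client kubernetes.Interface, config Config, lockName string, log logrus.FieldLogger) *Elector {
	return &Elector{client: client, config: config, lockName: lockName, log: log}
}

func (e *Elector) IsLeader() bool { return e.isLeader }

// StartLeaderElection joins the election in the background; the
// callbacks flip the flag the monitors poll via IsLeader.
func (e *Elector) StartLeaderElection(ctx context.Context) error {
	lock, err := resourcelock.New(
		resourcelock.LeasesResourceLock, // covered by the new coordination.k8s.io/leases rule
		e.config.Namespace,
		e.lockName, // "assisted-service-leader-election-helper" in main.go
		e.client.CoreV1(),
		e.client.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: os.Getenv("HOSTNAME")},
	)
	if err != nil {
		return err
	}
	go leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: e.config.LeaseDuration,
		RenewDeadline: e.config.RenewDeadline,
		RetryPeriod:   e.config.RetryPeriod,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { e.log.Info("became leader"); e.isLeader = true },
			OnStoppedLeading: func() { e.log.Info("lost leadership"); e.isLeader = false },
		},
	})
	return nil
}
```

Note that leaderelection.RunOrDie returns once leadership is lost, so a production elector would typically re-enter the election in a loop rather than remain a permanent non-leader.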
4 changes: 2 additions & 2 deletions internal/bminventory/inventory_test.go
@@ -2149,7 +2149,7 @@ var _ = Describe("KubeConfig download", func() {
mockS3Client = s3wrapper.NewMockAPI(ctrl)
mockJob = job.NewMockAPI(ctrl)
clusterApi = cluster.NewManager(cluster.Config{}, getTestLog().WithField("pkg", "cluster-monitor"),
db, nil, nil, nil)
db, nil, nil, nil, nil)

bm = NewBareMetalInventory(db, getTestLog(), nil, clusterApi, cfg, mockJob, nil, mockS3Client, nil)
c = common.Cluster{Cluster: models.Cluster{
@@ -2278,7 +2278,7 @@ var _ = Describe("UploadClusterIngressCert test", func() {
mockS3Client = s3wrapper.NewMockAPI(ctrl)
mockJob = job.NewMockAPI(ctrl)
clusterApi = cluster.NewManager(cluster.Config{}, getTestLog().WithField("pkg", "cluster-monitor"),
db, nil, nil, nil)
db, nil, nil, nil, nil)
bm = NewBareMetalInventory(db, getTestLog(), nil, clusterApi, cfg, mockJob, nil, mockS3Client, nil)
c = common.Cluster{Cluster: models.Cluster{
ID: &clusterID,
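Both test setups pass a trailing nil for the new leaderElector parameter of cluster.NewManager; these tests drive the manager's API directly and never invoke ClusterMonitoring, so no elector is exercised.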
18 changes: 17 additions & 1 deletion internal/cluster/cluster.go
@@ -6,6 +6,8 @@ import (
"net/http"
"time"

"github.com/openshift/assisted-service/pkg/leader"

"github.com/openshift/assisted-service/pkg/s3wrapper"

"github.com/filanov/stateswitch"
@@ -82,9 +84,11 @@ type Manager struct {
metricAPI metrics.API
hostAPI host.API
rp *refreshPreprocessor
leaderElector leader.Leader
}

func NewManager(cfg Config, log logrus.FieldLogger, db *gorm.DB, eventsHandler events.Handler, hostAPI host.API, metricApi metrics.API) *Manager {
func NewManager(cfg Config, log logrus.FieldLogger, db *gorm.DB, eventsHandler events.Handler, hostAPI host.API, metricApi metrics.API,
leaderElector leader.Leader) *Manager {
th := &transitionHandler{
log: log,
db: db,
@@ -100,6 +104,7 @@ func NewManager(cfg Config, log logrus.FieldLogger, db *gorm.DB, eventsHandler e
metricAPI: metricApi,
rp: newRefreshPreprocessor(log, hostAPI),
hostAPI: hostAPI,
leaderElector: leaderElector,
}
}

@@ -170,6 +175,12 @@ func (m *Manager) GetMasterNodesIds(ctx context.Context, c *common.Cluster, db *
}

func (m *Manager) ClusterMonitoring() {
if !m.leaderElector.IsLeader() {
m.log.Debugf("Not a leader, exiting ClusterMonitoring")
return
}

m.log.Debugf("Running ClusterMonitoring")
var (
clusters []*common.Cluster
clusterAfterRefresh *common.Cluster
@@ -184,6 +195,11 @@
return
}
for _, cluster := range clusters {

if !m.leaderElector.IsLeader() {
m.log.Debugf("Not a leader, exiting ClusterMonitoring")
return
}
if clusterAfterRefresh, err = m.RefreshStatus(ctx, cluster, m.db); err != nil {
log.WithError(err).Errorf("failed to refresh cluster %s state", cluster.ID)
continue
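ClusterMonitoring now checks leadership twice: once on entry and again inside the per-cluster loop, so a replica that loses the lease mid-sweep bails out promptly instead of finishing a stale pass. The leader package itself is not in this diff; here is a minimal sketch of the abstraction its call sites imply, where the interface split is an assumption (DummyElector is the always-leader stand-in wired up for the "onprem" target):

```go
// Sketch only: the leadership abstraction implied by the diff's
// call sites in cmd/main.go, cluster.go, and imgexpirer.
package leader

import "context"

// Leader is the read-only view the managers and monitors consume.
type Leader interface {
	IsLeader() bool
}

// ElectorInterface is what cmd/main.go holds: it can also join the election.
type ElectorInterface interface {
	Leader
	StartLeaderElection(ctx context.Context) error
}

// DummyElector always claims leadership, for single-replica on-prem
// deployments where no Kubernetes API is available for a real lock.
type DummyElector struct{}

func (*DummyElector) StartLeaderElection(_ context.Context) error { return nil }

func (*DummyElector) IsLeader() bool { return true }
```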