diff --git a/README.md b/README.md index e410a2dd..3608d85a 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ # Latest image ``` -robustadev/kubewatch:v2.6 +robustadev/kubewatch:v2.8.0 ``` # Usage @@ -75,6 +75,10 @@ You may also provide a values file instead: ```yaml rbac: create: true + customRoles: + - apiGroups: ["monitoring.coreos.com"] + resources: ["prometheusrules"] + verbs: ["get", "list", "watch"] resourcesToWatch: deployment: false replicationcontroller: false @@ -94,6 +98,10 @@ resourcesToWatch: ingress: false coreevent: false event: true +customresources: + - group: monitoring.coreos.com + version: v1 + resource: prometheusrules slack: channel: '#YOUR_CHANNEL' token: 'xoxb-YOUR_TOKEN' @@ -129,7 +137,7 @@ Once the Pod is running, you will start seeing Kubernetes events in your configu ![slack](./docs/slack.png) -To modify what notifications you get, update the `kubewatch` ConfigMap and turn on and off (true/false) resources: +To modify what notifications you get, update the `kubewatch` ConfigMap and turn on and off (true/false) resources or configure any resource of your choosing with customresources (CRDs): ``` resource: @@ -151,6 +159,10 @@ resource: ingress: false coreevent: false event: true +customresources: + - group: monitoring.coreos.com + version: v1 + resource: prometheusrules ``` #### Working with RBAC @@ -179,6 +191,51 @@ Then just create `pod` as usual with: $ kubectl create -f kubewatch.yaml ``` +#### Working with CRDs +`kubewatch` can be configured to monitor Kubernetes Custom Resource Definitions (CRDs), allowing you to receive notifications when changes occur. +To configure kubewatch to watch custom resources, you need to define the `customresources` section either in your values file or by using the `--set` flag with Helm commands. 
+ +Include the custom resource configuration in your values file: + +```yaml +customresources: + - group: monitoring.coreos.com + version: v1 + resource: prometheusrules +``` + +Then deploy or upgrade `kubewatch` with `helm upgrade` or `helm install` + + +Alternatively, you can pass this configuration directly using the `--set` flag: + +```console +helm install kubewatch robusta/kubewatch --set='rbac.create=true,slack.channel=#YOUR_CHANNEL,slack.token=xoxb-YOUR_TOKEN,resourcesToWatch.pod=true,resourcesToWatch.daemonset=true,customresources[0].group=monitoring.coreos.com,customresources[0].version=v1,customresources[0].resource=prometheusrules' +``` +#### Custom RBAC roles +After defining custom resources, make sure that kubewatch has the necessary RBAC permissions to access the custom resources you've configured. Without the appropriate permissions, `kubewatch` will not be able to monitor your custom resources, and you won't receive notifications for changes. + +To grant these permissions, you can define custom RBAC roles using `customRoles` within the `rbac` section of your values file or by using the `--set` flag with Helm commands. This allows you to specify exactly which API groups, resources, and actions kubewatch should have access to. 
+ +Here’s how you can configure the necessary permissions to monitor your resources: +```yaml +rbac: + create: true + customRoles: + - apiGroups: ["monitoring.coreos.com"] + resources: ["prometheusrules"] + verbs: ["get", "list", "watch"] +``` + +Then deploy or upgrade `kubewatch` with `helm upgrade` or `helm install` + + +Alternatively, you can pass this configuration directly using the `--set` flag: + +```console +helm install kubewatch robusta/kubewatch --set='rbac.create=true,slack.channel=#YOUR_CHANNEL,slack.token=xoxb-YOUR_TOKEN,rbac.customRoles[0].apiGroups={monitoring.coreos.com},rbac.customRoles[0].resources={prometheusrules},rbac.customRoles[0].verbs={get,list,watch}' +``` + ### Local Installation #### Using go package installer: diff --git a/config/config.go b/config/config.go index 9778c022..5037285e 100755 --- a/config/config.go +++ b/config/config.go @@ -73,7 +73,12 @@ type Resource struct { CoreEvent bool `json:"coreevent"` } -// Config struct contains kubewatch configuration +type CRD struct { + Group string `json:"group"` + Version string `json:"version"` + Resource string `json:"resource"` +} + type Config struct { // Handlers know how to send notifications to specific services. Handler Handler `json:"handler"` @@ -83,6 +88,9 @@ type Config struct { // Resources to watch. Resource Resource `json:"resource"` + // CustomResources to Watch + CustomResources []CRD `json:"customresources"` + // For watching specific namespace, leave it empty for watching all. 
// this config is ignored when watching namespaces Namespace string `json:"namespace,omitempty"` diff --git a/examples/conf/kubewatch.conf.crd.yaml b/examples/conf/kubewatch.conf.crd.yaml new file mode 100644 index 00000000..7eb8ada3 --- /dev/null +++ b/examples/conf/kubewatch.conf.crd.yaml @@ -0,0 +1,4 @@ +customresources: + - group: monitoring.coreos.com + version: v1 + resource: prometheusrules \ No newline at end of file diff --git a/helm/kubewatch/Chart.yaml b/helm/kubewatch/Chart.yaml index 4d7203ed..4fa0e3a3 100644 --- a/helm/kubewatch/Chart.yaml +++ b/helm/kubewatch/Chart.yaml @@ -23,4 +23,4 @@ maintainers: [] name: kubewatch sources: - https://github.com/robusta-dev/kubewatch -version: 3.3.10 +version: 3.4.0 diff --git a/helm/kubewatch/templates/clusterrole.yaml b/helm/kubewatch/templates/clusterrole.yaml index 2401fa41..bcc1e78d 100644 --- a/helm/kubewatch/templates/clusterrole.yaml +++ b/helm/kubewatch/templates/clusterrole.yaml @@ -76,4 +76,9 @@ rules: - get - list - watch + {{- range .Values.rbac.customRoles }} + - apiGroups: {{ toYaml .apiGroups | nindent 4 }} + resources: {{ toYaml .resources | nindent 4 }} + verbs: {{ toYaml .verbs | nindent 4 }} + {{- end }} {{- end -}} diff --git a/helm/kubewatch/templates/configmap.yaml b/helm/kubewatch/templates/configmap.yaml index 8e2f35b4..31836f5f 100644 --- a/helm/kubewatch/templates/configmap.yaml +++ b/helm/kubewatch/templates/configmap.yaml @@ -47,4 +47,5 @@ data: lark: {{- toYaml .Values.lark | nindent 8 }} {{- end }} resource: {{- toYaml .Values.resourcesToWatch | nindent 6 }} + customresources: {{- toYaml .Values.customresources | nindent 6 }} namespace: {{ .Values.namespaceToWatch | quote }} diff --git a/helm/kubewatch/values.yaml b/helm/kubewatch/values.yaml index 15975f94..41abbe42 100644 --- a/helm/kubewatch/values.yaml +++ b/helm/kubewatch/values.yaml @@ -62,7 +62,7 @@ extraDeploy: [] ## image: repository: robustadev/kubewatch - tag: v2.6 + tag: v2.8.0 ## Specify a imagePullPolicy ## Defaults to 
'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images @@ -214,6 +214,15 @@ resourcesToWatch: job: false persistentvolume: false event: true + +## @param customresources Define custom resources to watch for changes +## Example: +## customresources: +## - group: monitoring.coreos.com +## version: v1 +## resource: prometheusrules +## +customresources: [] ## @param command Override default container command (useful when using custom images) ## command: [] @@ -439,11 +448,18 @@ initContainers: [] sidecars: [] ## @section RBAC parameters - -## @param rbac.create Whether to create & use RBAC resources or not ## rbac: +## @param rbac.create Whether to create & use RBAC resources or not create: false +## @param rbac.customRoles custom RBAC rules to be applied +## Example: +## customRoles: +## - apiGroups: ["monitoring.coreos.com"] +## resources: ["prometheusrules"] +## verbs: ["get", "list", "watch"] +## + customRoles: [] ## Pods Service Account ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ ## @param serviceAccount.create Specifies whether a ServiceAccount should be created diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 987607b7..a6e2c91d 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -19,6 +19,8 @@ package controller import ( "context" "fmt" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "os" "os/signal" "reflect" @@ -44,6 +46,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" @@ -89,11 +92,14 @@ func objName(obj interface{}) string { // Start prepares watchers and run their controllers, then waits for process termination signals func 
Start(conf *config.Config, eventHandler handlers.Handler) { var kubeClient kubernetes.Interface + var dynamicClient dynamic.Interface if _, err := rest.InClusterConfig(); err != nil { kubeClient = utils.GetClientOutOfCluster() + dynamicClient = utils.GetDynamicClientOutOfCluster() } else { kubeClient = utils.GetClient() + dynamicClient = utils.GetDynamicClient() } // User Configured Events @@ -542,6 +548,36 @@ func Start(conf *config.Config, eventHandler handlers.Handler) { go c.Run(stopCh) } + for _, crd := range conf.CustomResources { + informer := cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + return dynamicClient.Resource(schema.GroupVersionResource{ + Group: crd.Group, + Version: crd.Version, + Resource: crd.Resource, + }).List(context.Background(), options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + return dynamicClient.Resource(schema.GroupVersionResource{ + Group: crd.Group, + Version: crd.Version, + Resource: crd.Resource, + }).Watch(context.Background(), options) + }, + }, + &unstructured.Unstructured{}, + 0, //Skip resync + cache.Indexers{}, + ) + + c := newResourceController(kubeClient, eventHandler, informer, crd.Resource, fmt.Sprintf("%s/%s", crd.Group, crd.Version)) + stopCh := make(chan struct{}) + defer close(stopCh) + + go c.Run(stopCh) + } + sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, syscall.SIGTERM) signal.Notify(sigterm, syscall.SIGINT) diff --git a/pkg/utils/k8sutil.go b/pkg/utils/k8sutil.go index 78d9e006..faffba94 100644 --- a/pkg/utils/k8sutil.go +++ b/pkg/utils/k8sutil.go @@ -13,12 +13,27 @@ import ( events_v1 "k8s.io/api/events/v1" rbac_v1beta1 "k8s.io/api/rbac/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) -// GetClient returns a k8s clientset to the request from inside of cluster 
+// GetDynamicClient returns a k8s dynamic clientset to the request from inside of cluster +func GetDynamicClient() dynamic.Interface { + config, err := rest.InClusterConfig() + if err != nil { + logrus.Fatalf("Can not get kubernetes config: %v", err) + } + + clientset, err := dynamic.NewForConfig(config) + if err != nil { + logrus.Fatalf("Can not create dynamic kubernetes client: %v", err) + } + + return clientset +} + func GetClient() kubernetes.Interface { config, err := rest.InClusterConfig() if err != nil { @@ -56,6 +71,21 @@ func GetClientOutOfCluster() kubernetes.Interface { return clientset } +// GetDynamicClientOutOfCluster returns a k8s dynamic clientset to the request from outside of cluster +func GetDynamicClientOutOfCluster() dynamic.Interface { + config, err := buildOutOfClusterConfig() + if err != nil { + logrus.Fatalf("Can not get kubernetes config: %v", err) + } + + clientset, err := dynamic.NewForConfig(config) + if err != nil { + logrus.Fatalf("Can not create dynamic kubernetes client: %v", err) + } + + return clientset +} + // GetObjectMetaData returns metadata of a given k8s object func GetObjectMetaData(obj interface{}) (objectMeta meta_v1.ObjectMeta) {