Skip to content

Commit

Permalink
Merge pull request #7 from rusenask/develop
Browse files Browse the repository at this point in the history
Develop
  • Loading branch information
rusenask authored Jun 14, 2017
2 parents 64412cb + 580df80 commit a2d4447
Show file tree
Hide file tree
Showing 8 changed files with 156 additions and 72 deletions.
2 changes: 1 addition & 1 deletion main.go
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ func setupTriggers(ctx context.Context, k8sImplementer kubernetes.Implementer, p
return
}

ps, err := pubsub.NewSubscriber(&pubsub.Opts{
ps, err := pubsub.NewPubsubSubscriber(&pubsub.Opts{
ProjectID: projectID,
Providers: providers,
})
Expand Down
18 changes: 12 additions & 6 deletions provider/kubernetes/kubernetes.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,8 +118,6 @@ func (p *Provider) updateDeployments(deployments []v1beta1.Deployment) (updated

// getDeployment returns the named deployment in the given namespace,
// delegating to the configured Kubernetes implementer.
func (p *Provider) getDeployment(namespace, name string) (*v1beta1.Deployment, error) {
	return p.implementer.Deployment(namespace, name)
}

Expand Down Expand Up @@ -157,6 +155,8 @@ func (p *Provider) impactedDeployments(repo *types.Repository) ([]v1beta1.Deploy
"policy": policy,
}).Info("provider.kubernetes: keel policy found, checking deployment...")

shouldUpdateDeployment := false

for idx, c := range deployment.Spec.Template.Spec.Containers {
// Remove version if any
containerImageName := versionreg.ReplaceAllString(c.Image, "")
Expand Down Expand Up @@ -194,7 +194,7 @@ func (p *Provider) impactedDeployments(repo *types.Repository) ([]v1beta1.Deploy
"policy": policy,
}).Info("provider.kubernetes: current image version")

shouldUpdate, err := version.ShouldUpdate(currentVersion, newVersion, policy)
shouldUpdateContainer, err := version.ShouldUpdate(currentVersion, newVersion, policy)
if err != nil {
log.WithFields(log.Fields{
"error": err,
Expand All @@ -213,14 +213,16 @@ func (p *Provider) impactedDeployments(repo *types.Repository) ([]v1beta1.Deploy
"current_version": currentVersion.String(),
"new_version": newVersion.String(),
"policy": policy,
"should_update": shouldUpdate,
"should_update": shouldUpdateContainer,
}).Info("provider.kubernetes: checked version, deciding whether to update")

if shouldUpdate {
if shouldUpdateContainer {
// updating image
c.Image = fmt.Sprintf("%s:%s", containerImageName, newVersion.String())
deployment.Spec.Template.Spec.Containers[idx] = c
impacted = append(impacted, deployment)
// marking this deployment for update
shouldUpdateDeployment = true

log.WithFields(log.Fields{
"parsed_image": containerImageName,
"raw_image_name": c.Image,
Expand All @@ -230,6 +232,10 @@ func (p *Provider) impactedDeployments(repo *types.Repository) ([]v1beta1.Deploy
}).Info("provider.kubernetes: impacted deployment container found")
}
}

if shouldUpdateDeployment {
impacted = append(impacted, deployment)
}
}
}

Expand Down
3 changes: 0 additions & 3 deletions provider/kubernetes/kubernetes_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -398,9 +398,6 @@ func TestGetImpactedTwoContainersInSameDeployment(t *testing.T) {

func TestGetImpactedTwoSameContainersInSameDeployment(t *testing.T) {

// FIXME: enable once sorted
t.Skip()

fp := &fakeImplementer{}
fp.namespaces = &v1.NamespaceList{
Items: []v1.Namespace{
Expand Down
38 changes: 19 additions & 19 deletions readme.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Keel - automated Kubernetes deployments for the rest of us

Lightweight (11MB image size, uses 12MB RAM when running) [Kubernetes](https://kubernetes.io/) controller for automating image updates for deployments. Keel uses [semantic versioning](http://semver.org/) to determine whether deployment needs an update or not. Currently keel has several types of triggers:
Lightweight (uses ~10MB RAM when running) [Kubernetes](https://kubernetes.io/) controller for automating deployment updates when new image is available. Keel uses [semantic versioning](http://semver.org/) to determine whether deployment needs an update or not. Currently keel has several types of triggers:

* Google's pubsub integration with [Google Container Registry](https://cloud.google.com/container-registry/)
* Webhooks
Expand All @@ -9,20 +9,22 @@ Upcomming integrations:

* DockerHub webhooks

## Why?

I have built Keel since I have a relatively small Golang project which doesn't use a lot of memory and introducing an antique, heavy weight CI solution with lots dependencies seemed like a terrible idea.
## Keel overview

You should consider using Keel:
* If you are not Netflix, Google, Amazon, {insert big company here} - you might not want to run something like Spinnaker that has heavy dependencies such as "JDK8, Redis, Cassandra, Packer". You probably need something lightweight, stateless, that you don't have to think about.
* If you are not a bank that uses RedHat's OpenShift which embedded Jenkins that probably already does what Keel is doing.
* You want automated Kubernetes deployment updates.
* Stateless, runs as a single container in kube-system namespace
* Automatically detects images that you have in your Kubernetes environment and configures relevant [Google Cloud pubsub](https://cloud.google.com/pubsub/) topic, subscriptions.
* Updates deployment if you have set Keel policy and newer image is available.

Here is a list of Keel dependencies:
## Why?

1.
I have built Keel because I have a relatively small Golang project which doesn't use a lot of memory, and introducing an antique, heavyweight CI solution with lots of dependencies seemed like a terrible idea.

You should consider using Keel if:
* You don't want your "Continuous Delivery" tool to consume more resources than your actual deployment does.
* You are __not__ Netflix, Google, Amazon, {insert big company here} that already has something like Spinnaker that has too many dependencies such as "JDK8, Redis, Cassandra, Packer".
* You want simple, automated Kubernetes deployment updates.

Yes, none.

## Getting started

Expand Down Expand Up @@ -70,10 +72,10 @@ spec:

Available policy options:

* all - update whenever there is a version bump
* major - update major versions
* minor - update only minor versions (ignores major)
* patch - update only patch versions (ignores minor and major versions)
* __all__ - update whenever there is a version bump
* __major__ - update major versions
* __minor__ - update only minor versions (ignores major)
* __patch__ - update only patch versions (ignores minor and major versions)

## Deployment

Expand All @@ -87,17 +89,15 @@ gcloud container node-pools create new-pool --cluster CLUSTER_NAME --scopes http

### Step 2: Kubernetes

Since keel will be updating deployments, let's create a new [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in `kube-system` namespace:
Since Keel will be updating deployments, let's create a new [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the `kube-system` namespace:

```
kubectl create serviceaccount keel --namespace=kube-system
```

Now, edit [deployment file](https://github.com/rusenask/keel/blob/master/hack/deployment.sample.yml) that is supplied with the repo (basically point to the newest keel release and set your PROJECT_ID to the actual project ID that you have):
Now, edit [deployment file](https://github.com/rusenask/keel/blob/master/hack/deployment.sample.yml) that is supplied with the repository (basically point to the [newest Keel release](https://hub.docker.com/r/karolisr/keel/tags/) and set your PROJECT_ID to the actual project ID that you have):

```
kubectl create -f hack/deployment.yml
```

Once Keel is deployed in your Kubernetes cluster - it occasionally scans your current deployments and looks for ones that have label _keel.observer/policy_. It then checks whether appropriate subscriptions and topics are set for GCR registries, if not - auto-creates them.

Once Keel is deployed in your Kubernetes cluster - it occasionally scans your current deployments and looks for ones that have label _keel.observer/policy_. It then checks whether appropriate subscriptions and topics are set for GCR registries, if not - auto-creates them.
64 changes: 31 additions & 33 deletions trigger/pubsub/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ import (
type DefaultManager struct {
implementer kubernetes.Implementer

client *Subscriber
client Subscriber
// existing subscribers
mu *sync.Mutex
// a map of GCR URIs and subscribers to those URIs
Expand All @@ -35,8 +35,12 @@ type DefaultManager struct {
ctx context.Context
}

// Subscriber is the minimal pubsub client surface the manager depends on.
// Subscribe creates/attaches to the subscription for the given topic; it is
// treated as a blocking call by callers (see the goroutine in the manager),
// returning only on error or when ctx is cancelled.
type Subscriber interface {
	Subscribe(ctx context.Context, topic, subscription string) error
}

// NewDefaultManager - creates new pubsub manager to create subscription for deployments
func NewDefaultManager(projectID string, implementer kubernetes.Implementer, subClient *Subscriber) *DefaultManager {
func NewDefaultManager(projectID string, implementer kubernetes.Implementer, subClient Subscriber) *DefaultManager {
return &DefaultManager{
implementer: implementer,
client: subClient,
Expand Down Expand Up @@ -65,7 +69,7 @@ func (s *DefaultManager) Start(ctx context.Context) error {
case <-ctx.Done():
return nil
default:
log.Info("performing scan")
log.Debug("performing scan")
err := s.scan(ctx)
if err != nil {
log.WithFields(log.Fields{
Expand Down Expand Up @@ -114,10 +118,31 @@ func (s *DefaultManager) subscribed(gcrURI string) bool {
return ok
}

func (s *DefaultManager) addSubscription(ctx context.Context, gcrURI string) {
// ensureSubscription makes sure a pubsub subscription exists for the given
// GCR registry URI, starting one in the background if it is not yet tracked.
//
// The subscribers map doubles as the "already subscribed" registry: an entry
// is inserted under the mutex before the blocking Subscribe call is launched
// in its own goroutine, and removed again when that goroutine exits, so a
// failed subscription gets retried on a later scan.
func (s *DefaultManager) ensureSubscription(gcrURI string) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if _, ok := s.subscribers[gcrURI]; ok {
		// Subscription already exists (or is being set up) — nothing to do.
		return
	}

	ctx, cancel := context.WithCancel(s.ctx)
	s.subscribers[gcrURI] = ctx
	subName := containerRegistrySubName(s.projectID, gcrURI)

	// client.Subscribe blocks, so run it in its own goroutine.
	go func() {
		defer cancel()
		err := s.client.Subscribe(s.ctx, gcrURI, subName)
		if err != nil {
			log.WithFields(log.Fields{
				"error":             err,
				"gcr_uri":           gcrURI,
				"subscription_name": subName,
			}).Error("trigger.pubsub.manager: failed to create subscription")
		}

		// Cleanup so the URI can be re-subscribed on a future scan.
		s.removeSubscription(gcrURI)
	}()
}

func (s *DefaultManager) removeSubscription(gcrURI string) {
Expand All @@ -141,34 +166,7 @@ func (s *DefaultManager) checkDeployment(deployment *v1beta1.Deployment) error {

// uri
gcrURI := containerRegistryURI(s.projectID, registry)

// existing sub
ok := s.subscribed(gcrURI)
if !ok {
// create sub in a separate goroutine since client.Subscribe is a blocking call
go func() {
ctx, cancel := context.WithCancel(s.ctx)
defer cancel()
s.addSubscription(ctx, gcrURI)
defer s.removeSubscription(gcrURI)
subName := containerRegistrySubName(s.projectID, gcrURI)
err := s.client.Subscribe(s.ctx, gcrURI, subName)
if err != nil {
log.WithFields(log.Fields{
"error": err,
"gcr_uri": gcrURI,
"subscription_name": subName,
}).Error("trigger.pubsub.manager: failed to create subscription")
}
}()
} else {
log.WithFields(log.Fields{
"registry": registry,
"gcr_uri": gcrURI,
"deployment": deployment.Name,
"image_name": c.Image,
}).Debug("trigger.pubsub.manager: existing subscription for deployment's image found")
}
s.ensureSubscription(gcrURI)

}

Expand Down
80 changes: 80 additions & 0 deletions trigger/pubsub/manager_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
package pubsub

import (
"golang.org/x/net/context"
"sync"
"time"

meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"

"github.com/rusenask/keel/types"

"testing"
)

type fakeSubscriber struct {
TimesSubscribed int
SubscribedTopicName string
SubscribedSubName string
}

func (s *fakeSubscriber) Subscribe(ctx context.Context, topic, subscription string) error {
s.TimesSubscribed++
s.SubscribedTopicName = topic
s.SubscribedSubName = subscription
for {
select {
case <-ctx.Done():
return nil
}
}
}

// TestCheckDeployment verifies that checkDeployment creates exactly one
// pubsub subscription for a labelled deployment, even when the deployment
// has several containers whose images live in the same registry.
func TestCheckDeployment(t *testing.T) {
	fs := &fakeSubscriber{}
	mng := &DefaultManager{
		client:      fs,
		mu:          &sync.Mutex{},
		ctx:         context.Background(),
		subscribers: make(map[string]context.Context),
	}

	// Keyed fields: go vet's composites check flags unkeyed struct
	// literals, and keyed literals survive upstream field additions.
	dep := &v1beta1.Deployment{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      "dep-1",
			Namespace: "xxxx",
			Labels:    map[string]string{types.KeelPolicyLabel: "all"},
		},
		Spec: v1beta1.DeploymentSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Image: "gcr.io/v2-namespace/hello-world:1.1.1"},
						{Image: "gcr.io/v2-namespace/greetings-world:1.1.1"},
					},
				},
			},
		},
	}

	if err := mng.checkDeployment(dep); err != nil {
		t.Errorf("deployment check failed: %s", err)
	}

	// The subscription is created in a separate goroutine; give it a
	// moment to run before asserting.
	time.Sleep(100 * time.Millisecond)

	if fs.TimesSubscribed != 1 {
		t.Errorf("expected to find one subscription, found: %d", fs.TimesSubscribed)
	}
}
Loading

0 comments on commit a2d4447

Please sign in to comment.