Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add example of managing aws-auth configmap #2

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,16 @@
# iac-in-go

A reference architecture example for Pulumi in Go

## Usage

This repo contains multiple Go modules, so a [Go workspace](https://go.dev/doc/tutorial/workspaces) should be created to use it effectively:

```bash
go work init
go work use eks
go work use network
go work use pkg/cluster
go work use pkg/irsa
go work use pkg/cluster/kubeconfig
```
13 changes: 0 additions & 13 deletions eks/README.md
Original file line number Diff line number Diff line change
@@ -1,16 +1,3 @@
# EKS

This project creates an EKS cluster that can be used to run compute workloads in AWS.

## Usage

This repo contains multiple go modules, so a [go workspace](https://go.dev/doc/tutorial/workspaces) should be created to use it effectively:

```bash
go work init
go work use eks
go work use network
go work use pkg/cluster
go work use pkg/irsa
go work use pkg/cluster/kubeconfig
```
36 changes: 31 additions & 5 deletions eks/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"fmt"
"github.com/lbrlabs/iac-in-go/pkg/cluster"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/eks"
)

func main() {
Expand All @@ -20,18 +21,43 @@ func main() {

publicSubnetIds := network.GetOutput(pulumi.String("publicSubnetIds"))
privateSubnetIds := network.GetOutput(pulumi.String("privateSubnetIds"))

cluster, err := cluster.NewCluster(ctx, "eks", &cluster.ClusterArgs{

nodeRole, err := cluster.NewNodeRole(ctx, "eks", &cluster.NodeRoleArgs{})
if err != nil {
return fmt.Errorf("error creating node role: %v", err)
}

theCluster, err := cluster.NewCluster(ctx, "eks", &cluster.ClusterArgs{
ClusterSubnetIds: pulumi.StringArrayOutput(publicSubnetIds),
SystemNodeSubnetIds: pulumi.StringArrayOutput(privateSubnetIds),
LetsEncryptEmail: pulumi.String("[email protected]"),
LetsEncryptEmail: pulumi.String("[email protected]"),
RoleMappings: []cluster.RoleMappingArgs{

},
NodeRoleArns: pulumi.StringArray{
nodeRole.Role.Arn,
},
})
if err != nil {
return fmt.Errorf("error creating cluster: %v", err)
}

ctx.Export("clusterName", cluster.ControlPlane.Name)
ctx.Export("kubeconfig", cluster.KubeConfig)
_, err = cluster.NewNodeGroup(ctx, "eks", &cluster.NodeGroupArgs{
ClusterName: theCluster.ControlPlane.Name,
Role: nodeRole.Role,
SubnetIds: pulumi.StringArrayOutput(privateSubnetIds),
ScalingConfig: eks.NodeGroupScalingConfigArgs{
DesiredSize: pulumi.Int(1),
MaxSize: pulumi.Int(10),
MinSize: pulumi.Int(1),
},
})
if err != nil {
return fmt.Errorf("error creating node group: %v", err)
}

ctx.Export("clusterName", theCluster.ControlPlane.Name)
ctx.Export("kubeconfig", theCluster.KubeConfig)

return nil
})
Expand Down
26 changes: 26 additions & 0 deletions pkg/cluster/awsauth/awsauth.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
package awsauth

import (
"gopkg.in/yaml.v2"
)
type RoleMapping struct {
RoleArn string `yaml:"roleArn"`
Username string `yaml:"username"`
Groups []string `yaml:"groups"`
}

func BuildAuthData(arns []string) ([]byte, error) {

roles := []RoleMapping{}

for _, arn := range arns {
role := RoleMapping{
RoleArn: arn,
Username: "system:node:{{EC2PrivateDNSName}}",
Groups: []string{"system:bootstrappers", "system:nodes"},
}
roles = append(roles, role)
}

return yaml.Marshal(roles)
}
87 changes: 63 additions & 24 deletions pkg/cluster/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@ package cluster
import (
"encoding/json"
"fmt"
"github.com/lbrlabs/iac-in-go/pkg/cluster/awsauth"
"github.com/lbrlabs/iac-in-go/pkg/cluster/kubeconfig"

"github.com/lbrlabs/iac-in-go/pkg/irsa"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/eks"
Expand All @@ -21,6 +23,12 @@ import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// RoleMappingArgs describes one caller-supplied aws-auth role mapping:
// an IAM role ARN plus the Kubernetes username and groups it maps to.
type RoleMappingArgs struct {
	// RoleArn is the ARN of the IAM role to map.
	RoleArn pulumi.StringInput `pulumi:"roleArn"`
	// Username is the Kubernetes username the role authenticates as.
	Username pulumi.StringInput `pulumi:"username"`
	// Groups are the Kubernetes groups granted to the role.
	Groups pulumi.StringArrayInput `pulumi:"groups"`
}

// The set of arguments for creating a Cluster component resource.
type ClusterArgs struct {
ClusterSubnetIds pulumi.StringArrayInput `pulumi:"clusterSubnetIds"`
Expand All @@ -31,6 +39,8 @@ type ClusterArgs struct {
SystemNodeDesiredCount *pulumi.IntInput `pulumi:"systemNodeDesiredCount"`
ClusterVersion pulumi.StringPtrInput `pulumi:"clusterVersion"`
LetsEncryptEmail pulumi.StringInput `pulumi:"letsEncryptEmail"`
NodeRoleArns pulumi.StringArrayInput `pulumi:"nodeRoleArns"`
RoleMappings []RoleMappingArgs `pulumi:"roleMappings"`
}

// The Cluster component resource.
Expand Down Expand Up @@ -173,6 +183,55 @@ func NewCluster(ctx *pulumi.Context,
return nil, fmt.Errorf("error creating cluster control plane: %w", err)
}

kc, err := kubeconfig.Generate(name, controlPlane.Endpoint, controlPlane.CertificateAuthorities.Index(pulumi.Int(0)).Data().Elem(), controlPlane.Name)
if err != nil {
return nil, fmt.Errorf("error generating kubeconfig: %w", err)
}

provider, err := kubernetes.NewProvider(ctx, fmt.Sprintf("%s-k8s-provider", name), &kubernetes.ProviderArgs{
Kubeconfig: kc,
SuppressHelmHookWarnings: pulumi.Bool(true),
}, pulumi.Parent(controlPlane))
if err != nil {
return nil, fmt.Errorf("error creating kubernetes provider: %w", err)
}

serverSideProvider, err := kubernetes.NewProvider(ctx, fmt.Sprintf("%s-k8s-ssa-provider", name), &kubernetes.ProviderArgs{
Kubeconfig: kc,
EnableServerSideApply: pulumi.Bool(true),
}, pulumi.Parent(controlPlane))
if err != nil {
return nil, fmt.Errorf("error creating server side apply kubernetes provider: %w", err)
}

nodeRole, err := NewNodeRole(ctx, fmt.Sprintf("%s-system-node-role", name), &NodeRoleArgs{}, pulumi.Parent(controlPlane))
if err != nil {
return nil, fmt.Errorf("error creating system node role: %w", err)
}

nodeRoleArns := appendToStringArrayInput(ctx, args.NodeRoleArns, nodeRole.Role.Arn)

mapRoleData := nodeRoleArns.ToStringArrayOutput().ApplyT(func(arns []string) (string, error) {
data, err := awsauth.BuildAuthData(arns)
if err != nil {
return "", fmt.Errorf("error building auth data: %w", err)
}
return string(data), nil
}).(pulumi.StringOutput)

_, err = corev1.NewConfigMap(ctx, fmt.Sprintf("%s-aws-auth", name), &corev1.ConfigMapArgs{
Metadata: &metav1.ObjectMetaArgs{
Name: pulumi.String("aws-auth"),
Namespace: pulumi.String("kube-system"),
},
Data: pulumi.StringMap{
"mapRoles": mapRoleData,
},
}, pulumi.Parent(controlPlane), pulumi.Provider(provider))
if err != nil {
return nil, fmt.Errorf("error creating aws-auth configmap: %w", err)
}

cert := tls.GetCertificateOutput(ctx, tls.GetCertificateOutputArgs{
Url: controlPlane.Identities.Index(pulumi.Int(0)).Oidcs().Index(pulumi.Int(0)).Issuer().Elem(),
}, pulumi.Parent(component))
Expand Down Expand Up @@ -233,18 +292,19 @@ func NewCluster(ctx *pulumi.Context,
},
}

systemNodes, err := NewNodeGroup(ctx, fmt.Sprintf("%s-system-nodes", name), &NodeGroupArgs{
systemNodes, err := NewNodeGroup(ctx, fmt.Sprintf("%s-system", name), &NodeGroupArgs{
ClusterName: controlPlane.Name,
SubnetIds: args.SystemNodeSubnetIds,
InstanceTypes: &instanceTypes,
Labels: systemNodeLabels,
Role: nodeRole.Role,
Taints: taints,
ScalingConfig: eks.NodeGroupScalingConfigArgs{
MaxSize: systemNodeMaxCount,
MinSize: systemNodeMinCount,
DesiredSize: systemNodeDesiredCount,
},
}, pulumi.Parent(controlPlane))
}, pulumi.Parent(nodeRole))
if err != nil {
return nil, fmt.Errorf("error creating system nodegroup: %w", err)
}
Expand Down Expand Up @@ -346,27 +406,6 @@ func NewCluster(ctx *pulumi.Context,
return nil, fmt.Errorf("error installing EBS csi: %w", err)
}

kc, err := kubeconfig.Generate(name, controlPlane.Endpoint, controlPlane.CertificateAuthorities.Index(pulumi.Int(0)).Data().Elem(), controlPlane.Name)
if err != nil {
return nil, fmt.Errorf("error generating kubeconfig: %w", err)
}

provider, err := kubernetes.NewProvider(ctx, fmt.Sprintf("%s-k8s-provider", name), &kubernetes.ProviderArgs{
Kubeconfig: kc,
SuppressHelmHookWarnings: pulumi.Bool(true),
}, pulumi.Parent(controlPlane))
if err != nil {
return nil, fmt.Errorf("error creating kubernetes provider: %w", err)
}

serverSideProvider, err := kubernetes.NewProvider(ctx, fmt.Sprintf("%s-k8s-ssa-provider", name), &kubernetes.ProviderArgs{
Kubeconfig: kc,
EnableServerSideApply: pulumi.Bool(true),
}, pulumi.Parent(controlPlane))
if err != nil {
return nil, fmt.Errorf("error creating server side apply kubernetes provider: %w", err)
}

// FIXME: this is a workaround for EKS creating a broken default storage class
// FIXME: what we likely want to do here in future is delete this
_, err = storagev1.NewStorageClass(ctx, fmt.Sprintf("%s-gp2-storage-class", name), &storagev1.StorageClassArgs{
Expand Down Expand Up @@ -808,4 +847,4 @@ func NewCluster(ctx *pulumi.Context,
}

return component, nil
}
}
50 changes: 5 additions & 45 deletions pkg/cluster/nodegroup.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package cluster

import (
"encoding/json"
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/eks"
"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam"
Expand All @@ -18,13 +17,13 @@ type NodeGroupArgs struct {
Taints eks.NodeGroupTaintArray `pulumi:"taints"`
Labels pulumi.StringMapInput `pulumi:"labels"`
ScalingConfig eks.NodeGroupScalingConfigArgs `pulumi:"scalingConfig"`
Role *iam.Role `pulumi:"nodeRole"`
}

type NodeGroup struct {
pulumi.ResourceState

NodeGroup *eks.NodeGroup `pulumi:"attachedNodeGroup"`
Role *iam.Role `pulumi:"nodeRole"`
}

// NewNodeGroup creates a new EKS Node group component resource.
Expand All @@ -40,45 +39,6 @@ func NewNodeGroup(ctx *pulumi.Context,
return nil, err
}

nodePolicyJSON, err := json.Marshal(map[string]interface{}{
"Statement": []map[string]interface{}{
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": map[string]interface{}{
"Service": "ec2.amazonaws.com",
},
},
},
"Version": "2012-10-17",
})
if err != nil {
return nil, fmt.Errorf("error marshalling node policy: %w", err)
}

nodeRole, err := iam.NewRole(ctx, fmt.Sprintf("%s-node-role", name), &iam.RoleArgs{
AssumeRolePolicy: pulumi.String(nodePolicyJSON),
}, pulumi.Parent(component))
if err != nil {
return nil, fmt.Errorf("error creating node role: %w", err)
}

_, err = iam.NewRolePolicyAttachment(ctx, fmt.Sprintf("%s-node-worker-policy", name), &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"),
Role: nodeRole.Name,
}, pulumi.Parent(nodeRole))
if err != nil {
return nil, fmt.Errorf("error attaching node worker policy: %w", err)
}

_, err = iam.NewRolePolicyAttachment(ctx, fmt.Sprintf("%s-node-ecr-policy", name), &iam.RolePolicyAttachmentArgs{
PolicyArn: pulumi.String("arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"),
Role: nodeRole.Name,
}, pulumi.Parent(nodeRole))
if err != nil {
return nil, fmt.Errorf("error attaching system node ecr policy: %w", err)
}

var instanceTypes pulumi.StringArrayInput

if args.InstanceTypes == nil {
Expand All @@ -89,10 +49,12 @@ func NewNodeGroup(ctx *pulumi.Context,
instanceTypes = *args.InstanceTypes
}



nodeGroup, err := eks.NewNodeGroup(ctx, fmt.Sprintf("%s-nodes", name), &eks.NodeGroupArgs{
ClusterName: args.ClusterName,
SubnetIds: args.SubnetIds,
NodeRoleArn: nodeRole.Arn,
NodeRoleArn: args.Role.Arn,
Taints: args.Taints,
InstanceTypes: instanceTypes,
Labels: args.Labels,
Expand All @@ -103,15 +65,13 @@ func NewNodeGroup(ctx *pulumi.Context,
}

component.NodeGroup = nodeGroup
component.Role = nodeRole

if err := ctx.RegisterResourceOutputs(component, pulumi.Map{
"nodeGroup": nodeGroup,
"nodeRole": nodeRole,
}); err != nil {
return nil, err
}

return component, nil

}
}
Loading