Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Mass Updates #3

Merged
merged 16 commits into main
Apr 17, 2024
Merged

Mass Updates #3

merged 16 commits into main
Apr 17, 2024

Conversation

bensoer
Copy link
Member

@bensoer bensoer commented Apr 12, 2024

No description provided.

@github-advanced-security
Copy link

This pull request sets up GitHub code scanning for this repository. Once the scans have completed and the checks have passed, the analysis results for this pull request branch will appear on this overview. Once you merge this pull request, the 'Security' tab will show more code scanning analysis results (for example, for the default branch). Depending on your configuration and choice of analysis tool, future pull requests will be annotated with code scanning analysis results. For more information about GitHub code scanning, check out the documentation.

Copy link

Terraform validate Failed

Show Output
╷
│ Error: Module not installed
│ 
│   on main.tf line 28:
│   28: module "k8infra" {
│ 
│ This module is not yet installed. Run "terraform init" to install all
│ modules required by this configuration.
╵
╷
│ Error: Module not installed
│ 
│   on main.tf line 43:
│   43: module "k8config" {
│ 
│ This module is not yet installed. Run "terraform init" to install all
│ modules required by this configuration.
╵

Copy link

Terraform fmt Failed

Show Output
main.tf
--- old/main.tf
+++ new/main.tf
@@ -1,63 +1,63 @@
 terraform {
-    required_providers {
-      digitalocean = {
-          source = "digitalocean/digitalocean"
-      }
-      kubernetes = {
-        source = "hashicorp/kubernetes"
-        version = "2.27.0"
-      }
-      helm = {
-        source  = "hashicorp/helm"
-        version = ">= 2.0.1"
-      }
-
-      kubectl = {
-        source = "alekc/kubectl"
-        version = "2.0.4"
-      }
-
-      cloudflare = {
-          source = "cloudflare/cloudflare"
-          version = "~> 2.0"
-      }
+  required_providers {
+    digitalocean = {
+      source = "digitalocean/digitalocean"
     }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "2.27.0"
+    }
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.0.1"
+    }
+
+    kubectl = {
+      source  = "alekc/kubectl"
+      version = "2.0.4"
+    }
+
+    cloudflare = {
+      source  = "cloudflare/cloudflare"
+      version = "~> 2.0"
+    }
+  }
 }
 
 
 module "k8infra" {
-    source = "./modules/k8infra"
-    do_token = var.do_token
+  source   = "./modules/k8infra"
+  do_token = var.do_token
 
-    providers = {
-      digitalocean = digitalocean
-      cloudflare = cloudflare
-    }
+  providers = {
+    digitalocean = digitalocean
+    cloudflare   = cloudflare
+  }
 }
 
-resource time_sleep "wait_60_seconds" {
-  depends_on = [ module.k8infra ]
+resource "time_sleep" "wait_60_seconds" {
+  depends_on      = [module.k8infra]
   create_duration = "60s"
 }
 
 module "k8config" {
-    source = "./modules/k8config"
-
-    cluster_id = module.k8infra.cluster_id
-    cluster_name = module.k8infra.cluster_name
-    do_token = var.do_token
-    cf_email = var.cf_email
-    cf_token = var.cf_token
-    domain = var.domain
-
-    providers = {
-      kubernetes = kubernetes
-      helm=helm
-      kubectl = kubectl
-    }
+  source = "./modules/k8config"
 
-    depends_on = [ 
-        time_sleep.wait_60_seconds
-     ]
+  cluster_id   = module.k8infra.cluster_id
+  cluster_name = module.k8infra.cluster_name
+  do_token     = var.do_token
+  cf_email     = var.cf_email
+  cf_token     = var.cf_token
+  domain       = var.domain
+
+  providers = {
+    kubernetes = kubernetes
+    helm       = helm
+    kubectl    = kubectl
+  }
+
+  depends_on = [
+    time_sleep.wait_60_seconds
+  ]
 
 }
\ No newline at end of file
modules/k8config/main.tf
--- old/modules/k8config/main.tf
+++ new/modules/k8config/main.tf
@@ -1,21 +1,21 @@
 terraform {
-    required_providers {
-      kubernetes = {
-        source = "hashicorp/kubernetes"
-        version = "2.27.0"
-      }
-      helm = {
-        source  = "hashicorp/helm"
-        version = ">= 2.0.1"
-      }
-      kubectl = {
-        source = "alekc/kubectl"
-        version = "2.0.4"
-      }
+  required_providers {
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "2.27.0"
     }
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.0.1"
+    }
+    kubectl = {
+      source  = "alekc/kubectl"
+      version = "2.0.4"
+    }
+  }
 }
 
-module "crds"{
+module "crds" {
   source = "./modules/crds"
 
   providers = {
@@ -23,8 +23,8 @@
   }
 }
 
-resource time_sleep "wait_60_seconds" {
-  depends_on = [ module.crds ]
+resource "time_sleep" "wait_60_seconds" {
+  depends_on      = [module.crds]
   create_duration = "60s"
 }
 
@@ -35,14 +35,14 @@
   cf_token = var.cf_token
 
   providers = {
-    helm = helm
+    helm       = helm
     kubernetes = kubernetes
-    kubectl = kubectl
+    kubectl    = kubectl
   }
 
-  depends_on = [ 
+  depends_on = [
     time_sleep.wait_60_seconds
-   ]
+  ]
 }
 
 
@@ -51,15 +51,15 @@
 
   cf_email = var.cf_email
   cf_token = var.cf_token
-  domain = var.domain
+  domain   = var.domain
 
   providers = {
-    helm = helm
+    helm       = helm
     kubernetes = kubernetes
-    kubectl = kubectl
+    kubectl    = kubectl
   }
 
-  depends_on = [ 
+  depends_on = [
     module.certmanager
   ]
 }
@@ -70,7 +70,7 @@
   domain = var.domain
 
   providers = {
-    helm = helm,
+    helm    = helm,
     kubectl = kubectl
   }
 
@@ -86,14 +86,14 @@
   domain = var.domain
 
   providers = {
-    helm = helm
+    helm       = helm
     kubernetes = kubernetes
-    kubectl = kubectl
+    kubectl    = kubectl
   }
 
-  depends_on = [ 
+  depends_on = [
     module.certmanager,
     module.traefik,
     time_sleep.wait_60_seconds
-   ]
+  ]
 }
modules/k8config/modules/_archive/grafana/main.tf
--- old/modules/k8config/modules/_archive/grafana/main.tf
+++ new/modules/k8config/modules/_archive/grafana/main.tf
@@ -1,51 +1,51 @@
 terraform {
-    required_providers {
-      kubernetes = {
-        source = "hashicorp/kubernetes"
-        version = "2.27.0"
-      }
-      helm = {
-        source  = "hashicorp/helm"
-        version = ">= 2.0.1"
-      }
-      kubectl = {
-        source = "alekc/kubectl"
-        version = "2.0.4"
-      }
+  required_providers {
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "2.27.0"
     }
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.0.1"
+    }
+    kubectl = {
+      source  = "alekc/kubectl"
+      version = "2.0.4"
+    }
+  }
 }
 
 resource "helm_release" "grafana" {
   name = "grafana"
 
   repository = "https://grafana.github.io/helm-charts"
-  chart = "grafana"
+  chart      = "grafana"
 
   atomic = true
 
   # due to order of things, the namespace is created separately
   create_namespace = false
-  namespace = "grafana"
+  namespace        = "grafana"
 
-  recreate_pods = true
-  reuse_values = true
-  force_update = true
-  cleanup_on_fail = true
+  recreate_pods     = true
+  reuse_values      = true
+  force_update      = true
+  cleanup_on_fail   = true
   dependency_update = true
 
   values = [
     file("${abspath(path.module)}/res/grafana-values.yaml")
   ]
 
-  depends_on = [ 
+  depends_on = [
     kubernetes_secret.grafana_dashboard_credentials,
     kubernetes_namespace.grafana_namespace
-   ]
+  ]
 }
 
 resource "random_password" "password" {
-  length           = 25
-  special          = false
+  length  = 25
+  special = false
   #override_special = "!#$%&*()-_=+[]{}<>:?"
 }
 
@@ -57,7 +57,7 @@
 
 resource "kubernetes_secret" "grafana_dashboard_credentials" {
   metadata {
-    name = "grafana-dashboard-admin-credentials"
+    name      = "grafana-dashboard-admin-credentials"
     namespace = "grafana"
   }
 
@@ -66,9 +66,9 @@
     password = random_password.password.result
   }
 
-  depends_on = [ 
+  depends_on = [
     kubernetes_namespace.grafana_namespace
-   ]
+  ]
 }
 
 resource "kubectl_manifest" "grafana_ingress_certificate" {
@@ -76,7 +76,7 @@
     domain = var.domain
   })
 
-  depends_on = [ 
+  depends_on = [
     helm_release.grafana
   ]
 }
@@ -86,7 +86,7 @@
     domain = var.domain
   })
 
-  depends_on = [ 
+  depends_on = [
     helm_release.grafana,
     kubectl_manifest.grafana_ingress_certificate
   ]
modules/k8config/modules/_archive/grafana/variables.tf
--- old/modules/k8config/modules/_archive/grafana/variables.tf
+++ new/modules/k8config/modules/_archive/grafana/variables.tf
@@ -1,5 +1,5 @@
 variable "domain" {
-    description = "Root Domain For Service"
-    sensitive = true
-    type = string
+  description = "Root Domain For Service"
+  sensitive   = true
+  type        = string
 }
\ No newline at end of file
modules/k8config/modules/_archive/loki/main.tf
--- old/modules/k8config/modules/_archive/loki/main.tf
+++ new/modules/k8config/modules/_archive/loki/main.tf
@@ -1,27 +1,27 @@
 terraform {
-    required_providers {
-      helm = {
-        source  = "hashicorp/helm"
-        version = ">= 2.0.1"
-      }
+  required_providers {
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.0.1"
     }
+  }
 }
 
 resource "helm_release" "loki" {
   name = "loki"
 
   repository = "https://grafana.github.io/helm-charts"
-  chart = "loki"
+  chart      = "loki"
 
   atomic = true
 
   create_namespace = true
-  namespace = "loki"
+  namespace        = "loki"
 
-  recreate_pods = true
-  reuse_values = true
-  force_update = true
-  cleanup_on_fail = true
+  recreate_pods     = true
+  reuse_values      = true
+  force_update      = true
+  cleanup_on_fail   = true
   dependency_update = true
 
   values = [
modules/k8config/modules/_archive/tempo/main.tf
--- old/modules/k8config/modules/_archive/tempo/main.tf
+++ new/modules/k8config/modules/_archive/tempo/main.tf
@@ -1,27 +1,27 @@
 terraform {
-    required_providers {
-      helm = {
-        source  = "hashicorp/helm"
-        version = ">= 2.0.1"
-      }
+  required_providers {
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.0.1"
     }
+  }
 }
 
 resource "helm_release" "tempo" {
   name = "tempo"
 
   repository = "https://grafana.github.io/helm-charts"
-  chart = "tempo-distributed"
+  chart      = "tempo-distributed"
 
   atomic = true
 
   create_namespace = true
-  namespace = "tempo"
+  namespace        = "tempo"
 
-  recreate_pods = true
-  reuse_values = true
-  force_update = true
-  cleanup_on_fail = true
+  recreate_pods     = true
+  reuse_values      = true
+  force_update      = true
+  cleanup_on_fail   = true
   dependency_update = true
 
   values = [
modules/k8config/modules/argocd/main.tf
--- old/modules/k8config/modules/argocd/main.tf
+++ new/modules/k8config/modules/argocd/main.tf
@@ -1,31 +1,31 @@
 
 terraform {
-    required_providers {
-      helm = {
-        source  = "hashicorp/helm"
-        version = ">= 2.0.1"
-      }
-      kubectl = {
-        source = "alekc/kubectl"
-        version = "2.0.4"
-      }
+  required_providers {
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.0.1"
     }
+    kubectl = {
+      source  = "alekc/kubectl"
+      version = "2.0.4"
+    }
+  }
 }
 
 resource "helm_release" "argocd" {
   name = "argocd"
 
   repository = "https://argoproj.github.io/argo-helm"
-  chart = "argo-cd"
+  chart      = "argo-cd"
 
-  atomic = true
+  atomic           = true
   create_namespace = true
-  namespace = "argocd"
+  namespace        = "argocd"
 
-  recreate_pods = true
-  reuse_values = true
-  force_update = true
-  cleanup_on_fail = true
+  recreate_pods     = true
+  reuse_values      = true
+  force_update      = true
+  cleanup_on_fail   = true
   dependency_update = true
 
   values = [
@@ -40,7 +40,7 @@
     domain = var.domain
   })
 
-  depends_on = [ 
+  depends_on = [
     helm_release.argocd
   ]
 }
@@ -50,7 +50,7 @@
     domain = var.domain
   })
 
-  depends_on = [ 
+  depends_on = [
     helm_release.argocd,
     kubectl_manifest.argocd_ingress_certificate
   ]
modules/k8config/modules/argocd/variables.tf
--- old/modules/k8config/modules/argocd/variables.tf
+++ new/modules/k8config/modules/argocd/variables.tf
@@ -1,5 +1,5 @@
 variable "domain" {
-    description = "Root Domain For Service"
-    sensitive = true
-    type = string
+  description = "Root Domain For Service"
+  sensitive   = true
+  type        = string
 }
\ No newline at end of file
modules/k8config/modules/certmanager/main.tf
--- old/modules/k8config/modules/certmanager/main.tf
+++ new/modules/k8config/modules/certmanager/main.tf
@@ -1,36 +1,36 @@
 
 terraform {
-    required_providers {
-      kubernetes = {
-        source = "hashicorp/kubernetes"
-        version = "2.27.0"
-      }
-      helm = {
-        source  = "hashicorp/helm"
-        version = ">= 2.0.1"
-      }
-      kubectl = {
-        source = "alekc/kubectl"
-        version = "2.0.4"
-      }
+  required_providers {
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "2.27.0"
     }
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.0.1"
+    }
+    kubectl = {
+      source  = "alekc/kubectl"
+      version = "2.0.4"
+    }
+  }
 }
 
 resource "helm_release" "cert_manager" {
   name = "cert-manager"
 
   repository = "https://charts.jetstack.io"
-  chart = "cert-manager"
+  chart      = "cert-manager"
 
-  atomic = true
+  atomic           = true
   create_namespace = true
-  namespace = "cert-manager"
-  version = "v1.14.4"
+  namespace        = "cert-manager"
+  version          = "v1.14.4"
 
-  recreate_pods = true
-  reuse_values = true
-  force_update = true
-  cleanup_on_fail = true
+  recreate_pods     = true
+  reuse_values      = true
+  force_update      = true
+  cleanup_on_fail   = true
   dependency_update = true
 
   values = [
@@ -41,7 +41,7 @@
 # https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/
 resource "kubernetes_secret" "cloudflare_api_credentials" {
   metadata {
-    name = "cloudflare-api-token"
+    name      = "cloudflare-api-token"
     namespace = "cert-manager"
   }
 
@@ -49,9 +49,9 @@
     api-token = var.cf_token
   }
 
-  depends_on = [ 
+  depends_on = [
     helm_release.cert_manager
-   ]
+  ]
 }
 
 /**
@@ -85,7 +85,7 @@
 
   override_namespace = "cert-manager"
 
-  depends_on = [ 
+  depends_on = [
     helm_release.cert_manager,
     kubernetes_secret.cloudflare_api_credentials
   ]
@@ -98,7 +98,7 @@
 
   override_namespace = "cert-manager"
 
-  depends_on = [ 
+  depends_on = [
     helm_release.cert_manager,
     kubernetes_secret.cloudflare_api_credentials
   ]
modules/k8config/modules/certmanager/variables.tf
--- old/modules/k8config/modules/certmanager/variables.tf
+++ new/modules/k8config/modules/certmanager/variables.tf
@@ -1,11 +1,11 @@
 variable "cf_token" {
-    description = "CloudFlare API Auth Token"
-    sensitive = true
-    type = string
+  description = "CloudFlare API Auth Token"
+  sensitive   = true
+  type        = string
 }
 
 variable "cf_email" {
-    description = "CloudFlare Email Account"
-    sensitive = true
-    type = string
+  description = "CloudFlare Email Account"
+  sensitive   = true
+  type        = string
 }
\ No newline at end of file
modules/k8config/modules/crds/main.tf
--- old/modules/k8config/modules/crds/main.tf
+++ new/modules/k8config/modules/crds/main.tf
@@ -1,25 +1,25 @@
 terraform {
-    required_providers {
-      kubectl = {
-        source = "alekc/kubectl"
-        version = "2.0.4"
-      }
+  required_providers {
+    kubectl = {
+      source  = "alekc/kubectl"
+      version = "2.0.4"
     }
+  }
 }
 
 
 /* Cert Manager */
 resource "kubectl_manifest" "cert-manager-crds" {
-    for_each  = toset(split("---", file("${abspath(path.module)}/res/cert-manager-1.14.4/cert-manager.crds.yaml")))
-    yaml_body = each.value
-    server_side_apply = true
+  for_each          = toset(split("---", file("${abspath(path.module)}/res/cert-manager-1.14.4/cert-manager.crds.yaml")))
+  yaml_body         = each.value
+  server_side_apply = true
 }
 
 
 /* Prometheus */
 resource "kubectl_manifest" "prometheus-crds" {
-  for_each  = fileset("${abspath(path.module)}/res/prometheus-25.19.1", "*.yaml")
-  yaml_body = file("${abspath(path.module)}/res/prometheus-25.19.1/${each.value}")
+  for_each          = fileset("${abspath(path.module)}/res/prometheus-25.19.1", "*.yaml")
+  yaml_body         = file("${abspath(path.module)}/res/prometheus-25.19.1/${each.value}")
   server_side_apply = true
 }
 
modules/k8config/modules/prometheus/main.tf
--- old/modules/k8config/modules/prometheus/main.tf
+++ new/modules/k8config/modules/prometheus/main.tf
@@ -1,18 +1,18 @@
 terraform {
-    required_providers {
-      helm = {
-        source  = "hashicorp/helm"
-        version = ">= 2.0.1"
-      }
-      kubernetes = {
-        source = "hashicorp/kubernetes"
-        version = "2.27.0"
-      }
-      kubectl = {
-        source = "alekc/kubectl"
-        version = "2.0.4"
-      }
+  required_providers {
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.0.1"
     }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "2.27.0"
+    }
+    kubectl = {
+      source  = "alekc/kubectl"
+      version = "2.0.4"
+    }
+  }
 }
 
 /*
@@ -44,25 +44,25 @@
   name = "kube-prometheus-stack"
 
   repository = "https://prometheus-community.github.io/helm-charts"
-  chart = "kube-prometheus-stack"
+  chart      = "kube-prometheus-stack"
 
-  atomic = true
+  atomic           = true
   create_namespace = true
-  namespace = "prometheus"
+  namespace        = "prometheus"
 
   version = "v58.0.0"
 
-  recreate_pods = true
-  reuse_values = true
-  force_update = true
-  cleanup_on_fail = true
+  recreate_pods     = true
+  reuse_values      = true
+  force_update      = true
+  cleanup_on_fail   = true
   dependency_update = true
 
   values = [
     file("${abspath(path.module)}/res/kube-prometheus-stack-values.yaml")
   ]
 
-  depends_on = [ 
+  depends_on = [
     kubernetes_namespace.prometheus_namespace
   ]
 }
@@ -75,7 +75,7 @@
     domain = var.domain
   })
 
-  depends_on = [ 
+  depends_on = [
     helm_release.kube-prometheus-stack
   ]
 }
@@ -85,7 +85,7 @@
     domain = var.domain
   })
 
-  depends_on = [ 
+  depends_on = [
     helm_release.kube-prometheus-stack,
     kubectl_manifest.prometheus_ingress_certificate,
     kubectl_manifest.prometheus_dashboard_auth_middleware
@@ -93,14 +93,14 @@
 }
 
 resource "random_password" "prometheus_dahsboard_password" {
-  length           = 25
-  special          = false
+  length  = 25
+  special = false
   #override_special = "!#$%&*()-_=+[]{}<>:?"
 }
 
 resource "kubernetes_secret" "prometheus_dashboard_auth_secret" {
   metadata {
-    name = "prometheus-dashboard-auth-secret"
+    name      = "prometheus-dashboard-auth-secret"
     namespace = "traefik"
   }
 
@@ -115,7 +115,7 @@
 resource "kubectl_manifest" "prometheus_dashboard_auth_middleware" {
   yaml_body = file("${abspath(path.module)}/res/prometheus-dashboard-auth.yaml")
 
-  depends_on = [ 
+  depends_on = [
     kubernetes_secret.prometheus_dashboard_auth_secret
   ]
 }
@@ -130,14 +130,14 @@
 
 /* Grafana */
 resource "random_password" "grafana_password" {
-  length           = 25
-  special          = false
+  length  = 25
+  special = false
   #override_special = "!#$%&*()-_=+[]{}<>:?"
 }
 
 resource "kubernetes_secret" "grafana_dashboard_credentials" {
   metadata {
-    name = "grafana-dashboard-admin-credentials"
+    name      = "grafana-dashboard-admin-credentials"
     namespace = "prometheus"
   }
 
@@ -146,9 +146,9 @@
     password = random_password.grafana_password.result
   }
 
-  depends_on = [ 
+  depends_on = [
     kubernetes_namespace.prometheus_namespace
-   ]
+  ]
 }
 
 resource "kubectl_manifest" "grafana_ingress_certificate" {
@@ -156,7 +156,7 @@
     domain = var.domain
   })
 
-  depends_on = [ 
+  depends_on = [
     helm_release.kube-prometheus-stack
   ]
 }
@@ -166,7 +166,7 @@
     domain = var.domain
   })
 
-  depends_on = [ 
+  depends_on = [
     helm_release.kube-prometheus-stack,
     kubectl_manifest.grafana_ingress_certificate
   ]
modules/k8config/modules/prometheus/variables.tf
--- old/modules/k8config/modules/prometheus/variables.tf
+++ new/modules/k8config/modules/prometheus/variables.tf
@@ -1,5 +1,5 @@
 variable "domain" {
-    description = "Root Domain For Service"
-    sensitive = true
-    type = string
+  description = "Root Domain For Service"
+  sensitive   = true
+  type        = string
 }
\ No newline at end of file
modules/k8config/modules/traefik/main.tf
--- old/modules/k8config/modules/traefik/main.tf
+++ new/modules/k8config/modules/traefik/main.tf
@@ -1,57 +1,57 @@
 terraform {
-    required_providers {
-      kubernetes = {
-        source = "hashicorp/kubernetes"
-        version = "2.27.0"
-      }
-      helm = {
-        source  = "hashicorp/helm"
-        version = ">= 2.0.1"
-      }
-      kubectl = {
-        source = "alekc/kubectl"
-        version = "2.0.4"
-      }
+  required_providers {
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "2.27.0"
     }
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.0.1"
+    }
+    kubectl = {
+      source  = "alekc/kubectl"
+      version = "2.0.4"
+    }
+  }
 }
 
 resource "helm_release" "traefik_ingress" {
   name = "traefik-ingress-controller"
 
   repository = "https://helm.traefik.io/traefik"
-  chart = "traefik"
+  chart      = "traefik"
 
-  atomic = true
+  atomic           = true
   create_namespace = true
-  namespace = "traefik"
+  namespace        = "traefik"
 
-  recreate_pods = true
-  reuse_values = true
-  force_update = true
-  cleanup_on_fail = true
+  recreate_pods     = true
+  reuse_values      = true
+  force_update      = true
+  cleanup_on_fail   = true
   dependency_update = true
 
   values = [
-    templatefile(format("%s%s", path.module , "/res/traefik-ingress-values.yaml.tftpl"), {
+    templatefile(format("%s%s", path.module, "/res/traefik-ingress-values.yaml.tftpl"), {
       domain = var.domain
     })
   ]
 
-  depends_on = [ 
+  depends_on = [
     #kubernetes_secret.cloudflare_api_credentials
   ]
 }
 
 
 resource "random_password" "password" {
-  length           = 25
-  special          = false
+  length  = 25
+  special = false
   #override_special = "!#$%&*()-_=+[]{}<>:?"
 }
 
 resource "kubernetes_secret" "traefik_dashboard_password" {
   metadata {
-    name = "traefik-dashboard-auth-password"
+    name      = "traefik-dashboard-auth-password"
     namespace = "traefik"
   }
 
@@ -62,7 +62,7 @@
     password = random_password.password.result
   }
 
-  depends_on = [ 
+  depends_on = [
     helm_release.traefik_ingress
-   ]
+  ]
 }
modules/k8config/modules/traefik/variables.tf
--- old/modules/k8config/modules/traefik/variables.tf
+++ new/modules/k8config/modules/traefik/variables.tf
@@ -1,18 +1,18 @@
 
 variable "cf_token" {
-    description = "CloudFlare API Auth Token"
-    sensitive = true
-    type = string
+  description = "CloudFlare API Auth Token"
+  sensitive   = true
+  type        = string
 }
 
 variable "cf_email" {
-    description = "CloudFlare Email Account"
-    sensitive = true
-    type = string
+  description = "CloudFlare Email Account"
+  sensitive   = true
+  type        = string
 }
 
 variable "domain" {
-    description = "Root Domain For Service"
-    sensitive = true
-    type = string
+  description = "Root Domain For Service"
+  sensitive   = true
+  type        = string
 }
\ No newline at end of file
modules/k8config/variables.tf
--- old/modules/k8config/variables.tf
+++ new/modules/k8config/variables.tf
@@ -1,33 +1,33 @@
 variable "cluster_name" {
   description = "Kubernetes Cluster Name"
-  type = string
+  type        = string
 }
 
 variable "cluster_id" {
   description = "Kubernetes Cluster ID as given by Digital Ocean"
-  type = string
+  type        = string
 }
 
 variable "do_token" {
-    description = "Digital Ocean API Auth Token"
-    sensitive = true
-    type = string
+  description = "Digital Ocean API Auth Token"
+  sensitive   = true
+  type        = string
 }
 
 variable "cf_token" {
-    description = "CloudFlare API Auth Token"
-    sensitive = true
-    type = string
+  description = "CloudFlare API Auth Token"
+  sensitive   = true
+  type        = string
 }
 
 variable "cf_email" {
-    description = "CloudFlare Email Account"
-    sensitive = true
-    type = string
+  description = "CloudFlare Email Account"
+  sensitive   = true
+  type        = string
 }
 
 variable "domain" {
-    description = "Root Domain For Service"
-    sensitive = true
-    type = string
+  description = "Root Domain For Service"
+  sensitive   = true
+  type        = string
 }
\ No newline at end of file
modules/k8infra/main.tf
--- old/modules/k8infra/main.tf
+++ new/modules/k8infra/main.tf
@@ -1,13 +1,13 @@
 terraform {
-    required_providers {
-      digitalocean = {
-          source = "digitalocean/digitalocean"
-      }
-      cloudflare = {
-          source = "cloudflare/cloudflare"
-          version = "~> 2.0"
-      }
+  required_providers {
+    digitalocean = {
+      source = "digitalocean/digitalocean"
     }
+    cloudflare = {
+      source  = "cloudflare/cloudflare"
+      version = "~> 2.0"
+    }
+  }
 }
 
 resource "random_id" "cluster_name" {
@@ -37,7 +37,7 @@
   description = "debenstack Kubernetes Cluster"
   purpose     = "Production debenstack Kubernetes"
   environment = "Production"
-  resources   = [
+  resources = [
     digitalocean_kubernetes_cluster.k8stack.urn
   ]
 }
modules/k8infra/variables.tf
--- old/modules/k8infra/variables.tf
+++ new/modules/k8infra/variables.tf
@@ -1,5 +1,5 @@
 variable "do_token" {
-    description = "Digital Ocean API Auth Token"
-    sensitive = true
-    type = string
+  description = "Digital Ocean API Auth Token"
+  sensitive   = true
+  type        = string
 }
\ No newline at end of file
providers.tf
--- old/providers.tf
+++ new/providers.tf
@@ -8,8 +8,8 @@
 }
 
 provider "kubernetes" {
-  host             = data.digitalocean_kubernetes_cluster.k8stack.endpoint
-  token            = data.digitalocean_kubernetes_cluster.k8stack.kube_config[0].token
+  host  = data.digitalocean_kubernetes_cluster.k8stack.endpoint
+  token = data.digitalocean_kubernetes_cluster.k8stack.kube_config[0].token
   cluster_ca_certificate = base64decode(
     data.digitalocean_kubernetes_cluster.k8stack.kube_config[0].cluster_ca_certificate
   )
@@ -26,8 +26,8 @@
 }
 
 provider "cloudflare" {
-    email = var.cf_email
-    api_token = var.cf_token
+  email     = var.cf_email
+  api_token = var.cf_token
 }
 
 provider "kubectl" {
@@ -36,6 +36,6 @@
   cluster_ca_certificate = base64decode(
     data.digitalocean_kubernetes_cluster.k8stack.kube_config[0].cluster_ca_certificate
   )
-  load_config_file = false
+  load_config_file  = false
   apply_retry_count = 3
 }
\ No newline at end of file
variables.tf
--- old/variables.tf
+++ new/variables.tf
@@ -1,24 +1,24 @@
 variable "do_token" {
-    description = "Digital Ocean API Auth Token"
-    sensitive = true
-    type = string
+  description = "Digital Ocean API Auth Token"
+  sensitive   = true
+  type        = string
 }
 
 
 variable "cf_token" {
-    description = "CloudFlare API Auth Token"
-    sensitive = true
-    type = string
+  description = "CloudFlare API Auth Token"
+  sensitive   = true
+  type        = string
 }
 
 variable "cf_email" {
-    description = "CloudFlare Email Account"
-    sensitive = true
-    type = string
+  description = "CloudFlare Email Account"
+  sensitive   = true
+  type        = string
 }
 
 variable "domain" {
-    description = "Root Domain For Service"
-    sensitive = true
-    type = string
+  description = "Root Domain For Service"
+  sensitive   = true
+  type        = string
 }

Copy link

Linter TFLint Failed for Workspace: default

Show Output
5 issue(s) found:

outputs.tf:1:1: Notice - `cluster_id` output has no description (terraform_documented_outputs)
outputs.tf:5:1: Notice - `cluster_name` output has no description (terraform_documented_outputs)
main.tf:38:1: Warning - Missing version constraint for provider "time" in `required_providers` (terraform_required_providers)
main.tf:3:22: Warning - Missing version constraint for provider "digitalocean" in `required_providers` (terraform_required_providers)
:0:0: Warning - terraform "required_version" attribute is required (terraform_required_version)

@bensoer bensoer merged commit d78ff73 into main Apr 17, 2024
6 of 9 checks passed
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

Successfully merging this pull request may close these issues.

1 participant