Skip to content

Commit

Permalink
split up kube resource and client helper methods
Browse files Browse the repository at this point in the history
Signed-off-by: Robert Detjens <[email protected]>
  • Loading branch information
detjensrobert committed Jan 12, 2025
1 parent 255845c commit 813d8d9
Show file tree
Hide file tree
Showing 5 changed files with 131 additions and 35 deletions.
46 changes: 41 additions & 5 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ rust-s3 = { version = "0.35.1", default-features = false, features = [
"fail-on-err",
"tokio-rustls-tls",
] }
ureq = "2.12.1"


[dev-dependencies]
Expand Down
2 changes: 0 additions & 2 deletions src/asset_files/setup_manifests/cert-manager.helm.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,10 @@ metadata:
spec:
repo: https://charts.jetstack.io
chart: cert-manager
version: v1.15.1

targetNamespace: ingress
createNamespace: true

valuesContent: |
crds:
enabled: true
keep: true
54 changes: 31 additions & 23 deletions src/clients.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
// Builders for the various client structs for Docker/Kube etc.

use std::error::Error;

use anyhow::{anyhow, bail, Ok, Result};
use anyhow::{anyhow, bail, Context, Error, Result};
use bollard;
use futures_util::TryFutureExt;
use kube::{
Expand Down Expand Up @@ -79,25 +77,25 @@ pub async fn kube_client(profile: &config::ProfileConfig) -> Result<kube::Client

// check kube api readiness endpoint to make sure its reachable
let ready_req = http::Request::get("/readyz").body(vec![]).unwrap();
if client.request_text(ready_req).await.map_err(|e| {
let ready_resp = client
.request_text(ready_req)
.await
// change 'Connection refused' error into something more helpful
// anyhow!("could not connect to Kubernetes (is KUBECONFIG or KUBECONTEXT correct?)")
e
})? != "ok"
{
bail!("kubernetes is not ready")
.map_err(|e| {
anyhow!("could not connect to Kubernetes (is KUBECONFIG or KUBECONTEXT correct?)")
})?;

if ready_resp != "ok" {
bail!("Kubernetes is not ready")
};

Ok(client)
}

/// Create a Kube API client for the passed object's resource type
pub async fn kube_api_for(
pub async fn kube_resource_for(
kube_object: &DynamicObject,
client: kube::Client,
) -> Result<kube::Api<DynamicObject>> {
let ns = kube_object.metadata.namespace.as_deref();

client: &kube::Client,
) -> Result<(ApiResource, ApiCapabilities)> {
let gvk = if let Some(tm) = &kube_object.types {
GroupVersionKind::try_from(tm)?
} else {
Expand All @@ -108,15 +106,25 @@ pub async fn kube_api_for(
};

let name = kube_object.name_any();
let (resource, caps) = kube::discovery::pinned_kind(&client, &gvk)

kube::discovery::pinned_kind(&client, &gvk)
.await
// .with_context(|| {
// format!(
// "could not find resource type {:?} on cluster",
// kube_object.types.unwrap_or(TypeMeta::default())
// )
// })
?;
.with_context(|| {
format!(
"could not find resource type {:?} on cluster",
kube_object.types.clone().unwrap_or(TypeMeta::default())
)
})
}

/// Create a Kube API client for the passed object's resource type
pub async fn kube_api_for(
kube_object: &DynamicObject,
client: kube::Client,
) -> Result<kube::Api<DynamicObject>> {
let ns = kube_object.metadata.namespace.as_deref();

let (resource, caps) = kube_resource_for(&kube_object, &client).await?;

if caps.scope == kube::discovery::Scope::Cluster {
Ok(kube::Api::all_with(client, &resource))
Expand Down
63 changes: 58 additions & 5 deletions src/cluster_setup/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,14 @@ use k8s_openapi::{
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
};
use kube::api::{DynamicObject, Patch, PatchParams};
use kube::runtime::WatchStreamExt;
use kube::{Api, ResourceExt};
use serde;
use serde_yml;
use simplelog::*;
use ureq;

use crate::clients::{kube_api_for, kube_client};
use crate::clients::{kube_api_for, kube_client, kube_resource_for};
use crate::configparser::{config, get_config, get_profile_config};

// Deploy cluster resources needed for challenges to work.
Expand Down Expand Up @@ -56,7 +58,7 @@ pub async fn install_ingress(profile: &config::ProfileConfig) -> Result<()> {
const CHART_YAML: &str = include_str!("../asset_files/setup_manifests/ingress-nginx.helm.yaml");

// TODO: watch for helm chart manifest to apply correctly
apply_manifest_yaml(client, CHART_YAML).await
apply_helm_crd(client, CHART_YAML).await
}

pub async fn install_certmanager(profile: &config::ProfileConfig) -> Result<()> {
Expand All @@ -65,8 +67,24 @@ pub async fn install_certmanager(profile: &config::ProfileConfig) -> Result<()>
let client = kube_client(profile).await?;

const CHART_YAML: &str = include_str!("../asset_files/setup_manifests/cert-manager.helm.yaml");
apply_manifest_yaml(client.clone(), CHART_YAML).await?;

apply_helm_crd(client.clone(), CHART_YAML).await?;

// install cert-manager from upstream static manifest instead of helm chart.
// the helm install gets confused when its CRDs already exist, so this is
// more reliable against running cluster-setup multiple times.
// EDIT: this is not the case when `crds.keep=false` so :shrug:

// let certmanager_manifest = ureq::get(
// "https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml",
// )
// .call()
// .context("could not download cert-manager manifest from Github release")?
// .into_string()?
// // deploy this into ingress namespace with other resources
// .replace("namespace: cert-manager", "namespace: ingress");
// apply_manifest_yaml(client.clone(), &certmanager_manifest);

// letsencrypt and letsencrypt-staging
const ISSUERS_YAML: &str =
include_str!("../asset_files/setup_manifests/letsencrypt.issuers.yaml");
apply_manifest_yaml(client, ISSUERS_YAML).await
Expand All @@ -78,7 +96,42 @@ pub async fn install_extdns(profile: &config::ProfileConfig) -> Result<()> {
let client = kube_client(profile).await?;

const CHART_YAML: &str = include_str!("../asset_files/setup_manifests/external-dns.helm.yaml");
apply_manifest_yaml(client, CHART_YAML).await
apply_helm_crd(client, CHART_YAML).await
}

//
// install helpers
//

// Apply helm chart manifest and wait for deployment status
/// Apply a helm-controller chart manifest, then (eventually) wait for the
/// chart's deploy job to finish.
///
/// Currently only the apply happens; the status watch is stubbed out (see the
/// TODO below), so this returns as soon as the manifest is accepted by the
/// API server.
async fn apply_helm_crd(client: kube::Client, manifest: &str) -> Result<()> {
    apply_manifest_yaml(client.clone(), manifest).await?;

    // now wait for the deploy job to run and update status

    // pull out name and namespace from yaml string
    // this will only get a single manifest from the install_* functions,
    // so this unwrapping is safe:tm:
    let chart_crd: DynamicObject = serde_yml::from_str(manifest)?;
    debug!(
        "waiting for chart deployment {} {}",
        chart_crd
            .namespace()
            // _else form: don't allocate the fallback String unless the
            // namespace is actually missing (clippy::or_fun_call)
            .unwrap_or_else(|| "[default namespace]".to_string()),
        chart_crd.name_any()
    );

    // Discovery results are not consumed until the status watch below is
    // implemented; underscore-prefix keeps the wiring in place without
    // unused-variable warnings.
    let (_chart_resource, _caps) = kube_resource_for(&chart_crd, &client).await?;
    let chart_api = kube_api_for(&chart_crd, client).await?;

    let watch_conf = kube::runtime::watcher::Config::default();
    // NOTE: watcher streams are lazy — this does nothing until polled, which
    // the TODO below still needs to implement.
    let _watcher = kube::runtime::metadata_watcher(chart_api, watch_conf);

    // TODO: actually wait for chart status to change...
    // need to get job name from chart resource `.status.jobName`, and look at
    // status of job. helm-controller does not update status of chart CRD :/

    Ok(())
}

/// Apply multi-document manifest file
Expand Down

0 comments on commit 813d8d9

Please sign in to comment.