From 6d4f741eefae365f6d3ba75700e1f35556268aa8 Mon Sep 17 00:00:00 2001
From: Stefan Matting
Date: Tue, 12 Sep 2023 17:22:59 +0200
Subject: [PATCH] wip (undo me)

---
 ansible/roles-external/ansible-minio |   2 +-
 bin/offline-cluster.sh               |  14 +---
 offline/docs_ubuntu_22.04.md         | 117 ++++++++++++++-------------
 offline/ubuntu22.04_installation.md  |   2 +-
 4 files changed, 66 insertions(+), 69 deletions(-)

diff --git a/ansible/roles-external/ansible-minio b/ansible/roles-external/ansible-minio
index 22ab28f75..2c0244b35 160000
--- a/ansible/roles-external/ansible-minio
+++ b/ansible/roles-external/ansible-minio
@@ -1 +1 @@
-Subproject commit 22ab28f75c007a0c48dc47db574773773ef19d22
+Subproject commit 2c0244b352f08e3d55d80939c839686b951ba69f
diff --git a/bin/offline-cluster.sh b/bin/offline-cluster.sh
index 0cfb0c0b6..8fe1acd45 100755
--- a/bin/offline-cluster.sh
+++ b/bin/offline-cluster.sh
@@ -15,11 +15,8 @@ set -x
 # If this step fails partway, and you know that parts of it completed, the `--skip-tags debs,binaries,containers,containers-helm,containers-other` tags may come in handy.
 # ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/setup-offline-sources.yml

-# Kubernetes, part 1
-#
 # Run kubespray until docker is installed and runs. This allows us to preseed the docker containers that
-# are part of the offline bundle:
-#
+# are part of the offline bundle
 # ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/kubernetes.yml --tags bastion,bootstrap-os,preinstall,container-engine

 # Install docker on the restund nodes
@@ -37,13 +34,10 @@ set -x
 # Run the rest of kubespray. This should bootstrap a kubernetes cluster successfully:
 # ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/kubernetes.yml --skip-tags bootstrap-os,preinstall,container-engine

-# ./bin/fix_default_router.sh
+#./bin/fix_default_router.sh

 # Deploy all other services which don't run in kubernetes.
-# ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/cassandra.yml
+ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/cassandra.yml
 # ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/elasticsearch.yml
 # ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/restund.yml
-ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/minio.yml
-
-# TODO: this was not in the docs
-# ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/helm_external.yml
\ No newline at end of file
+# ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/minio.yml
\ No newline at end of file
diff --git a/offline/docs_ubuntu_22.04.md b/offline/docs_ubuntu_22.04.md
index ffedf8243..2c89ed2b4 100644
--- a/offline/docs_ubuntu_22.04.md
+++ b/offline/docs_ubuntu_22.04.md
@@ -13,9 +13,13 @@ docker](https://docker.com) for instructions.
 On ubuntu 22.04, connected to the internet:

 ```
-sudo apt install docker.io
-sudo systemctl enable docker
-sudo systemctl start docker
+sudo bash -c '
+set -eo pipefail;
+
+apt install docker.io;
+systemctl enable docker;
+systemctl start docker;
+'
 ```

 Ensure the user you are using for the install has permission to run docker, or add 'sudo' to the docker commands below.
@@ -386,68 +390,60 @@ Now you are ready to start deploying services.
 #### WORKAROUND: dependency (TODO: move this somewhere else?)
 some ubuntu systems do not have GPG by default. wire assumes this is already present. ensure you have gpg installed on all of your nodes before continuing to the next step.
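+For example, you can check all nodes at once with an ad-hoc ansible command (a sketch; it assumes the `d` container wrapper exposes the `ansible` binary alongside `ansible-playbook`):
+
+```
+d ansible all -i ./ansible/inventory/offline/hosts.ini -m shell -a "gpg --version | head -1"
+```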
-### Deploying with Ansible
+### Deploy Kubernetes and all ansible-managed services

-In order to deploy all the ansible-managed services run:
+In order to deploy Kubernetes and all the ansible-managed services, run:

 ```
 # d ./bin/offline-cluster.sh
 ```
-
 In case any of the steps in this script fail, see the notes in the comments that accompany each step.

+#### Troubleshooting restund
+In case the restund firewall fails to start, fix it as follows.

-#### Ensuring kubernetes is healthy.
-
-Ensure the cluster comes up healthy. The container also contains kubectl, so check the node status:
+On each ansnode you set in the `[restund]` section of the `hosts.ini` file,
+delete the outbound rule to 172.16.0.0/12 (using the rule number shown by `ufw status numbered`):

 ```
-d kubectl get nodes -owide
-```
-They should all report ready.
-
-
-#### Non-kubernetes services (restund, cassandra, elasticsearch, minio)
-Now, deploy all other services which don't run in kubernetes.
-
-```
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/cassandra.yml
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/elasticsearch.yml
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/minio.yml
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/restund.yml
+sudo bash -c '
+set -eo pipefail;
+
+ufw status numbered;
+ufw delete ;
+'
 ```

-### ERROR: after you install restund, the restund firewall will fail to start.
+#### Ensuring kubernetes is healthy.

-On each ansnode you set in the `[restund]` section of the `hosts.ini` file
+Ensure the cluster comes up healthy. The container also contains kubectl, so check the node status:

-delete the outbound rule to 172.16.0.0/12
 ```
-sudo ufw status numbered
-sudo ufw delete
+d kubectl get nodes -owide
 ```
+They should all report ready.
+
 #### enable the ports colocated services run on:

-cassandra:
-```
-sudo ufw allow 9042/tcp
-sudo ufw allow 9160/tcp
-sudo ufw allow 7000/tcp
-sudo ufw allow 7199/tcp
 ```
+sudo bash -c '
+set -eo pipefail;

-elasticsearch:
-```
-sudo ufw allow 9300/tcp
-sudo ufw allow 9200/tcp
-```
+# cassandra
+ufw allow 9042/tcp;
+ufw allow 9160/tcp;
+ufw allow 7000/tcp;
+ufw allow 7199/tcp;

-minio:
-```
-sudo ufw allow 9000/tcp
-sudo ufw allow 9092/tcp
-```
+# elasticsearch
+ufw allow 9300/tcp;
+ufw allow 9200/tcp;
+
+# minio
+ufw allow 9000/tcp;
+ufw allow 9092/tcp;
+'
+```
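+You can verify the resulting rule set on each node afterwards, for example by filtering for the cassandra ports (a sketch; adjust the port list to the node's role):
+
+```
+sudo ufw status | grep -E '9042|9160|7000|7199'
+```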

 Afterwards, run the following playbook to create helm values that tell our helm charts what the IP addresses of cassandra, elasticsearch and minio are.

@@ -469,17 +465,12 @@ d helm install elasticsearch-external ./charts/elasticsearch-external --values .
 d helm install minio-external ./charts/minio-external --values ./values/minio-external/values.yaml
 ```

-Also copy the values file for `databases-ephemeral` as it is required for the next step:
-
-```
-cp values/databases-ephemeral/prod-values.example.yaml values/databases-ephemeral/values.yaml
-```
-
 #### Deploying stateless dependencies
 Next, we have 4 services that need to be deployed but need no additional configuration:
 ```
 d helm install fake-aws ./charts/fake-aws --values ./values/fake-aws/prod-values.example.yaml
 d helm install demo-smtp ./charts/demo-smtp --values ./values/demo-smtp/prod-values.example.yaml
+cp values/databases-ephemeral/prod-values.example.yaml values/databases-ephemeral/values.yaml
 d helm install databases-ephemeral ./charts/databases-ephemeral/ --values ./values/databases-ephemeral/values.yaml
 d helm install reaper ./charts/reaper
 ```
@@ -531,7 +522,7 @@ d helm install wire-server ./charts/wire-server --timeout=15m0s --values ./value
 This component requires no configuration, and is a requirement for all of the methods we support for getting traffic into your cluster:

 ```
-mv ./values/ingress-nginx-controller/prod-values.example.yaml ./values/ingress-nginx-controller/values.yaml
+cp ./values/ingress-nginx-controller/prod-values.example.yaml ./values/ingress-nginx-controller/values.yaml
 d helm install ingress-nginx-controller ./charts/ingress-nginx-controller --values ./values/ingress-nginx-controller/values.yaml
 ```

@@ -557,6 +548,9 @@ They assume all traffic destined to your wire cluster is going through a single
 Here, you should check the ethernet interface name for your outbound IP.

 ```
 ip ro | sed -n "/default/s/.* dev \([enpso0-9]*\) .*/export OUTBOUNDINTERFACE=\1/p"
+
+export OUTBOUNDINTERFACE=$(ip ro | sed -n "/default/s/.* dev \([enpso0-9]*\) .*/\1/p")
+echo "OUTBOUNDINTERFACE is $OUTBOUNDINTERFACE"
 ```

 This will return a shell command setting a variable to your default interface. copy and paste it. next, supply your outside IP address:
@@ -568,8 +562,10 @@ Select one of your kubernetes nodes that you are fine with losing service if it
 Make sure it is the same pod on which ingress-nginx is running:

-1. Run `d kubectl get pods -o wide`
-2. See on which node `ingress-nginx` is running
+1. Find out on which node `ingress-nginx` is running:
+```
+d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName
+```
 3. Get the IP of this node by running `ip address` on that node
 4. Use that IP for $KUBENODEIP

 ```
 export KUBENODEIP=
 ```

-then, if your box owns the public IP (you can see the IP in `ip addr`), run the following:
+then, in case the server owns the public IP (i.e. you can see the IP in `ip addr`), run the following:

 ```
-sudo iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p tcp --dport 80 -j DNAT --to-destination $KUBENODEIP:31772
-sudo iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p tcp --dport 443 -j DNAT --to-destination $KUBENODEIP:31773
+sudo bash -c "
+set -eo pipefail;
+
+echo meh: $OUTBOUNDINTERFACE
+# iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p tcp --dport 80 -j DNAT --to-destination $KUBENODEIP:31772;
+# iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p tcp --dport 443 -j DNAT --to-destination $KUBENODEIP:31773;
+"
 ```

-If your box is being forwarded traffic from another firewall (you do not see the IP in `ip addr`), run the following:
+If your server is being forwarded traffic from another firewall (you do not see the IP in `ip addr`), run the following:

 ```
 sudo iptables -t nat -A PREROUTING -i $OUTBOUNDINTERFACE -p tcp --dport 80 -j DNAT --to-destination $KUBENODEIP:31772
 sudo iptables -t nat -A PREROUTING -i $OUTBOUNDINTERFACE -p tcp --dport 443 -j DNAT --to-destination $KUBENODEIP:31773
 ```

+If you are running a UFW firewall, make sure to add the above iptables rules to /etc/ufw/before.rules, so they persist after a reboot.
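+A sketch of the corresponding /etc/ufw/before.rules entries (hypothetical values; substitute your actual interface, node IP and ports, and place the block above the existing filter rules):
+
+```
+*nat
+:PREROUTING ACCEPT [0:0]
+-A PREROUTING -i <OUTBOUNDINTERFACE> -p tcp --dport 80 -j DNAT --to-destination <KUBENODEIP>:31772
+-A PREROUTING -i <OUTBOUNDINTERFACE> -p tcp --dport 443 -j DNAT --to-destination <KUBENODEIP>:31773
+COMMIT
+```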
+
+
 If you are running a UFW firewall, make sure to allow inbound traffic on 443 and 80:

 ```
 sudo ufw enable
 sudo ufw allow in on $OUTBOUNDINTERFACE proto tcp to any port 443
 sudo ufw allow in on $OUTBOUNDINTERFACE proto tcp to any port 80
 ```

-if you are running a UFW firewall, make sure to add the above iptables rules to /etc/ufw/before.rules, so they persist after a reboot.

 ###### Mirroring the public IP

 cert-manager has a requirement on being able to reach the kubernetes on it's external IP. this is trouble, because in most security concious environments, the external IP is not owned by any of the kubernetes hosts.

diff --git a/offline/ubuntu22.04_installation.md b/offline/ubuntu22.04_installation.md
index ebe0e389e..de0f7b53f 100644
--- a/offline/ubuntu22.04_installation.md
+++ b/offline/ubuntu22.04_installation.md
@@ -379,7 +379,7 @@ Layout - English, Variant -> English --> Done
 Select Ubuntu Server (minimized) --> Done

-Network connections --> Make suere you get something like "DHCPv4 172.16.0.8/24" --> Done
+Network connections --> Make sure you get something like "DHCPv4 172.16.0.8/24" --> Done

 Proxy Address - dont change anything --> Done