diff --git a/bin/newvm.sh b/bin/newvm.sh
deleted file mode 100644
index 5b8b38db4..000000000
--- a/bin/newvm.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-
-usage() { echo "Usage: $0 usage:" && grep ") \#" "$0" && echo " " 1>&2; exit 1; }
-
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
-while getopts ":qm:d:c:" o; do
-  case "${o}" in
-    d) # set amount of disk, in gigabytes
-      d=${OPTARG}
-      ;;
-    m) # set amount of memory, in megabytes
-      m=${OPTARG}
-      ;;
-    c) # set amount of CPU cores.
-      c=${OPTARG}
-      ;;
-    q) # use qemu instead of kvm.
-      q=1
-      ;;
-    *) # un-handled cases
-      usage
-      ;;
-  esac
-done
-shift $((OPTIND-1))
-
-if [ -z "${d}" ] || [ -z "${m}" ]; then
-  echo "ERROR: disk size (-d) and memory (-m) are required." 1>&2
-  usage
-fi
-
-VM_NAME=$1
-
-if [ -n "$2" ]; then
-  echo "ERROR: too many arguments!" 1>&2
-  usage
-fi
-
-if [ -z "$VM_NAME" ]; then
-  echo "ERROR: no VM name specified?" 1>&2
-  usage
-fi
-
-if [ ! -f ubuntu.iso ]; then
-  echo "ERROR: no ubuntu.iso found in $SCRIPT_DIR" 1>&2
-  echo "no actions performed."
-  exit 1
-fi
-
-if [ ! -d "./kvmhelpers" ]; then
-  echo "ERROR: could not find kvmhelpers directory." 1>&2
-  echo "no actions performed."
-  exit 1
-fi
-
-if [ -d "$VM_NAME" ]; then
-  echo "ERROR: directory for vm $VM_NAME already exists." 1>&2
-  echo "no actions performed."
-  exit 1
-fi
-
-echo "disk size = ${d} gigabytes"
-echo "memory = ${m} megabytes"
-echo "CPUs: ${c}"
-echo "hostname: $VM_NAME"
-if [ -n "$q" ]; then
-  echo "USE QEMU"
-fi
-
-# exit 0
-
-mkdir "$VM_NAME"
-cp ./kvmhelpers/* "$VM_NAME"/
-qemu-img create "$VM_NAME"/drive-c.img "${d}"G
-sed -i "s/MEM=.*/MEM=${m}/" "$VM_NAME"/start_kvm.sh
-sed -i "s@CDROM=.*@CDROM=../ubuntu.iso@" "$VM_NAME"/start_kvm.sh
-sed -i "s/^export eth1=/#export eth1=/" "$VM_NAME"/start_kvm.sh
-sed -i "s/^CPUS=.*/CPUS=${c}/" "$VM_NAME"/start_kvm.sh
-sed -i 's/\(.*\)CURSES=.*/\1CURSES="-nographic -device sga"/' "$VM_NAME"/start_kvm.sh
-
-if [ -n "$q" ]; then
-  echo "forcing QEMU."
-  sed -i "s=/usr/bin/kvm=/usr/bin/qemu-system-x86_64=" "$VM_NAME"/start_kvm.sh
-fi
diff --git a/kvmhelpers/GUESTBRIDGE-down.sh b/kvmhelpers/GUESTBRIDGE-down.sh
deleted file mode 100755
index 4d535b5a1..000000000
--- a/kvmhelpers/GUESTBRIDGE-down.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-
-. ./GUESTBRIDGE-vars.sh
-
-$SUDO "$IP" link set "$1" down promisc off
-#$SUDO $IFCONFIG $1 0.0.0.0 promisc down
-
-# remove ourselves from the bridge.
-$SUDO "$BRCTL" delif "$BRIDGE" "$1"
-
-# this script is not responsible for destroying the tap device.
-#ip tuntap del dev $1
-
-BRIDGEDEV=$($SUDO "$BRCTL" show | grep -E ^"$BRIDGE" | grep tap)
-
-if [ -z "$BRIDGEDEV" ]; then
-  {
-    # we are the last one out. burn the bridge.
-    $SUDO "$IFCONFIG" "$BRIDGE" down
-    $SUDO "$BRCTL" delif "$BRIDGE" "$1"
-    $SUDO "$BRCTL" delbr "$BRIDGE"
-  }
-fi
diff --git a/kvmhelpers/GUESTBRIDGE-vars.sh b/kvmhelpers/GUESTBRIDGE-vars.sh
deleted file mode 100755
index b773dc331..000000000
--- a/kvmhelpers/GUESTBRIDGE-vars.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-
-# The bridge shared by all VMs. If you change this, you should probably reboot.
-BRIDGE=br1
-export BRIDGE
-
-# The paths to binaries we use for bringing up and down the interface.
-BRCTL="/sbin/brctl"
-export BRCTL
-
-IP="/sbin/ip"
-export IP
-
-IFCONFIG="/sbin/ifconfig"
-export IFCONFIG
-
-SUDO="/usr/bin/sudo"
-export SUDO
-
-# none of the rest of this should matter.
-
-# The IP of the host system, on the host<->VM network, where we should provide services (dhcp, dns, ...) that the VMs can see.
-#BRIDGEIP=172.16.0.1
-# The broadcast address for the above network.
-#BRIDGEBROADCAST=172.16.0.255
-
-# 0 for true.
-# manage ISC DHCPD
-USEDHCP=1
-export USEDHCP
-
-# manage BIND
-USEDNS=1
-export USEDNS
-
-# Whether to assign an IP and use ufw to provide internet to the VMs using HOSTBRIDGE.
-HOSTROUTE=1
-export HOSTROUTE
-
diff --git a/kvmhelpers/GUESTBRIDGE.sh b/kvmhelpers/GUESTBRIDGE.sh
deleted file mode 100755
index 19de4c0a6..000000000
--- a/kvmhelpers/GUESTBRIDGE.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/sh
-
-USER=$(whoami)
-
-{
-
-  . ./GUESTBRIDGE-vars.sh
-
-  BRIDGEDEV=$($BRCTL show | grep -E ^"$BRIDGE")
-
-  if [ -n "$BRIDGEDEV" ]; then
-    {
-      $SUDO "$BRCTL" addif "$BRIDGE" "$1"
-      $SUDO "$IP" link set "$1" up promisc on
-    }
-  else
-    {
-      $SUDO "$BRCTL" addbr "$BRIDGE"
-      if [ "$HOSTROUTE" -eq "0" ]; then
-        $SUDO "$IP" addr add "$BRIDGEIP"/24 broadcast "$BRIDGEBROADCAST" dev "$BRIDGE"
-      fi
-      $SUDO "$BRCTL" stp "$BRIDGE" off
-      # $SUDO $IP tuntap add dev $1 mode tap user $USER
-      $SUDO "$IP" link set "$1" up promisc on
-      $SUDO "$BRCTL" addif "$BRIDGE" "$1"
-      $SUDO "$IP" link set "$BRIDGE" up
-      if [ "$USEDHCP" -eq "0" ]; then
-        $SUDO service isc-dhcp-server stop
-        $SUDO service isc-dhcp-server start
-        # workaround arno and fail2ban not working well together.
-        # $SUDO service fail2ban stop
-        # $SUDO service fail2ban start
-      fi
-      if [ "$USEDNS" -eq "0" ]; then
-        $SUDO service bind9 restart
-      fi
-    }
-  fi
-
-  if [ "$HOSTROUTE" -eq "0" ]; then
-    # Allow VMs to use ip masquerading on the host to contact the internet, as well as to have port forwards.
-    $SUDO service ufw restart
-  fi
-
-  echo "Bridge ifup completed."
-} > tapbridge.ifup 2>&1
diff --git a/kvmhelpers/HOSTBRIDGE-down.sh b/kvmhelpers/HOSTBRIDGE-down.sh
deleted file mode 100755
index 76237eb56..000000000
--- a/kvmhelpers/HOSTBRIDGE-down.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-. ./HOSTBRIDGE-vars.sh
-
-$SUDO "$IP" link set "$1" down promisc off
-#$SUDO $IFCONFIG $1 0.0.0.0 promisc down
-
-# remove ourselves from the bridge.
-$SUDO "$BRCTL" delif "$BRIDGE" "$1"
-
-# this script is not responsible for destroying the tap device.
-#ip tuntap del dev $1
-
-BRIDGEDEV=$($SUDO "$BRCTL" show | grep -E ^"$BRIDGE" | grep tap)
-
-if [ -z "$BRIDGEDEV" ]; then
-  {
-    # we are the last one out. burn the bridge.
-    $SUDO "$IFCONFIG" "$BRIDGE" down
-    $SUDO "$BRCTL" delif "$BRIDGE" "$1"
-    $SUDO "$BRCTL" delbr "$BRIDGE"
-  }
-fi
diff --git a/kvmhelpers/HOSTBRIDGE-vars.sh b/kvmhelpers/HOSTBRIDGE-vars.sh
deleted file mode 100755
index aad961191..000000000
--- a/kvmhelpers/HOSTBRIDGE-vars.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/sh
-# The bridge shared by all VMs using HOSTBRIDGE. If you change this, you should probably reboot.
-export BRIDGE=br0
-
-# The IP of the host system, on the host<->VM network, where we should provide services (dhcp, dns, ...) that the VMs can see.
-export BRIDGEIP=172.16.0.1
-# The broadcast address for the above network.
-export BRIDGEBROADCAST=172.16.0.255
-
-# 0 for true.
-# manage ISC DHCPD
-export USEDHCP=1
-# manage BIND
-export USEDNS=1
-# manage DNSMASQ
-export USEDNSMASQ=0
-
-# Whether to assign an IP and use ufw to provide internet to the VMs using HOSTBRIDGE.
-export HOSTROUTE=0
-
-# The paths to binaries we use for bringing up and down the interface.
-export BRCTL="/sbin/brctl"
-export IP="/sbin/ip"
-export IFCONFIG="/sbin/ifconfig"
-export SUDO="/usr/bin/sudo"
diff --git a/kvmhelpers/HOSTBRIDGE.sh b/kvmhelpers/HOSTBRIDGE.sh
deleted file mode 100755
index 2ba15cbd2..000000000
--- a/kvmhelpers/HOSTBRIDGE.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-USER=$(whoami)
-
-{
-
-  . ./HOSTBRIDGE-vars.sh
-
-  BRIDGEDEV=$($BRCTL show | grep -E ^"$BRIDGE")
-
-  if [ -n "$BRIDGEDEV" ]; then
-    {
-      $SUDO "$BRCTL" addif "$BRIDGE" "$1"
-      $SUDO "$IP" link set "$1" up promisc on
-    }
-  else
-    {
-      $SUDO "$BRCTL" addbr "$BRIDGE"
-      if [ "$HOSTROUTE" -eq "0" ]; then
-        $SUDO "$IP" addr add "$BRIDGEIP"/24 broadcast "$BRIDGEBROADCAST" dev "$BRIDGE"
-      fi
-      $SUDO "$BRCTL" stp "$BRIDGE" off
-      # $SUDO $IP tuntap add dev $1 mode tap user $USER
-      $SUDO "$IP" link set "$1" up promisc on
-      $SUDO "$BRCTL" addif "$BRIDGE" "$1"
-      $SUDO "$IP" link set "$BRIDGE" up
-      if [ "$USEDNSMASQ" -eq "0" ]; then
-        $SUDO service dnsmasq restart
-      fi
-      if [ "$USEDHCP" -eq "0" ]; then
-        $SUDO service isc-dhcp-server stop
-        $SUDO service isc-dhcp-server start
-        # workaround arno and fail2ban not working well together.
-        # $SUDO service fail2ban stop
-        # $SUDO service fail2ban start
-      fi
-      if [ "$USEDNS" -eq "0" ]; then
-        $SUDO service bind9 restart
-      fi
-    }
-  fi
-
-  if [ "$HOSTROUTE" -eq "0" ]; then
-    # Allow VMs to use ip masquerading on the host to contact the internet, as well as to have port forwards.
-    $SUDO service ufw restart
-  fi
-
-  echo "Bridge ifup completed."
-} >tapbridge.ifup 2>&1
diff --git a/kvmhelpers/README.md b/kvmhelpers/README.md
deleted file mode 100644
index 5185615b7..000000000
--- a/kvmhelpers/README.md
+++ /dev/null
@@ -1,340 +0,0 @@
-# Setting up a KVM network for WIRE:
-
-## Scope of this document:
-
-This document and the files contained in this directory contain instructions and code for setting up KVM virtual hosts and virtual networking, for the testing of WIRE and its dependencies.
-
-## Assumptions:
-
-We're going to assume basic command line skills, and that you have installed some version of Ubuntu, Debian, or a Debian derivative on a machine you plan on using as a hypervisor.
-
-## Installing KVM Virtual Machines
-
-### Preparation
-
-#### Verifying KVM extensions, and enabling them.
-
-First, make sure KVM is available and ready to use.
-
-* To see if your CPUs support it, see: https://vitux.com/how-to-check-if-your-processor-supports-virtualization-technology/
-  * We recommend method '2'.
-  * If method 2 does not tell you "KVM acceleration can be used", try method 3. If method 3 works, but method 2 does not, you need to enable virtualization in your BIOS.
-  * For loose directions on enabling virtualization in your BIOS, follow https://www.bleepingcomputer.com/tutorials/how-to-enable-cpu-virtualization-in-your-computer-bios/ .
-
-#### Install QEMU:
-
-QEMU is the application that lets us take advantage of KVM extensions.
-
-* To install QEMU:
-```
-sudo apt install qemu-kvm
-```
-
-##### Configuring a non-privileged user
-
-QEMU can be run as a user (suggested for security, but more complicated) or as the 'root' user.
-
-* If you want to run QEMU as a user, add your user to the 'kvm' system group, and ensure your user is in the sudo group.
-
-```
-# usermod -a -G sudo <username>
-$ sudo usermod -a -G kvm <username>
-```
-
-Make sure you log out, and back in again afterwards, to make these group changes take effect.
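-
-After logging back in, you can sanity-check that the group change took effect. A minimal sketch (the `kvm-ok` tool is an assumption on our part; it comes from the `cpu-checker` package referenced by the guide linked above):
-
-```
-# confirm your user is now in the kvm group
-groups | grep kvm
-# confirm the KVM device node exists
-ls -l /dev/kvm
-# optionally, re-run the acceleration check
-sudo apt install cpu-checker && kvm-ok
-```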
-
-#### Network Plans:
-
-When setting up a fake network of VMs for Wire, there are several ways you can hook up the VMs to each other, network-wise.
-
-For the purposes of this document, we are going to use:
-host <-> proxybox
-           |
-         admin
-           |
-       kubenode1
-           |
-       kubenode2
-           |
-       kubenode3
-           |
-       ansnode1
-           |
-       ansnode2
-           |
-       ansnode3
-
-This is to say, we are going to create a proxy machine which will be the only thing with internet access. In addition to this machine, we will have one node for administration tasks (during the install, and for maintenance activities), three for kubernetes, and three for non-kubernetes services, managed by ansible.
-
-We are going to refer to this as 'network plan 1'.
-
-### Preparing to install ubuntu on KVM
-
-* Make a directory to contain each of your virtual machines, inside of a parent directory. For example, to create the directories for network plan 1:
-```
-mkdir kvm
-mkdir kvm/proxybox
-mkdir kvm/admin
-mkdir kvm/kubenode1
-mkdir kvm/kubenode2
-mkdir kvm/kubenode3
-mkdir kvm/ansnode1
-mkdir kvm/ansnode2
-mkdir kvm/ansnode3
-```
-
-* Change into the kvm directory, and download an Ubuntu ISO:
-```
-cd kvm/
-wget http://releases.ubuntu.com/18.04/ubuntu-18.04.3-live-server-amd64.iso
-```
-
-* Create a virtual hard disk image, to serve as the disk of each of our virtual machines. We're going to make each disk the same size, 20 gigabytes:
-```
-sudo apt install qemu-utils
-cd kvm
-qemu-img create proxybox/drive-c.img 20G
-qemu-img create admin/drive-c.img 20G
-qemu-img create kubenode1/drive-c.img 20G
-qemu-img create kubenode2/drive-c.img 20G
-qemu-img create kubenode3/drive-c.img 20G
-qemu-img create ansnode1/drive-c.img 20G
-qemu-img create ansnode2/drive-c.img 20G
-qemu-img create ansnode3/drive-c.img 20G
-```
-
-### Copying helper scripts:
-
-The repository this file is in (https://github.com/wireapp/wire-server-deploy-networkless.git) contains this README.md. Alongside it are helper scripts, for managing QEMU and its network interfaces.
-
-The helper scripts consist of all of the files in the directory containing this readme that end in '.sh'. Copy them into the directories you are using to contain your virtual machines. For instance, with this repo checked out under our home directory in ~/wire-app/wire-server-deploy-networkless:
-
-```
-cd kvm
-cp ~/wire-app/wire-server-deploy-networkless/kvmhelpers/*.sh proxybox
-cp ~/wire-app/wire-server-deploy-networkless/kvmhelpers/*.sh admin
-cp ~/wire-app/wire-server-deploy-networkless/kvmhelpers/*.sh kubenode1
-cp ~/wire-app/wire-server-deploy-networkless/kvmhelpers/*.sh kubenode2
-cp ~/wire-app/wire-server-deploy-networkless/kvmhelpers/*.sh kubenode3
-cp ~/wire-app/wire-server-deploy-networkless/kvmhelpers/*.sh ansnode1
-cp ~/wire-app/wire-server-deploy-networkless/kvmhelpers/*.sh ansnode2
-cp ~/wire-app/wire-server-deploy-networkless/kvmhelpers/*.sh ansnode3
-```
-
-#### Choosing a user interface:
-If the system you are using has a graphical interface, and you elected to set up QEMU to be used by a non-privileged user, these helper scripts will use the graphical interface by default. If one of these conditions is not true, then these scripts will use the ncurses (text) interface. Should they choose wrong, there are settings in each start_kvm.sh script that you can change.
-
-#### Choosing networking, RAM, CPUs, and boot media:
-
-If you edit the 'start_kvm.sh' script in any of the directories that we're using to contain a VM, there are self-explanatory configuration options at the top of the file. So let me explain them. :)
-
-* The first user-editable option is MEM, or how much RAM you want to give your VM, in megabytes. At present, our testing requires 6144MB for ansnode[1-3], 3072MB for kubenode[1-3], and 2048MB for the admin node and proxybox.
-* The second option is CPUS, which sets how many CPUs you can see from inside of the VM. Note that this is not a hard reservation, so you can have up to two CPUs for each of your VMs, even if you only have two physical CPUs.
-* The third and fourth options are what files to use as the virtual CD-ROM and virtual hard disks.
-
-The final two options we're going to examine configure the networking. For each network card in our VM, there is a corresponding "eth=" line. There are currently two strategies available:
- * HOSTBRIDGE -- This network interface is for the VM to talk over ethernet to the machine the VM is running on.
- * GUESTBRIDGE -- This network interface is connected to a virtual switch, which has any other VM that uses this strategy also plugged into it.
-
-Following our example network plan, we're going to leave proxybox with one interface configured for HOSTBRIDGE so it has internet access, and one interface configured for GUESTBRIDGE, so the machines we are installing Wire on can communicate with it. We are going to comment out the HOSTBRIDGE interface on all other VMs, so that they only speak to the proxybox, via the GUESTBRIDGE.
-
-#### Configuring the physical host to provide networking:
-
-* Install bridge-utils, for GUESTBRIDGE and HOSTBRIDGE to work.
-```
-sudo apt install bridge-utils
-```
-
-##### LocalHost -> KVM
-== Skip this entire step if we are not providing internet and IP connectivity to any VM, AKA if you are not using HOSTBRIDGE ==
-
-For HOSTBRIDGE, we are going to install and configure an ip-masquerading firewall, a DHCP server, and a DNS server, so that VMs using the HOSTBRIDGE strategy can access the internet, through services on the host machine.
-
-* Install dependencies: the UFW firewall, ISC's DHCP server, and the Bind nameserver:
-```
-sudo apt install ufw isc-dhcp-server bind9
-```
-
-* Make sure we can connect on port 22 tcp so we can ssh into the hypervisor from the outside world, and from machines using HOSTBRIDGE.
-```
-sudo ufw allow 22/tcp
-```
-
-###### Sharing Bridge Devices:
-Each networking strategy requires ownership of a bridge device, in order to do its work. By default, these scripts are set up with 'br0' owned by the HOSTBRIDGE strategy, and 'br1' owned by the GUESTBRIDGE strategy. This is fine if you're the only one on the box, and you only want to follow these directions one time. If someone else is using br0 or br1, or has followed these instructions, you're going to need to change the bridge devices assigned to the strategies for the network plan you're installing.
-
-Assuming someone else, or you, have followed these instructions already, and you don't want to interfere with the 'other' set of strategies, go to each HOSTBRIDGE-vars.sh, and change the 'BRIDGE=br0' line to read 'BRIDGE=br2'. Likewise, go to each GUESTBRIDGE-vars.sh file, and change 'BRIDGE=br1' to 'BRIDGE=br3'. This will ensure you don't interfere with the br0 and br1 owned by the strategies of the already installed scripts.
-
-Or, if you trust my quick and dirty sed/bash scripts:
-```
-for each in $(find ./ -name HOSTBRIDGE-vars.sh) ; do { sed -i "s/BRIDGE=br0/BRIDGE=br2/" $each ; } done;
-for each in $(find ./ -name GUESTBRIDGE-vars.sh) ; do { sed -i "s/BRIDGE=br1/BRIDGE=br3/" $each ; } done;
-```
-
-###### Sharing internet on the HOSTBRIDGE via IP Masquerading
-
-We're using the [UFW](https://wiki.debian.org/Uncomplicated%20Firewall%20%28ufw%29) product to provide internet to any machine using the HOSTBRIDGE strategy. Similar to the 'Sharing Bridge Devices' step, we're going to have to be a bit aware of our neighbors when picking an IP block to use between the physical machine, and virtual machines on HOSTBRIDGE. We're also going to have to pick something that does not conflict with any of the other interfaces on this machine, lest we accidentally mess up routing for the interface we use to get internet, and access TO the machine.
-
-Usually, I pick '172.16.0/24' as a safe default. Docker picks '172.17.0/24', so I suggest avoiding that. For an idea of what your options are, look at the interface of the machine you're getting internet access via, and see if it's on a [Private Network](https://en.wikipedia.org/wiki/Private_network). Select a '/24' network (that is, a range of 256 IPs in a block, in one of the Private IPv4 address ranges) that none of your coworkers are using on this box, and use it for the following steps.
-
-####### Configuring HOSTBRIDGE-vars.sh
-
-Once you have selected the IP subnet you're going to use on your HOSTBRIDGE, you need to change some settings in HOSTBRIDGE-vars.sh. Specifically:
-```
-# The IP of the host system, on the host<->VM network, where we should provide services (dhcp, dns, ...) that the VMs can see.
-BRIDGEIP=172.16.0.1
-# The broadcast address for the above network.
-BRIDGEBROADCAST=172.16.0.255
-```
-
-As with the last step, to change these, you can either edit each HOSTBRIDGE-vars.sh file, or use some quick and dirty sed/bash scripts:
-```
-for each in $(find ./ -name HOSTBRIDGE-vars.sh) ; do { sed -i "s/BRIDGEIP=172.16.0.1/BRIDGEIP=172.18.0.1/" $each ; } done;
-for each in $(find ./ -name HOSTBRIDGE-vars.sh) ; do { sed -i "s/BRIDGEBROADCAST=172.16.0.255/BRIDGEBROADCAST=172.18.0.255/" $each ; } done;
-```
-
-####### Configuring IP Masquerading
-
-* Make sure 'DEFAULT_FORWARD_POLICY="DROP"' has been changed to 'DEFAULT_FORWARD_POLICY="ACCEPT"' in /etc/default/ufw
-
-* Make sure /etc/ufw/sysctl.conf has been edited to disable IPv6, and allow IPv4 forwarding. You should only have to uncomment the first line:
-```
-net.ipv4.ip_forward=1
-#net/ipv6/conf/default/forwarding=1
-#net/ipv6/conf/all/forwarding=1
-```
-
-* Add a 'POSTROUTING' rule in the 'NAT' table, to direct traffic bidirectionally between your HOSTBRIDGE subnet and the internet. This entry is added in /etc/ufw/before.rules. If this has not been done on this machine before, you may have to add this entire section right after the first comment block in /etc/ufw/before.rules. If this section already exists, add just the line starting with '-A POSTROUTING' into the already existing block. Make sure to change the 'enp0s25' interface name to match the interface your machine uses to get to the internet (look at 'ip route show default'), and to use your selected HOSTBRIDGE subnet range:
-```
-
-# NAT table rules
-*nat
-:POSTROUTING ACCEPT [0:0]
-
-# Masquerade traffic from our HOSTBRIDGE network of 172.16.0/24 to enp0s25. enp0s25 is probably not the name of your network card. Check, and adjust.
--A POSTROUTING -s 172.16.0/24 -o enp0s25 -j MASQUERADE
-
-# don't delete the 'COMMIT' line or these nat table rules won't
-# be processed
-COMMIT
-```
-
-* Restart the firewall to enable these changes:
-```
-sudo ufw disable && sudo ufw enable
-```
-
-####### DHCP services:
-
-In order for VMs plugged into the HOSTBRIDGE to get an address, they will use DHCP. We're going to configure ISC's DHCPD to provide those addresses.
-
-* Edit /etc/dhcp/dhcpd.conf
-  * comment out the line at the top reading: 'option domain-name "example.org";'
-  * comment out the line near the top reading: 'option domain-name-servers ns1.example.org, ns2.example.org;'
-  * add the following to the end of the file, to provide addresses to your selected HOSTBRIDGE subnet range. Make sure to change the addresses to match your selected HOSTBRIDGE subnet:
-```
-# provide DHCP to our hosted kvm network.
-subnet 172.16.0.0 netmask 255.255.255.0 {
-  range 172.16.0.10 172.16.0.20;
-  option routers 172.16.0.1;
-  option domain-name-servers 172.16.0.1;
-}
-```
-
-* Edit /etc/default/isc-dhcp-server, and add your selected HOSTBRIDGE bridge device to the list of IPv4 interfaces dhcpd can listen on. If there is already an entry, note that spaces are used as delimiters in this list:
-```
-INTERFACESv4="br0"
-```
-
-* Restart isc-dhcp-server to make changes effective:
-```
-sudo service isc-dhcp-server restart
-```
-
-####### Name Services:
-DNS services will be handled by BIND, which is configured properly by default. The only thing we need to do is poke a hole in the firewall, so that hosts on the HOSTBRIDGE can access it.
-
-* Add port 53 udp to the list of ports to allow remote connections from.
-```
-sudo ufw allow 53/udp
-```
-
-##### GUESTBRIDGE:
-
-As no services from the host are available on this network, nothing needs to be done for this.
-
-
-### Launching VMs, and installing ubuntu.
-
-You can now run each of the VMs, and perform your OS install. To perform a regular startup, booting from the ISO you have selected, change directory into one of the directories containing your VMs, and run start_kvm.sh:
-```
-cd kvm/proxybox/
-./start_kvm.sh
-```
-
-At this point, you can install ubuntu on each of your nodes like normal.
-
-#### Ubuntu 16.04
-
-##### Getting a text mode installer:
-
-###### Ubuntu 16.04 (mini ISO)
-Note that the AMD64 mini ISO for Ubuntu 16.04 is broken, and will not install.
-
-* If you want to perform your install in text mode:
-```
-down arrow
-tab
-backspace 6 times
-left arrow 22 times
-backspace 7 times
-type 'debian-installer/framebuffer=false'
-enter
-```
-
-###### Ubuntu 16.04 (official ISO)
-Downloaded from: http://releases.ubuntu.com/16.04.6/ubuntu-16.04.6-server-amd64.iso
-
-* If you want to perform your install in text mode:
-```
-enter
-f6
-escape
-left arrow 5 times.
-backspace 5 times.
-left arrow 27 times.
-backspace 7 times.
-type 'debian-installer/framebuffer=false'
-enter
-```
-##### Performing the install
-
-Proceed with installation as normal. When you get to the 'Finish the installation' stage where it prompts you to remove the CD and reboot:
-* Hit 'Go Back' to avoid rebooting. Go out to the 'Ubuntu installer main menu'.
-* Select 'Execute a shell', and drop to a shell.
-* At the shell prompt:
-```
-cd /target
-chroot ./
-apt install -y openssh-server
-vi etc/default/grub
-```
-* Using vi, comment out the 'GRUB_CMDLINE_LINUX_DEFAULT' line, set 'GRUB_CMDLINE_LINUX' to just 'text', and uncomment the 'GRUB_TERMINAL=console' line.
-* Write and quit vi, exit the chroot, and exit the shell. You should be back in the installation menu.
-* Re-run the 'Install the grub bootloader on a hard disk' step.
-* Reboot.
-
-You will have to shut down the VM to change it to booting from the hard drive, instead of the CD. To do that, you can log into another terminal, and kill -15 the qemu process.
-
-
-#### Ubuntu 18.04 (official ISO)
-Downloaded from: http://releases.ubuntu.com/18.04/ubuntu-18.04.3-live-server-amd64.iso
-
-You should see '640 x 480 Graphic mode' when you start the install, but it will quickly give way to a text-based installation system, by default.
-
-### Booting your VM:
-
-To boot into the OS:
-```
-DRIVE=c ./start_kvm.sh
-```
diff --git a/kvmhelpers/ifdown-tap-bridge-physif.sh b/kvmhelpers/ifdown-tap-bridge-physif.sh
deleted file mode 100755
index 261905582..000000000
--- a/kvmhelpers/ifdown-tap-bridge-physif.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-IP="/sbin/ip"
-IFCONFIG="/sbin/ifconfig"
-SUDO="/usr/bin/sudo"
-DHCLIENT="/sbin/dhclient"
-
-. ./tap-bridge-physif-vars.sh
-
-$SUDO $IP link set "$1" down promisc off
-
-# remove ourselves from the bridge.
-$SUDO "$BRCTL" delif "$BRIDGE" "$1"
-
-# this script is not responsible for destroying the tap device.
-#ip tuntap del dev $1
-
-BRIDGEDEV=$($SUDO "$BRCTL" show "$BRIDGE" | grep tap)
-
-if [ -z "$BRIDGEDEV" ]; then
-  {
-    if [ "$SHAREDIF" -eq "0" ]; then
-      # restore internet on the physical interface.
-      $DHCLIENT -r "$BRIDGE"
-    else
-      # remove the physical device from the bridge.
-      $SUDO "$BRCTL" delif "$BRIDGE" "$PHYSIF"
-      # shut down the physical device.
-      $SUDO $IFCONFIG "$PHYSIF" down
-    fi
-    # we are the last one out. burn the bridge.
-    $SUDO $IFCONFIG "$BRIDGE" down
-    $SUDO "$BRCTL" delif "$BRIDGE" "$1"
-    $SUDO "$BRCTL" delbr "$BRIDGE"
-    if [ "$SHAREDIF" -eq "0" ]; then
-      # restore internet on the physical interface.
-      $DHCLIENT -i "$PHYSIF"
-    fi
-  }
-fi
diff --git a/kvmhelpers/ifup-tap-bridge-physif.sh b/kvmhelpers/ifup-tap-bridge-physif.sh
deleted file mode 100755
index cfbe74660..000000000
--- a/kvmhelpers/ifup-tap-bridge-physif.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-IP="/sbin/ip"
-export IFCONFIG="/sbin/ifconfig"
-SUDO="/usr/bin/sudo"
-DHCLIENT="/sbin/dhclient"
-
-{
-
-  . ./tap-bridge-physif-vars.sh
-
-  BRIDGEDEV=$($BRCTL show | grep -E ^"$BRIDGE")
-
-  if [ -n "$BRIDGEDEV" ]; then
-    {
-      $SUDO "$BRCTL" addif "$BRIDGE" "$1"
-      $SUDO $IP link set "$1" up promisc on
-    }
-  else
-    {
-      $SUDO "$BRCTL" addbr "$BRIDGE"
-      if [ "$HOSTROUTE" -eq "0" ]; then
-        $SUDO $IP addr add "$BRIDGEIP"/24 broadcast "$BRIDGEBROADCAST" dev "$BRIDGE"
-      fi
-      $SUDO "$BRCTL" stp "$BRIDGE" off
-      $SUDO $IP link set "$1" up promisc on
-      $SUDO "$BRCTL" addif "$BRIDGE" "$1"
-      $SUDO "$BRCTL" addif "$BRIDGE" "$PHYSIF"
-      if [ "$SHAREDIF" -eq "0" ]; then
-        # FIXME: assumes DHCP, assumes all kind of things.
-        $SUDO $DHCLIENT -r "$PHYSIF"
-        $SUDO $DHCLIENT -i "$BRIDGE"
-      fi
-      $SUDO $IP link set "$PHYSIF" up
-      $SUDO $IP link set "$BRIDGE" up
-      if [ "$USEDHCP" -eq "0" ]; then
-        $SUDO service isc-dhcp-server stop
-        $SUDO service isc-dhcp-server start
-        # workaround arno and fail2ban not working well together.
-        # $SUDO service fail2ban stop
-        # $SUDO service fail2ban start
-      fi
-      if [ "$USEDNS" -eq "0" ]; then
-        $SUDO service bind9 restart
-      fi
-    }
-  fi
-
-  if [ "$HOSTROUTE" -eq "0" ]; then
-    # Allow VMs to use ip masquerading on the host to contact the internet, as well as to have port forwards.
-    $SUDO service ufw restart
-  fi
-
-  echo "Bridge ifup completed."
-} >tapbridgephysif.ifup 2>&1
diff --git a/kvmhelpers/start_kvm.sh b/kvmhelpers/start_kvm.sh
deleted file mode 100755
index b0a7a1742..000000000
--- a/kvmhelpers/start_kvm.sh
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC2181,SC2001,SC1090,SC2046,SC2154
-
-# How much memory to allocate to this VM.
-MEM=2048
-
-# How many CPUs to allocate to this VM. Note: you can allocate a total of more than you have; this is fine.
-CPUS=2
-
-# The CDROM image. Used for installing.
-CDROM=../ubuntu-18.04.3-live-server-amd64.iso
-
-# The disk image.
-DISK=drive-c.img
-
-# How to wire up the network cards. To add more ports, just add more eth entries.
-# HOSTBRIDGE talks to the physical machine.
-# GUESTBRIDGE talks only to other VMs.
-# PRIVATEPORT talks to other VMs and a physical port on the host.
-# SHAREDPORT talks to other VMs and a physical port on the host, which also uses that port for internet access.
-export eth0=HOSTBRIDGE
-export eth1=GUESTBRIDGE
-
-# Where the global configuration is at. Stores global settings, like whether to use graphics or text.
-#config_file="start_kvm-vars.sh"
-
-# Uncomment if you want to always use the ncurses frontend, e.g. if you are trying to run this without a GUI.
-# Note that this script will detect the presence of GUI access, so enabling this will disable that.
-#CURSES="-curses"
-
-# You should not have to modify anything below this line.
-#=====================================LINE================================
-
-# Load an optional configuration file.
-if [ -n "${config_file+x}" ]; then
-  echo "loading config file: ${config_file}"
-  source "${config_file}"
-fi
-
-# Create parameters from the CPUS setting above
-if [ -n "${CPUS+x}" ]; then
-  echo "Restricting to $CPUS processors."
-  PROCESSORS="-smp cores=$CPUS"
-fi
-
-# select an interface.
-if [ -z "${DISPLAY+x}" ]; then
-  CURSES="-curses"
-else
-  if [ -n "${CURSES+x}" ]; then
-    echo "Disabling graphical display."
-    unset DISPLAY
-  fi
-fi
-
-# paths to binaries we use.
-IP="/sbin/ip"
-SUDO="/usr/bin/sudo"
-WHOAMI="/usr/bin/whoami"
-GREP="/bin/grep"
-WC="/usr/bin/wc"
-SEQ="/usr/bin/seq"
-SORT="/usr/bin/sort"
-TAIL="/usr/bin/tail"
-SED="/bin/sed"
-
-# The user who is running this script.
-USER=$(${WHOAMI})
-
-# Claim a tap device, and use it.
-function claim_tap() {
-
-  TAPDEVS=$($IP tuntap | $GREP -E ^tap | $SED "s/:.*//")
-  TAPDEVCOUNT=$(echo -n "$TAPDEVS" | $WC -l)
-  # First, try to fill in any gaps.
-  LASTTAP=$(echo -n "$TAPDEVS" | $SED "s/t..//" | $SORT -g | $TAIL -n 1)
-  if [ -n "$LASTTAP" ]; then
-    for each in $($SEQ 0 "$LASTTAP"); do
-      if [ $((TAPSTRIED + TAPDEVCOUNT)) == "$LASTTAP" ]; then
-        break
-      fi
-      if [ -z "$($IP tuntap | $GREP -E ^tap"$each")" ]; then
-        $SUDO $IP tuntap add dev tap"$each" mode tap user "$USER"
-        if [ $? -eq 0 ]; then
-          echo tap"$each"
-          return 0
-        fi
-        TAPSTRIED=$((TAPSTRIED + 1))
-      fi
-    done
-  else
-    LASTTAP=-1
-  fi
-  # Then, try to claim one on the end. up to 99
-  for each in $($SEQ $((LASTTAP + 1)) 99); do
-    $SUDO $IP tuntap add dev tap"$each" mode tap user "$USER"
-    if [ $? -eq 0 ]; then
-      echo tap"$each"
-      return 0
-    fi
-  done
-}
-
-# set up networking.
-for each in ${!eth*}; do
-  TAPDEV=$(claim_tap)
-  ASSIGNED_TAPS="$ASSIGNED_TAPS $TAPDEV"
-  MACADDR="52:54:00:12:34:$(printf '%02g' $(echo "$TAPDEV" | sed 's/tap//'))"
-  echo Setting up tap "$TAPDEV" for device "$each" with mac address "$MACADDR"
-  if [ "${!each}" == "HOSTBRIDGE" ]; then
-    NETWORK="$NETWORK -netdev tap,id=$each,ifname=$TAPDEV,script=HOSTBRIDGE.sh,downscript=HOSTBRIDGE-down.sh -device rtl8139,netdev=$each,mac=$MACADDR"
-  else
-    if [ "${!each}" == "GUESTBRIDGE" ]; then
-      NETWORK="$NETWORK -netdev tap,id=$each,ifname=$TAPDEV,script=GUESTBRIDGE.sh,downscript=GUESTBRIDGE-down.sh -device rtl8139,netdev=$each,mac=$MACADDR"
-    fi
-  fi
-done
-
-# boot from the CDROM if the user did not specify to boot from the disk on the command line (DRIVE=c ./start_kvm.sh).
-if [ -z "$DRIVE" ]; then
-  echo "Booting from CD. Run with \"DRIVE=c $0\" in order to boot from the hard disk."
-  DRIVE=d
-else
-  echo "Booting from hard disk."
-fi
-
-if [ -z "$NOREBOOT" ]; then
-  echo "Booting normally. A reboot will reboot, and keep the VM running."
-else
-  echo "Booting in single-shot mode. A reboot will return you to your shell prompt, powering off the VM."
-  NOREBOOT=-no-reboot
-fi
-
-sleep 5
-
-# Actually launch qemu-kvm.
-
-# Create the qemu-kvm command
-COMMAND="/usr/bin/kvm -m $MEM -boot $DRIVE -drive file=$DISK,index=0,media=disk,format=raw -drive file=$CDROM,index=1,media=cdrom -rtc base=utc $NETWORK $PROCESSORS $CURSES $NOREBOOT"
-
-# Display the qemu-kvm command
-echo "executing:"
-echo "$COMMAND"
-
-# Execute the qemu-kvm command
-$COMMAND
-
-# VM has shut down, remove all of the taps.
-for each in $ASSIGNED_TAPS; do
-  {
-    $SUDO ip tuntap del dev "$each" mode tap
-  }
-done
-
-#### you should not have to modify these. tell the author if you have to. ####
diff --git a/kvmhelpers/tap-bridge-physif-vars.sh b/kvmhelpers/tap-bridge-physif-vars.sh
deleted file mode 100755
index 7aca274f4..000000000
--- a/kvmhelpers/tap-bridge-physif-vars.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-export BRCTL=/sbin/brctl
-export BRIDGE=br1
-export MGMT_DIR=/var/local/createvm
-
-# The physical interface we bind to.
-export PHYSIF=enxf4f951f090bb
-
-# 0 for true.
-export USEDHCP=1
-export USEDNS=1
-export HOSTROUTE=1
-
-# 0 if the interface is shared with the OS, 1 if the OS does not manage the physical interface.
-export SHAREDIF=1
-
-# only matters if HOSTROUTE is 0.
-export BRIDGEIP=172.16.0.1
diff --git a/offline/docs.md b/offline/docs.md
deleted file mode 100644
index 518a034da..000000000
--- a/offline/docs.md
+++ /dev/null
@@ -1,603 +0,0 @@
-# How to install wire
-
-We have a pipeline in `wire-server-deploy` producing container images, static
-binaries, ansible playbooks, debian package sources and everything required to
-install Wire.
-
-## Installing docker
-On your machine (we call this the "admin host"), you need to have `docker`
-installed (or any other compatible container runtime really, even though
-instructions may need to be modified). See [how to install
-docker](https://docker.com) for instructions.
-
-On Ubuntu 18.04, connected to the internet:
-
-```
-apt install docker.io
-```
-
-Ensure the user you are using for the install has permission to run docker, or add 'sudo' to the docker commands below.
-
-
-## Downloading and extracting the artifact
-Create a fresh workspace to download the artifacts:
-
-```
-$ cd ... # you pick a good location!
-```
-
-Obtain the latest airgap artifact for wire-server-deploy. Please contact us to get it for now.
-We are working on publishing a list of airgap artifacts.
-
-Extract the above listed artifacts into your workspace:
-
-```
-$ wget https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-<HASH>.tgz
-$ tar xvzf wire-server-deploy-static-<HASH>.tgz
-```
-Where the HASH above is the hash of your deployment artifact, given to you by Wire, or acquired by looking at the above build job.
-
-Make sure that the admin host can `ssh` into all the machines that you want to provision. Our docker container will use the `.ssh` folder and the `ssh-agent` of the user running the scripts.
-
-There's also a docker image containing the tooling inside this repo.
-
-## Making tooling available in your environment.
-
-If you don't intend to develop *on wire-server-deploy itself*, you should source the following shell script.
-```
-source ./bin/offline-env.sh
-```
-
-The shell script will set up a `d` alias, which runs commands passed to it inside the docker container
-with all the tools needed for doing an offline deploy.
-
-E.g.:
-
-```
-$ d ansible --version
-ansible 2.9.27
-  config file = /wire-server-deploy/ansible/ansible.cfg
-  configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
-  ansible python module location = /nix/store/vmz21km0crjx8j21bhd77vzwkpiiq9w0-python3.9-ansible-2.9.27/lib/python3.9/site-packages/ansible
-  executable location = /nix/store/vmz21km0crjx8j21bhd77vzwkpiiq9w0-python3.9-ansible-2.9.27/bin/ansible
-  python version = 3.9.10 (main, Jan 13 2022, 23:32:03) [GCC 10.3.0]
-```
-
-## Artifacts provided in the deployment tarball.
-
-The following artifacts are provided:
-
- - `containers-adminhost/wire-server-deploy-*.tar`
-   A container image containing ansible, helm, and other tools and their
-   dependencies in versions verified to be compatible with the current wire
-   stack. Published to `quay.io/wire/wire-server-deploy` as well, but shipped
-   in the artifacts tarball for convenience.
- - `ansible`
-   These contain all the ansible playbooks the rest of the guide refers to, as
-   well as an example inventory, which should be configured according to the
-   environment this is installed into.
- - `binaries.tar`
-   This contains static binaries, both used during the kubespray-based
-   kubernetes bootstrapping, as well as to provide some binaries that are
-   installed during other ansible playbook runs.
- - `charts`
-   The charts themselves, as tarballs. We don't use an external helm
-   repository; every helm chart dependency is resolved already.
- - `containers-system.tar`
-   These are the container images needed to bootstrap kubernetes itself
-   (currently using kubespray).
- - `containers-helm.tar`
-   These are the container images our charts (and charts we depend on) refer to.
-   Also come as tarballs, and are seeded like the system containers.
- - `containers-other.tar`
-   These are other container images, not deployed inside k8s. Currently, only
-   contains Restund.
- - `debs-bionic.tar`
-   This acts as a self-contained dump of all packages required to install
-   kubespray, as well as all other packages that are installed by ansible
-   playbooks on nodes that don't run kubernetes.
-   There's an ansible playbook copying these assets to an "assethost", starting
-   a little webserver there serving it, and configuring all nodes to use it as
-   a package repo.
- - `values`
-   Contains helm chart values and secrets. Needs to be tweaked to the
-   environment.
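-
-If you want to verify the artifacts by hand before proceeding, a quick sketch using standard docker and tar commands (file names follow the list above; `bin/offline-env.sh` normally loads the admin image for you):
-
-```
-# load the admin container image into the local docker daemon
-docker load -i containers-adminhost/wire-server-deploy-*.tar
-# peek at the contents of the static-binary tarball and the charts
-tar tf binaries.tar | head
-ls charts/
-```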
-
-## Editing the inventory
-
-Copy `ansible/inventory/offline/99-static` to `ansible/inventory/offline/hosts.ini`, and move the original out of the way:
-
-```
-cp ansible/inventory/offline/99-static ansible/inventory/offline/hosts.ini
-mv ansible/inventory/offline/99-static ansible/inventory/offline/orig.99-static
-```
-
-Edit `ansible/inventory/offline/hosts.ini`.
-Here, you will describe the topology of your offline deploy. There are instructions in the comments on how to set
-everything up. You can also refer to the extra information at https://docs.wire.com/how-to/install/ansible-VMs.html .
-
-Add one entry in the `all` section of this file for each machine you are managing via ansible. This will be all of the machines in your Wire cluster.
-
-If you are using username/password to log in and sudo up, in the `all:vars` section, add:
-```
-ansible_user=<username>
-ansible_password=<password>
-ansible_become_pass=<password>
-```
-
-### Configuring kubernetes and etcd
-
-You'll need at least 3 `kubenode`s. Three of them should be added to the
-`[kube-master]`, `[etcd]` and `[kube-node]` groups of the inventory file. Any
-additional nodes should only be added to the `[kube-node]` group.
-
-### Setting up databases and kubernetes to talk over the correct (private) interface
-If you are deploying wire on servers that are expected to use one interface to talk to the public, and a separate interface to talk amongst themselves, you will need to add "ip=" declarations for the private interface of each node. For instance, if the first kubenode was expected to talk to the world on 172.16.0.129, but speak to other wire services (kubernetes, databases, etc) on 192.168.0.2, you should edit its entry like the following:
-```
-kubenode1 ansible_host=172.16.0.129 ip=192.168.0.2
-```
-Do this for all of the instances.
-
-### Setting up Database network interfaces.
-* Make sure that `assethost` is present in the inventory file with the correct `ansible_host` (and `ip` values if required)
-* Make sure that `cassandra_network_interface` is set to the interface on which
-  the kubenodes can reach cassandra and on which the cassandra nodes
-  communicate among each other. Your private network.
-* Similarly `elasticsearch_network_interface` and `minio_network_interface`
-  should be set to the private network interface as well.
-
-### Configuring Restund
-
-Restund is deployed for NAT hole-punching and relaying, so that 1-to-1 calls
-can be established between Wire users. Restund needs to be directly publicly
-reachable on a public IP.
-
-If you need Restund to listen on a different interface than the default gateway, set `restund_network_interface`.
-
-If the interface on which Restund is listening does not know its own public IP
-(e.g. because it is behind NAT itself) extra configuration is necessary. Please provide the public IP on which
-Restund is available as `restund_peer_udp_advertise_addr`.
-
-Due to this *NAT-hole punching* relay purpose and depending on where the Restund instance resides within your network
-topology, it could be used to access private services. We consider this to be unintended and thus set a couple
-of network rules on a Restund instance. If egress traffic to certain private network ranges should still
-be allowed, you may adjust `restund_allowed_private_network_cidrs` according to your setup.
-
-### Marking kubenode for calling server (SFT)
-
-The SFT Calling server should be running on kubernetes nodes that are connected to the public internet.
-If not all kubernetes nodes match these criteria, you should specifically label the nodes that do match
-these criteria, so that we're sure SFT is deployed correctly.
-
-
-By using a `node_label` we can make sure SFT is only deployed on a certain node, like `kubenode4`:
-
-```
-kubenode4 node_labels="{'wire.com/role': 'sftd'}" node_annotations="{'wire.com/external-ip': 'a.b.c.d'}"
-```
-
-If the node does not know its own public IP (e.g. because it's behind NAT) then you should also set
-the `wire.com/external-ip` annotation to the public IP of the node.
-
-### Configuring MinIO
-
-In order to automatically generate deeplinks, edit the minio variables in `[minio:vars]` (`prefix`, `domain` and `deeplink_title`) by replacing `example.com` with your own domain.
-
-## Generating secrets
-
-Minio and restund services have shared secrets with the `wire-server` helm chart. We have a utility
-script that generates a fresh set of secrets for these components.
-
-Please run:
-```
-./bin/offline-secrets.sh
-```
-
-This should generate two files: `./ansible/inventory/offline/group_vars/all/secrets.yaml` and `values/wire-server/secrets.yaml`.
-
-## Deploying Kubernetes, Restund and stateful services
-
-### WORKAROUND: old debian key
-All of our debian archives up to version 4.12.0 used a now-outdated debian repository signature. Some modifications are required to be able to install everything properly.
-
-First, gather a copy of the 'setup-offline-sources.yml' file from https://raw.githubusercontent.com/wireapp/wire-server-deploy/kvm_support/ansible/setup-offline-sources.yml :
-```
-wget https://raw.githubusercontent.com/wireapp/wire-server-deploy/kvm_support/ansible/setup-offline-sources.yml
-```
-Copy it into the ansible/ directory:
-```
-cp ansible/setup-offline-sources.yml ansible/setup-offline-sources.yml.backup
-cp setup-offline-sources.yml ansible/
-```
-
-Open it with your preferred text editor and edit the following:
-* find a big block of comments and uncomment everything in it (`- name: trust everything...`)
-* after the block you will find `- name: Register offline repo key...`. Comment out that segment (do not comment out the part with `- name: Register offline repo`!)
-
-Then disable checking for outdated signatures by editing the following file:
-```
-ansible/roles-external/kubespray/roles/container-engine/docker/tasks/main.yml
-```
-* comment out the block with `- name: ensure docker-ce repository public key is installed...`
-* comment out the next block `- name: ensure docker-ce repository is enabled`
-
-Now you are ready to start deploying services.
-
-#### WORKAROUND: dependency
-Some Ubuntu systems do not have GPG by default. Wire assumes this is already present. Ensure you have gpg installed on all of your nodes before continuing to the next step.
-
-### Deploying with Ansible
-
-In order to deploy all the ansible-managed services you can run:
-```
-# d ./bin/offline-cluster.sh
-```
-... However, a more conservative approach is to perform each step of the script one at a time, for better understanding and better handling of errors.
-
-#### Populate the assethost, and prepare to install images from it.
-
-Copy over binaries and debs, serve assets from the asset host, and configure
-other hosts to fetch debs from it:
-
-```
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/setup-offline-sources.yml
-```
-If this step fails partway, and you know that parts of it completed, the `--skip-tags debs,binaries,containers,containers-helm,containers-other` tags may come in handy.
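-
-For example, if the debs and binaries already copied over successfully and only the container seeding failed, a re-run might look like the following (a sketch; adjust the tag list to match whatever actually completed):
-
-```
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/setup-offline-sources.yml --skip-tags debs,binaries
-```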
-
-#### Kubernetes, part 1
-Run kubespray until docker is installed and runs. This allows us to preseed the docker containers that
-are part of the offline bundle:
-
-```
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/kubernetes.yml --tags bastion,bootstrap-os,preinstall,container-engine
-```
-
-#### Restund
-Now, run the restund playbook until docker is installed:
-```
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/restund.yml --tags docker
-```
-
-
-#### Pushing docker containers to kubenodes, and restund nodes.
-With docker being installed on all nodes that need it, seed all container images:
-
-```
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/seed-offline-docker.yml
-```
-
-#### Kubernetes, part 2
-Run the rest of kubespray. This should bootstrap a kubernetes cluster successfully:
-
-```
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/kubernetes.yml --skip-tags bootstrap-os,preinstall,container-engine
-```
-
-#### Ensuring kubernetes is healthy.
-
-Ensure the cluster comes up healthy. The container also contains kubectl, so check the node status:
-
-```
-d kubectl get nodes -owide
-```
-They should all report ready.
-
-
-#### Non-kubernetes services (restund, cassandra, elasticsearch, minio)
-Now, deploy all other services which don't run in kubernetes.
-
-```
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/restund.yml
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/cassandra.yml
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/elasticsearch.yml
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/minio.yml
-```
-
-Afterwards, run the following playbook to create helm values that tell our helm charts
-what the IP addresses of cassandra, elasticsearch and minio are:
-
-```
-d ansible-playbook -i ./ansible/inventory/offline/hosts.ini ansible/helm_external.yml
-```
-
-### Deploying Wire
-
-It's now time to deploy the helm charts on top of kubernetes, installing the Wire platform.
-
-#### Finding the stateful services
-First, make kubernetes aware of where all the external stateful services are by running:
-
-```
-d helm install cassandra-external ./charts/cassandra-external --values ./values/cassandra-external/values.yaml
-d helm install elasticsearch-external ./charts/elasticsearch-external --values ./values/elasticsearch-external/values.yaml
-d helm install minio-external ./charts/minio-external --values ./values/minio-external/values.yaml
-```
-
-Also copy the values file for `databases-ephemeral`, as it is required for the next step:
-
-```
-cp values/databases-ephemeral/prod-values.example.yaml values/databases-ephemeral/values.yaml
-```
-
-#### Deploying stateless dependencies
-Next, we have 4 services that need to be deployed but need no additional configuration:
-```
-d helm install fake-aws ./charts/fake-aws --values ./values/fake-aws/prod-values.example.yaml
-d helm install demo-smtp ./charts/demo-smtp --values ./values/demo-smtp/prod-values.example.yaml
-d helm install databases-ephemeral ./charts/databases-ephemeral/ --values ./values/databases-ephemeral/values.yaml
-d helm install reaper ./charts/reaper
-```
-
-#### Preparing your values
-
-Next, move `./values/wire-server/prod-values.example.yaml` to `./values/wire-server/values.yaml`.
-Inspect all the values and adjust domains to your domains where needed.
-
-Add the IPs of your `restund` servers to the `turnStatic.v2` list:
-```yaml
-  turnStatic:
-    v1: []
-    v2:
-      - "turn:<IP of restund server 1>:80"
-      - "turn:<IP of restund server 2>:80"
-      - "turn:<IP of restund server 1>:80?transport=tcp"
-      - "turn:<IP of restund server 2>:80?transport=tcp"
-```
-
-Open up `./values/wire-server/secrets.yaml` and inspect the values. In theory
-this file should have only generated secrets, and no additional secrets have to
-be added, unless additional options have been enabled.
-
-Open up `./values/wire-server/values.yaml` and replace example.com and other domains and subdomains with your domain. You can do it with:
-
-```
-sed -i 's/example.com/<your domain>/g' values.yaml
-```
-
-
-#### Deploying Wire-Server
-
-Now deploy `wire-server`:
-
-```
-d helm install wire-server ./charts/wire-server --timeout=15m0s --values ./values/wire-server/values.yaml --values ./values/wire-server/secrets.yaml
-```
-
-## Directing Traffic to Wire
-
-### Deploy ingress-nginx-controller
-
-This component requires no configuration, and is a requirement for all of the methods we support for getting traffic into your cluster:
-```
-d helm install ingress-nginx-controller ./charts/ingress-nginx-controller
-```
-
-### Forwarding traffic to your cluster
-
-#### Using network services
-
-Most enterprises have network service teams to forward traffic appropriately. Ask that your network team forward TCP port 443 to each one of the kubernetes servers on port 31773. Ask the same for port 80, directing it to 31772.
-
-If they ask for clarification, a longer way of explaining it is "wire expects https traffic to go to port 31773, and http traffic to go to port 31772. A load balancing rule needs to be in place, so that no matter which kubernetes host is up or down, the router will direct traffic to one of the operational kubernetes nodes. Any node that accepts connections on ports 31773 and 31772 can be considered operational."
-
-#### Through an IP Masquerading Firewall
-
-Your IP masquerading firewall must forward port 443 and port 80 to one of the kubernetes nodes (which must always remain online).
-Additionally, if you want to use Let's Encrypt CA certificates, items behind your firewall must be redirected to your kubernetes node, when the cluster is attempting to contact the outside IP.
-
-The following instructions are given only as an example.
-Properly configuring IP masquerading requires a seasoned linux administrator with deep knowledge of networking.
-They assume all traffic destined to your wire cluster is going through a single IP masquerading firewall, running some modern version of linux.
-
-##### Incoming SSL Traffic
-
-Here, you should check the ethernet interface name for your outbound IP.
-```
-ip ro | sed -n "/default/s/.* dev \([enpso0-9]*\) .*/export OUTBOUNDINTERFACE=\1/p"
-```
-
-This will return a shell command setting a variable to your default interface. Copy and paste it.
-Next, supply your outside IP address:
-```
-export PUBLICIPADDRESS=<your public IP>
-```
-
-Select one of your kubernetes nodes that you are fine with losing service if it is offline:
-```
-export KUBENODE1IP=<the IP of the kubernetes node>
-```
-
-Then, if your box owns the public IP (you can see the IP in `ip addr`), run the following:
-```
-sudo iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p tcp --dport 80 -j DNAT --to-destination $KUBENODE1IP:31772
-sudo iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p tcp --dport 443 -j DNAT --to-destination $KUBENODE1IP:31773
-```
-
-If your box is being forwarded traffic from another firewall (you do not see the IP in `ip addr`), run the following:
-```
-sudo iptables -t nat -A PREROUTING -i $OUTBOUNDINTERFACE -p tcp --dport 80 -j DNAT --to-destination $KUBENODE1IP:31772
-sudo iptables -t nat -A PREROUTING -i $OUTBOUNDINTERFACE -p tcp --dport 443 -j DNAT --to-destination $KUBENODE1IP:31773
-```
-
-If you are running a UFW firewall, make sure to allow inbound traffic on 443 and 80:
-```
-sudo ufw allow in on $OUTBOUNDINTERFACE proto tcp to any port 443
-sudo ufw allow in on $OUTBOUNDINTERFACE proto tcp to any port 80
-```
-
-If you are running a UFW firewall, make sure to add the above iptables rules to /etc/ufw/before.rules, so they persist after a reboot.
-
-###### Mirroring the public IP
-
-cert-manager has a requirement on being able to reach the kubernetes cluster on its external IP. This is trouble, because in most security-conscious environments, the external IP is not owned by any of the kubernetes hosts.
-
-On an IP masquerading router, you can redirect outgoing traffic from your cluster; that is to say, when the cluster asks to connect to your external IP, you can instead choose to send it to a kubernetes node inside of the cluster:
-```
-export INTERNALINTERFACE=br0
-sudo iptables -t nat -A PREROUTING -i $INTERNALINTERFACE -d $PUBLICIPADDRESS -p tcp --dport 80 -j DNAT --to-destination $KUBENODE1IP:31772
-sudo iptables -t nat -A PREROUTING -i $INTERNALINTERFACE -d $PUBLICIPADDRESS -p tcp --dport 443 -j DNAT --to-destination $KUBENODE1IP:31773
-```
-
-### Incoming Calling Traffic
-
-Here, you should check the ethernet interface name for your outbound IP.
-```
-ip ro | sed -n "/default/s/.* dev \([enps0-9]*\) .*/export OUTBOUNDINTERFACE=\1/p"
-```
-
-This will return a shell command setting a variable to your default interface. Copy and paste it. Next, supply your outside IP address:
-```
-export PUBLICIPADDRESS=<your public IP>
-```
-
-Select one of your restund servers that you are fine with losing service if it is offline:
-```
-export RESTUND01IP=<the IP of the restund server>
-```
-
-Then run the following:
-```
-sudo iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p tcp --dport 80 -j DNAT --to-destination $RESTUND01IP:80
-sudo iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p udp --dport 80 -j DNAT --to-destination $RESTUND01IP:80
-sudo iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p udp -m udp --dport 32768:60999 -j DNAT --to-destination $RESTUND01IP
-```
-or add an appropriate rule to a config file (for UFW, /etc/ufw/before.rules).
-
-### Changing the TURN port.
-
-FIXME: ansibleize this!
-TURN's connection port for incoming clients is set to 80 by default. To change it, on the restund nodes, edit /etc/restund.conf, and replace ":80" with your desired port, for instance 8080 as above.
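-
-Until that is ansibleized, a minimal sketch of the manual change (this assumes restund runs as a system service named `restund`, and that ":80" appears only in the listen directive; check your /etc/restund.conf before running):
-
-```
-# on each restund node: move the client port from 80 to 8080
-sudo sed -i 's/:80/:8080/' /etc/restund.conf
-sudo service restund restart
-```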
-
-
-### Acquiring / Deploying SSL Certificates:
-
-SSL certificates are required by the nginx-ingress-services helm chart. You can either register and provide your own, or use cert-manager to request certificates from LetsEncrypt.
-
-#### Prepare to deploy nginx-ingress-services
-
-Move the example values for `nginx-ingress-services`:
-```
-mv ./values/nginx-ingress-services/{prod-values.example.yaml,values.yaml}
-mv ./values/nginx-ingress-services/{prod-secrets.example.yaml,secrets.yaml}
-```
-
-#### Bring your own certificates
-
-If you generated your SSL certificates yourself, there are two ways to give these to Wire:
-
-##### From the command line
-If you have the certificate and its corresponding key available on the filesystem, copy them into the root of the Wire-Server directory, and:
-
-```
-d helm install nginx-ingress-services ./charts/nginx-ingress-services --values ./values/nginx-ingress-services/values.yaml --set-file secrets.tlsWildcardCert=certificate.pem --set-file secrets.tlsWildcardKey=key.pem
-```
-
-Do not try to use paths to refer to the certificates, as the 'd' command messes with file paths outside of Wire-Server.
-
-##### In your nginx-ingress-services values file
-Change the domains in `values.yaml` to your domain. And add your wildcard or SAN certificate that is valid for all these
-domains to the `secrets.yaml` file.
-
-Now install the service with helm:
-
-```
-d helm install nginx-ingress-services ./charts/nginx-ingress-services --values ./values/nginx-ingress-services/values.yaml --values ./values/nginx-ingress-services/secrets.yaml
-```
-
-#### Use letsencrypt generated certificates
-
-First, download cert-manager, and place it in the appropriate location:
-```
-wget https://charts.jetstack.io/charts/cert-manager-v1.9.1.tgz
-mkdir tmp
-cd tmp
-tar -xzf ../cert-manager-*.tgz
-cd ..
-mv tmp/cert-manager/ charts/
-rm -rf tmp
-```
-
-Edit values/nginx-ingress-services/values.yaml to tell nginx-ingress-services to use cert-manager:
- * set useCertManager: true
- * set certmasterEmail: your.email.address
-
-Set your domain name with sed:
-```
-sed -i "s/example.com/YOURDOMAINHERE/" values/nginx-ingress-services/values.yaml
-```
-
-UNDER CONSTRUCTION:
-```
-d kubectl create namespace cert-manager-ns
-d helm upgrade --install -n cert-manager-ns --set 'installCRDs=true' cert-manager charts/cert-manager
-d helm upgrade --install nginx-ingress-services charts/nginx-ingress-services -f values/nginx-ingress-services/values.yaml
-```
-
-Watch the output of the following command to know how your request is going:
-```
-d kubectl get certificate
-```
-
-#### Old wire-server releases
-
-On older wire-server releases, nginx-ingress-services may fail to deploy. Some version numbers of services have changed. Make the following changes, and try to re-deploy till it works.
-
-certificate.yaml:
-v1alpha2 -> v1
-remove keyAlgorithm keySize keyEncoding
-
-certificate-federator.yaml:
-v1alpha2 -> v1
-remove keyAlgorithm keySize keyEncoding
-
-issuer:
-v1alpha2 -> v1
-
-## Installing sftd
-
-For full docs with details and explanations please see https://github.com/wireapp/wire-server-deploy/blob/d7a089c1563089d9842aa0e6be4a99f6340985f2/charts/sftd/README.md
-
-First, make sure you have a certificate for `sftd.<your domain>`, or that you are using a letsencrypt certificate.
-For bring-your-own-certificate, this could be the same wildcard or SAN certificate you used at previous steps.
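-
-To confirm that an existing certificate actually covers the SFT hostname, you can inspect it with standard openssl tooling (`certificate.pem` is the file name used in the bring-your-own-certificate step above):
-
-```
-openssl x509 -in certificate.pem -noout -text | grep -A1 'Subject Alternative Name'
-```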
-
-Next, copy `values/sftd/prod-values.example.yaml` to `values/sftd/values.yaml`, and change the contents accordingly.
-
- * If your turn servers can be reached on their public IP by the SFT service, Wire recommends you enable cooperation between turn and SFT: add a line reading `turnDiscoveryEnabled: true` to `values/sftd/values.yaml`.
-
-Edit values/sftd/values.yaml, select whether you want letsencrypt certificates, and ensure the allowOrigin and host values point to the appropriate domains.
-
-#### Deploying
-
-##### Node Annotations and External IPs.
-If you want to restrict SFT to certain nodes, make sure that in your inventory file you have given every node that may run sftd workloads a node label indicating it is to be used, and an annotation with its external IP if it is behind a 1:1 firewall (Wire recommends this setup):
-```
-kubenode3 node_labels="{'wire.com/role': 'sftd'}" node_annotations="{'wire.com/external-ip': 'XXXX'}"
-```
-
-If you failed to perform the above step during the ansible deployment of your sft services, you can perform it manually:
-```
-d kubectl annotate node kubenode1 wire.com/external-ip=178.63.60.45
-d kubectl label node kubenode1 wire.com/role=sftd
-```
-
-##### A selected group of kubernetes nodes:
-If you are restricting SFT to certain nodes, use `nodeSelector` to run on specific nodes (**replacing the example.com domains with yours**):
-```
-d helm upgrade --install sftd ./charts/sftd \
-  --set 'nodeSelector.wire\.com/role=sftd' \
-  --values values/sftd/values.yaml
-```
-
-##### All kubernetes nodes.
-If you are not restricting SFT to certain nodes, omit the `nodeSelector` argument:
-```
-d helm upgrade --install sftd ./charts/sftd \
-  --set-file tls.crt=/path/to/tls.crt \
-  --set-file tls.key=/path/to/tls.key \
-  --values values/sftd/values.yaml
-```
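-
-Either way, you can verify that the deployment came up (a sketch, assuming the release name `sftd` used above results in pods labelled `app=sftd`; adjust the label if your chart differs):
-```
-d kubectl get pods -l app=sftd -o wide
-```
-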
diff --git a/offline/docs_ubuntu_22.04.md b/offline/docs_ubuntu_22.04.md
index 3451b27a9..37407202e 100644
--- a/offline/docs_ubuntu_22.04.md
+++ b/offline/docs_ubuntu_22.04.md
@@ -54,7 +54,7 @@ If you see the current docker version and no error, it means that Docker is now c
 
 ## Downloading and extracting the artifact
 
-Note: If you have followed the Ubuntu installation instructions (`ubuntu22.04_installation.md` or `ubuntu_installation.md`) before following this page, you already have a wire-server-deploy folder with an artifact extracted into it, and you can simply use that.
+Note: If you have followed the Ubuntu installation instructions (`single_hetzner_machine_installation.md`) before following this page, you already have a wire-server-deploy folder with an artifact extracted into it, and you can simply use that.
 
 Create a fresh workspace to download the artifacts:
 
@@ -403,6 +403,15 @@ d ./bin/offline-cluster.sh
 In case any of the steps in this script fail, see the notes in the comments that accompany each step. Comment out steps that have already completed when re-running the scripts.
 
+#### Ensuring kubernetes is healthy.
+
+Ensure the cluster comes up healthy. The container also contains kubectl, so check the node status:
+
+```
+d kubectl get nodes -owide
+```
+They should all report ready.
+
 ### WORKAROUND: old debian key
 
 All of our debian archives up to version 4.12.0 used a now-outdated debian repository signature. Some modifications are required to be able to install everything properly.
@@ -469,17 +478,9 @@ Then find the right number and delete it
 ufw delete <NUMBER>;
 ```
-#### Ensuring kubernetes is healthy.
-
-Ensure the cluster comes up healthy. The container also contains kubectl, so check the node status:
-
-```
-d kubectl get nodes -owide
-```
-They should all report ready.
+and enable the ports for colocated services running on these nodes:
-
-#### Enable the ports colocated services run on:
 ```
 sudo bash -c '
 set -eo pipefail;
diff --git a/offline/kvm-hetzner.md b/offline/kvm-hetzner.md
deleted file mode 100644
index 90dcc740d..000000000
--- a/offline/kvm-hetzner.md
+++ /dev/null
@@ -1,545 +0,0 @@
-# Scope
-
-This document gives exact instructions for performing an offline installation of Wire on a single machine from Hetzner. It uses the KVM virtual machine system to create all of the required virtual machines.
-
-This document also gives instructions for creating a TURN calling server on a separate VM.
-
-## Use the hetzner robot console to create a new server.
-
-Select Ubuntu 18.04 or Ubuntu 20.04 on an ax101 dedicated server.
-
-If not using Hetzner, for reference, the specs of the ax101 server are:
-
-* AMD Ryzen™ 9 5950X
-* 128 GB DDR4 ECC RAM
-* 2 x 3.84 TB NVMe SSD Datacenter Edition (software RAID 1)
-* 1 GBit/s port
-
-In our example, the IP returned when creating the server was: 65.21.197.76
-
-## Pre-requisites
-
-First off, generate an ssh key if you do not have one already.
-
-```
-ssh-keygen -t ed25519
-```
-
-## Tighten security.
-
-### Log in as root.
-
-```
-ssh -i ~/.ssh/id_ed25519 root@65.21.197.76 -o serveraliveinterval=60
-```
-
-### Update the OS
-When prompted about the ssh config, just accept the maintainer's version.
-```
-apt update
-apt upgrade -y
-```
-
-### Install tmate
-
-Tmate is a terminal sharing service, which you might need in order for more than one person to collaborate on solving issues. Wire might ask you for a tmate session when debugging any problem you encounter.
-
-```
-sudo apt install tmate
-```
-
-If asked to start a tmate session, simply run:
-
-```
-tmate
-```
-
-Then copy and paste the links that are generated; the terminal session is shared with whomever you give the links to.
-
-### Reboot
-Reboot to load the new, patched kernel.
-```
-reboot
-```
-
-### Disable password login for sshd
-
-Install `nano` or your favorite text editor:
-
-```
-sudo apt install nano -y
-```
-
-Make sure the following values are configured in /etc/ssh/sshd_config:
-```
-# this is the important value
-PasswordAuthentication no
-
-# make sure PAM and Challenge Response is also disabled
-ChallengeResponseAuthentication no
-UsePAM no
-
-# don't allow root to login via password
-PermitRootLogin prohibit-password
-```
-### Re-start SSH
-```
-service ssh restart
-```
-
-### Install fail2ban
-```
-apt install fail2ban
-```
-
-## Create demo user.
-
-### Create our 'demo' user
-```
-adduser --disabled-password --gecos "" demo
-```
-
-### Copy the ssh key to the demo user
-
-```
-mkdir ~demo/.ssh
-cp ~/.ssh/authorized_keys /home/demo/.ssh/
-chown demo.demo ~demo/.ssh/
-chown demo.demo ~demo/.ssh/authorized_keys
-```
-
-### Add a configuration so demo does not need a password in order to sudo.
-
-```
-echo "demo ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/10-demo_user
-chmod 440 /etc/sudoers.d/10-demo_user
-```
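-
-As an optional sanity check (a sketch; run this while still root), confirm that passwordless sudo works for the demo user:
-```
-# should print OK without prompting for a password
-sudo -u demo sudo -n true && echo OK
-```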
-
-## SSH in as the demo user.
-
-On the remote machine:
-```
-logout
-```
-
-On the local machine:
-```
-ssh -i ~/.ssh/id_ed25519 demo@65.21.197.76 -o serveraliveinterval=60
-```
-
-## Disable root login via ssh
-
-Use sudo to edit `/etc/ssh/sshd_config`:
-
-```
-sudo nano /etc/ssh/sshd_config
-```
-
-And set the following:
-```
-# even better: don't allow to login as root via ssh at all
-PermitRootLogin no
-```
-
-### Re-start SSH
-```
-sudo service ssh restart
-```
-
-### Install screen
-```
-sudo apt install screen
-```
-
-### Start a screen session
-```
-screen
-```
-
-### Download the offline artifact.
-```
-wget https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-03fad4ff6d9a67eb56668fb259a0c1571cabcac4.tgz
-```
-
-### Extract the offline artifact.
-
-```
-mkdir Wire-Server
-cd Wire-Server
-tar -xzf ../wire-server-deploy-static-*.tgz
-```
-
-### Extract the debian archive
-We'll use the docker that is in the archive.
-
-```
-tar -xf debs-jammy.tar
-```
-
-### (FIXME: add iptables to the repo) Install Docker from the debian archive.
-```
-sudo apt -y install iptables
-sudo dpkg -i debs/public/pool/main/d/docker-ce/docker-ce-cli_*.deb
-sudo dpkg -i debs/public/pool/main/c/containerd.io/containerd.io_*.deb
-sudo dpkg -i debs/public/pool/main/d/docker-ce/docker-ce_*.deb
-sudo dpkg --configure -a
-```
-
-### (missing) point host OS to debian archive
-
-### (rewrite) Install networking tools
-We're going to install dnsmasq in order to provide DNS and DHCP to the virtual machines. Networking will be handled by ufw.
-
-Note that dnsmasq always fails when it installs; the failures (red output) are normal.
-```
-sudo systemctl disable systemd-resolved
-sudo apt install dnsmasq ufw -y
-sudo systemctl stop systemd-resolved
-```
-
-### Tell dnsmasq to provide DNS locally.
-```
-sudo bash -c 'echo "listen-address=127.0.0.53" > /etc/dnsmasq.d/00-lo-systemd-resolvconf'
-sudo bash -c 'echo "no-resolv" >> /etc/dnsmasq.d/00-lo-systemd-resolvconf'
-sudo bash -c 'echo "server=8.8.8.8" >> /etc/dnsmasq.d/00-lo-systemd-resolvconf'
-sudo service dnsmasq restart
-```
-
-### Configure the firewall
-```
-sudo ufw allow 22/tcp
-sudo ufw allow from 172.16.0.0/24 proto udp to any port 53
-sudo ufw allow from 127.0.0.0/24 proto udp to any port 53
-sudo ufw allow in on br0 from any proto udp to any port 67
-sudo ufw enable
-```
-
-### (temporary) copy helper scripts from wire-server-deploy
-```
-sudo apt install git -y
-git clone https://github.com/wireapp/wire-server-deploy.git
-cp -a wire-server-deploy/kvmhelpers/ ./
-cp -a wire-server-deploy/bin/newvm.sh ./bin
-cp -a wire-server-deploy/ansible/setup-offline-sources.yml ./ansible # see https://github.com/wireapp/wire-server-deploy/blob/kvm_support/offline/docs.md#workaround-old-debian-key
-chmod 550 ./bin/newvm.sh
-chmod 550 ./kvmhelpers/*.sh
-```
-
-### (rewrite) install qemu-kvm
-KVM is the virtualization system we're using.
-```
-sudo apt install qemu-kvm qemu-utils -y
-```
-
-#### Ubuntu 18
-If you are using ubuntu 18, you also have to install the sgabios package:
-```
-sudo apt install sgabios -y
-```
-
-### Add the demo user to the kvm group
-```
-sudo usermod -a -G kvm demo
-```
-
-### Log out, log back in, and return to Wire-Server.
-
-You have to log out twice: once to get out of screen, and once to get out of the machine.
-```
-logout
-logout
-```
-
-```
-ssh -i ~/.ssh/id_ed25519 demo@65.21.197.76 -o serveraliveinterval=60
-cd Wire-Server/
-screen
-```
-
-### Install bridge-utils
-So that we can manage the virtual network.
-```
-sudo apt install bridge-utils net-tools -y
-```
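-
-As an optional sanity check, list the bridges the kernel currently knows about; the `br0` bridge used below will appear once it has been created:
-```
-brctl show
-```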
-
-### (personal) install emacs
-```
-sudo apt install emacs-nox -y
-```
-
-### (temporary) manually create the bridge device.
-This is the interface we are going to use to talk to the virtual machines.
-```
-sudo brctl addbr br0
-sudo ifconfig br0 172.16.0.1 netmask 255.255.255.0 up
-```
-
-### Tell dnsmasq to provide DHCP to our KVM VMs.
-```
-sudo bash -c 'echo "listen-address=172.16.0.1" > /etc/dnsmasq.d/10-br0-dhcp'
-sudo bash -c 'echo "dhcp-range=172.16.0.2,172.16.0.127,10m" >> /etc/dnsmasq.d/10-br0-dhcp'
-sudo service dnsmasq restart
-```
-
-### Enable ip forwarding.
-```
-sudo sed -i "s/.*net.ipv4.ip_forward.*/net.ipv4.ip_forward=1/" /etc/sysctl.conf
-sudo sysctl -p
-```
-
-### Enable network masquerading
-Here, you should check the ethernet interface name for your outbound IP:
-
-```
-ip ro | sed -n "/default/s/.* dev \([enps0-9]*\) .*/export OUTBOUNDINTERFACE=\1/p"
-```
-This will return a shell command setting a variable to your default interface. Copy and paste it into the command prompt, hit enter to run it, then run the following:
-
-```
-sudo sed -i 's/.*DEFAULT_FORWARD_POLICY=.*/DEFAULT_FORWARD_POLICY="ACCEPT"/' /etc/default/ufw
-sudo sed -i "1i *nat\n:POSTROUTING ACCEPT [0:0]\n-A POSTROUTING -s 172.16.0.0/24 -o $OUTBOUNDINTERFACE -j MASQUERADE\nCOMMIT" /etc/ufw/before.rules
-sudo service ufw restart
-```
-
-### Add static IPs for the VMs.
-```
-sudo bash -c 'echo "dhcp-host=assethost,172.16.0.128,10h" > /etc/dnsmasq.d/20-hosts'
-sudo bash -c 'echo "dhcp-host=kubenode1,172.16.0.129,10h" >> /etc/dnsmasq.d/20-hosts'
-sudo bash -c 'echo "dhcp-host=kubenode2,172.16.0.130,10h" >> /etc/dnsmasq.d/20-hosts'
-sudo bash -c 'echo "dhcp-host=kubenode3,172.16.0.131,10h" >> /etc/dnsmasq.d/20-hosts'
-sudo bash -c 'echo "dhcp-host=ansnode1,172.16.0.132,10h" >> /etc/dnsmasq.d/20-hosts'
-sudo bash -c 'echo "dhcp-host=ansnode2,172.16.0.133,10h" >> /etc/dnsmasq.d/20-hosts'
-sudo bash -c 'echo "dhcp-host=ansnode3,172.16.0.134,10h" >> /etc/dnsmasq.d/20-hosts'
-sudo service dnsmasq restart
-```
-
-### Acquire the ubuntu 18.04 server installation CD (netboot).
-For the purposes of our text-only demo, we are going to use one of the netboot ISOs. This allows us to control the install from an SSH prompt.
-```
-curl http://archive.ubuntu.com/ubuntu/dists/bionic-updates/main/installer-amd64/current/images/netboot/mini.iso -o ubuntu.iso
-```
-
-### Create assethost
-```
-./bin/newvm.sh -d 40 -m 1024 -c 1 assethost
-```
-
-### Create kubenode1
-```
-./bin/newvm.sh -d 80 -m 8192 -c 6 kubenode1
-```
-
-### Create kubenode2
-```
-./bin/newvm.sh -d 80 -m 8192 -c 6 kubenode2
-```
-
-### Create kubenode3
-```
-./bin/newvm.sh -d 80 -m 8192 -c 6 kubenode3
-```
-
-### Create ansnode1
-```
-./bin/newvm.sh -d 80 -m 8192 -c 6 ansnode1
-```
-
-### Create ansnode2
-```
-./bin/newvm.sh -d 80 -m 8192 -c 6 ansnode2
-```
-
-### Create ansnode3
-```
-./bin/newvm.sh -d 80 -m 8192 -c 6 ansnode3
-```
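-
-The six identical node invocations above could equally be scripted in one go (an equivalent sketch using the same newvm.sh flags):
-```
-for vm in kubenode1 kubenode2 kubenode3 ansnode1 ansnode2 ansnode3; do
-  ./bin/newvm.sh -d 80 -m 8192 -c 6 "$vm"
-done
-```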
-
-### Start a node
-Specify NOREBOOT, so the VM powers off after the install.
-```
-cd <VM directory>
-NOREBOOT=1 ./start_kvm.sh
-```
-
-When qemu starts (you see H. Peter Anvin's name), hit escape.
-At the " oot:" prompt, type 'expert console=ttyS0', and hit enter.
-
-### Install the node
-Select 'choose language'
- * english
- * united states
- * united states
- * hit tab and enter to add no additional locales.
-Select 'Detect network hardware'
- * use tab and enter to select 'Continue' to let it install usb-storage.
-Select 'Configure the network'
- * no, no vlan trunking.
- * yes, Auto-configure networking.
- * use tab and enter to hit 'Continue' to select the (default) 3 seconds to detect a link.
- * supply the hostname.
-   * for the assethost, type 'assethost'
-   * for the first kubernetes node, type 'kubenode1'.
-   * ... etc
- * supply the domain name
-   * domain name: fake.domain
-Select "Choose a mirror of the ubuntu archive"
- * select http
- * select united states
- * select us.archive.ubuntu.com
- * use tab and enter to select 'Continue' for no http proxy information
-Select "Download installer components"
- * use tab and enter to continue, selecting no components
-Select "Set up Users and Passwords"
- * yes, enable shadow passwords
- * no, do not allow root login.
- * full name: demo
- * username: demo
- * password: (given by julia, same for all VMs)
- * yes, use a weak password.
- * do not encrypt the home directory.
-Select 'configure the clock'
- * yes, set the clock using NTP
- * yes, ntp.ubuntu.com
- * yes, a berlin timezone is correct.
-Select 'detect disks'
-Select 'partition disks'
- * hit down and enter to use 'guided, use entire disk and set up LVM'.
- * pick the only option they give you for disks.
- * select 'All files in one partition'
- * yes, write the changes to disk.
- * accept the default volume group name "<hostname>-vg"
- * select 'Continue' to consume the entire disk.
- * yes, write the changes to disk.
-Select 'Install the base system'
- * hit enter to install the 'linux generic' kernel.
- * hit enter to choose 'generic' to install all of the available drivers.
-Select 'Configure the package manager'
- * Use restricted software? yes
- * Use software from the "Universe" component? yes
- * Use software from the "Multiverse" component? yes
- * Use backported software? yes
- * Use software from the "Partner" repository? no
- * enable source repositories? no
- * select 'Continue' to use the security archive.
-Select 'Select and install software'
- * use down and enter to select "Install security updates automatically"
- * scroll to the second to last item, and use space to select "OpenSSH Server", and hit continue.
-Select "Install the GRUB bootloader on a hard disk"
- * install the GRUB bootloader to the master boot record? yes.
- * select the only device displayed (/dev/sda).
- * no to installing Extra EFI just-in-case.
-Select "Finish the installation"
- * yes, the clock is set to UTC
- * select 'Continue' to reboot.
-
-### First boot
-In order to 'step back' if something goes wrong later in the install, I recommend copying the empty VMs after they have shut down:
-```
-cp -a assethost assethost-new
-cp -a ansnode1 ansnode1-new
-cp -a ansnode2 ansnode2-new
-cp -a ansnode3 ansnode3-new
-cp -a kubenode1 kubenode1-new
-cp -a kubenode2 kubenode2-new
-cp -a kubenode3 kubenode3-new
-```
-
-You must have each of the virtual machines running while installing and using Wire.
-I recommend using screen, performing the following steps for each image (see the sketch below for one way to script this):
- * change directory to the location your VM is deployed in.
- * run "DRIVE=c ./start_kvm.sh"
- * hit escape if you want to see the boot menu.
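-
-One way to script this with detached screen sessions (a sketch, assuming the VM directories created above; one session per VM):
-```
-for vm in assethost kubenode1 kubenode2 kubenode3 ansnode1 ansnode2 ansnode3; do
-  screen -dmS "$vm" bash -c "cd $vm && DRIVE=c ./start_kvm.sh"
-done
-```
-Reattach to any of them later with `screen -r <name>`.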
-
-### From this point:
-
-Switch to docs.md.
-
-Skip down to 'Making tooling available in your environment'.
-
-#### Editing the ansible inventory
-
-##### Adding host entries
-When editing the inventory, we only need seven entries in the '[all]' section: one entry for each of the VMs we are running.
-Edit the 'kubenode' entries and the 'assethost' entry as normal.
-
-Instead of creating separate cassandra, elasticsearch, and minio entries, create three 'ansnode' entries, similar to the following:
-```
-ansnode1 ansible_host=172.16.0.132
-ansnode2 ansible_host=172.16.0.133
-ansnode3 ansible_host=172.16.0.134
-```
-
-##### Updating Group Membership
-Afterwards, we need to update the lists of which nodes belong to which group, so ansible knows what to install on each node.
-
-Add all three ansnode entries into the `cassandra`, `elasticsearch`, and `minio` sections. They should look like the following:
-```
-[elasticsearch]
-# elasticsearch1
-# elasticsearch2
-# elasticsearch3
-ansnode1
-ansnode2
-ansnode3
-
-[minio]
-# minio1
-# minio2
-# minio3
-ansnode1
-ansnode2
-ansnode3
-
-[cassandra]
-# cassandra1
-# cassandra2
-# cassandra3
-ansnode1
-ansnode2
-ansnode3
-```
-
-Add two of the ansnode entries into the `restund` section:
-```
-[restund]
-ansnode1
-ansnode2
-```
-
-Add one of the ansnode entries into the `cassandra_seed` section:
-```
-[cassandra_seed]
-ansnode1
-```
-
-### ERROR: after you install restund, the restund firewall will fail to start.
-
-Delete the outbound rule to 172.16.0.0/12:
-```
-sudo ufw status numbered
-sudo ufw delete <NUMBER>
-```
-
-#### Enable the ports colocated services run on:
-cassandra:
-```
-sudo ufw allow 9042/tcp
-sudo ufw allow 9160/tcp
-sudo ufw allow 7000/tcp
-sudo ufw allow 7199/tcp
-```
-
-elasticsearch:
-```
-sudo ufw allow 9300/tcp
-sudo ufw allow 9200/tcp
-```
-
-minio:
-```
-sudo ufw allow 9000/tcp
-sudo ufw allow 9092/tcp
-```
-
-#### Install turn pointing to port 8080
-
-
diff --git a/offline/ubuntu22.04_installation.md b/offline/single_hetzner_machine_installation.md
similarity index 100%
rename from offline/ubuntu22.04_installation.md
rename to offline/single_hetzner_machine_installation.md
diff --git a/terraform/environment/hcloud.vars.tf b/terraform/environment/hcloud.vars.tf
index f9b2c2cea..c88a03760 100644
--- a/terraform/environment/hcloud.vars.tf
+++ b/terraform/environment/hcloud.vars.tf
@@ -1,5 +1,5 @@
 variable "hcloud_image" {
-  default = "ubuntu-18.04"
+  default = "ubuntu-22.04"
 }
 
 variable "hcloud_location" {