Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update documentation and automatize some steps #650

Merged
merged 6 commits into from
Sep 22, 2023
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 10 additions & 3 deletions ansible/inventory/offline/99-static
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,9 @@
# Below variables are set for all machines in the inventory.
[all:vars]
# If you need to ssh as a user that's not the same user as the one running ansible
# set ansible_user=<some_user>
# set ansible_password=<some password>
# set ansible_sudo_pass=<some password>
# ansible_user=<some_user>
# ansible_password=<some password>
# ansible_sudo_pass=<some password>
# Keep in mind this user needs to be able to sudo passwordless.
# ansible_user = root
#
Expand Down Expand Up @@ -87,6 +87,13 @@
# restund_network_interface = enp1s0
# Uncomment and set to the true public IP if you are behind 1:1 NAT
# restund_peer_udp_advertise_addr = a.b.c.d
#
# Uncomment to create firewall exception for private networks
# restund_allowed_private_network_cidrs = a.b.c.d/24
# If you install restund together with other services on the same machine
# you need to set restund_allowed_private_network_cidrs to allow these services
# to communicate on the private network. E.g. if your private network is 172.16.0.1/24
# restund_allowed_private_network_cidrs = 172.16.0.1/24
smatting marked this conversation as resolved.
Show resolved Hide resolved

# Explicitly specify the restund user id to be "root" to override the default of "997"
restund_uid = root
Expand Down
44 changes: 32 additions & 12 deletions bin/offline-cluster.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,36 @@ set -eou pipefail
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ANSIBLE_DIR="$( cd "$SCRIPT_DIR/../ansible" && pwd )"

ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/setup-offline-sources.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/kubernetes.yml --tags bastion,bootstrap-os,preinstall,container-engine
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/restund.yml --tags docker
#ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/seed-offline-docker.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/seed-offline-containerd.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/sync_time.yml -v
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/kubernetes.yml --skip-tags bootstrap-os,preinstall,container-engine
set -x

# Populate the assethost, and prepare to install images from it.
#
# Copy over binaries and debs, serves assets from the asset host, and configure
# other hosts to fetch debs from it.
#
# If this step fails partway, and you know that parts of it completed, the `--skip-tags debs,binaries,containers,containers-helm,containers-other` tags may come in handy.
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/setup-offline-sources.yml

# Run kubespray until docker is installed and runs. This allows us to preseed the docker containers that
# are part of the offline bundle
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/kubernetes.yml --tags bastion,bootstrap-os,preinstall,container-engine

# Install docker on the restund nodes
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/restund.yml --tags docker

# With ctr being installed on all nodes that need it, seed all container images:
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/seed-offline-containerd.yml

# Install NTP
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/sync_time.yml -v

# Run the rest of kubespray. This should bootstrap a kubernetes cluster successfully:
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/kubernetes.yml --skip-tags bootstrap-os,preinstall,container-engine

./bin/fix_default_router.sh
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/cassandra.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/elasticsearch.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/restund.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/minio.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline $ANSIBLE_DIR/helm_external.yml

# Deploy all other services which don't run in kubernetes.
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/cassandra.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/elasticsearch.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/restund.yml
ansible-playbook -i $ANSIBLE_DIR/inventory/offline/hosts.ini $ANSIBLE_DIR/minio.yml
94 changes: 94 additions & 0 deletions bin/offline-vm-setup.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
#!/usr/bin/env bash

# Provision offline-deploy KVM virtual machines via cloud-init "NoCloud"
# autoinstall seeds served over HTTP.
#
# Usage:
#   offline-vm-setup.sh serve_nocloud             # serve the seed dir on :3003
#   offline-vm-setup.sh create_assethost <name>   # asset-host VM
#   offline-vm-setup.sh create_node <name>        # cluster-node VM
#
# Requires OFFLINE_USERNAME and OFFLINE_PASSWORD (hashed, e.g. from mkpasswd)
# in the environment; see prepare_config below.
#
# Note: deliberately -eo (not -euo) — the command dispatch at the bottom
# probes "$1" even when no argument was given.
set -eo pipefail

# Directory the per-VM NoCloud seed files are written to and served from.
# NOTE(review): hard-coded to the demo user's home directory — confirm this
# path before reusing the script elsewhere.
nocloud_basedir=/home/demo/wire-server-deploy/nocloud

# Write the cloud-init "NoCloud" seed for a VM named $1 under
# $nocloud_basedir/$1: an autoinstall user-data file plus the (empty)
# meta-data and vendor-data files the NoCloud datasource requires.
#
# Environment:
#   OFFLINE_USERNAME - login user to create on the installed system
#   OFFLINE_PASSWORD - HASHED password, e.g. export OFFLINE_PASSWORD="$(mkpasswd)"
#
# Globals read: nocloud_basedir
prepare_config() {
  local name="$1"

  # Fail early with a clear message instead of toggling `set -u` around the
  # expansions (the old form also silently cleared -u for the caller's scope).
  local offline_username="${OFFLINE_USERNAME:?OFFLINE_USERNAME must be set}"
  local offline_password="${OFFLINE_PASSWORD:?OFFLINE_PASSWORD must be set (hashed password, e.g. from mkpasswd)}"

  local d="$nocloud_basedir/$name"
  mkdir -p "$d"
  # meta-data and vendor-data must exist (even empty) for NoCloud to accept the seed.
  touch "$d/vendor-data"
  touch "$d/meta-data"
  # NOTE(review): YAML nesting reconstructed to the conventional autoinstall
  # layout — confirm the `id:` key placement against the Ubuntu ISO in use.
  cat >"$d/user-data" <<EOF
#cloud-config
autoinstall:
  version: 1
  id: ubuntu-server-minimized
  network:
    version: 2
    ethernets:
      enp1s0:
        dhcp4: yes
  identity:
    hostname: $name
    password: $offline_password
    username: $offline_username
  ssh:
    install-server: yes
EOF
}

# Create the asset-host VM (100 GB disk, 4 vCPUs) and start an unattended
# Ubuntu install that fetches its NoCloud seed from the local HTTP server.
create_assethost () {
  local vm_name="$1"

  prepare_config "$vm_name"

  # To run the installation manually, drop --noautoconsole and remove the
  # "ds=..." part of --extra-args.
  local -a virt_args=(
    --name "$vm_name"
    --ram 8192
    --disk path=/var/kvm/images/"$vm_name".img,size=100
    --vcpus 4
    --network bridge=br0
    --graphics none
    --osinfo detect=on,require=off
    --noautoconsole
    --location /home/demo/wire-server-deploy/ubuntu.iso,kernel=casper/vmlinuz,initrd=casper/initrd
    --extra-args "console=ttyS0,115200n8 autoinstall ds=nocloud-net;s=http://172.16.0.1:3003/$vm_name"
  )
  sudo virt-install "${virt_args[@]}"
}

# Create a cluster-node VM (80 GB disk, 6 vCPUs) and start an unattended
# Ubuntu install that fetches its NoCloud seed from the local HTTP server.
create_node () {
  local vm_name="$1"

  prepare_config "$vm_name"

  # To run the installation manually, drop --noautoconsole and remove the
  # "ds=..." part of --extra-args.
  local -a virt_args=(
    --name "$vm_name"
    --ram 8192
    --disk path=/var/kvm/images/"$vm_name".img,size=80
    --vcpus 6
    --network bridge=br0
    --graphics none
    --osinfo detect=on,require=off
    --noautoconsole
    --location /home/demo/wire-server-deploy/ubuntu.iso,kernel=casper/vmlinuz,initrd=casper/initrd
    --extra-args "console=ttyS0,115200n8 autoinstall ds=nocloud-net;s=http://172.16.0.1:3003/$vm_name"
  )
  sudo virt-install "${virt_args[@]}"
}

# Command dispatch: $1 selects the action; create_* additionally take the
# VM name as $2 (enforced via `set -u`, matching the per-branch checks the
# three original `if` blocks performed). An unknown or missing command is,
# as before, a silent no-op.
case "${1-}" in
  serve_nocloud)
    # Serve the NoCloud seed directory over HTTP; installers fetch from :3003.
    mkdir -p "$nocloud_basedir"
    cd "$nocloud_basedir"
    python3 -m http.server 3003
    ;;
  create_node)
    set -u
    create_node "$2"
    ;;
  create_assethost)
    set -u
    create_assethost "$2"
    ;;
esac
Loading