-
Notifications
You must be signed in to change notification settings - Fork 0
/
install-slate-cvmfs.sh
203 lines (158 loc) · 5.53 KB
/
install-slate-cvmfs.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
#!/bin/bash
# First, go here: https://portal.slateci.io/
# And visit the "CLI Access" page to get your token and install it to your
# server.

### You should change these:
# The name of your cluster as registered in SLATE
CLUSTERNAME="CLUSTER NAME"
# The initial group who will be able to access this cluster.
# Supplementary groups can be added later.
INITIALGROUP="GROUP NAME"
# Organization name. Simplest to keep this as a single word or acronym for now
ORGNAME="ORG NAME"
# The pool of IP address you want the 'load balancer' to allocate.
IPPOOL="IP RANGE OR CIDR"
############################

# Refuse to run until the placeholders above have been customized; running
# with the literal defaults would register a bogus cluster with SLATE.
for var in CLUSTERNAME INITIALGROUP ORGNAME IPPOOL; do
  case "${!var}" in
    "CLUSTER NAME"|"GROUP NAME"|"ORG NAME"|"IP RANGE OR CIDR")
      echo "Please edit this script and set ${var} before running." >&2
      exit 1
      ;;
  esac
done

# The SLATE CLI reads its credential from ~/.slate/token (see header above).
if [[ -f ~/.slate/token ]]; then
  echo "token exists, continuing.."
else
  echo "SLATE token doesn't exist. Please view the header of this script before continuing." >&2
  exit 1
fi
echo -n "Checking OS vendor... "
# hostnamectl prints a line like "Operating System: CentOS Linux 7 (Core)".
VENDOR=$(hostnamectl | grep 'Operating System')
# Fix: the original used '2>&1 >/dev/null', which redirects stderr to the
# *current* stdout (the terminal) before silencing stdout, so grep noise
# leaked.  'grep -q' both suppresses output and supplies the exit status,
# letting the condition test the command directly instead of via $?.
if echo "$VENDOR" | grep -q -e 'CentOS\|Scientific\|Red\ Hat'; then
  echo "Seems to be a Red Hat variant"
  echo -n "Checking version... "
  if echo "$VENDOR" | grep -q 7; then
    echo "Seems to be EL7"
  else
    echo "Doesn't seem to be EL7. Cowardly refusing to continue." >&2
    exit 1
  fi
else
  echo "Doesn't seem to be Red Hat variant. Cowardly refusing to continue." >&2
  exit 1
fi
echo -n "Checking SELinux status... "
# Fix: 'sestatus | awk '"'"'{print $3}'"'"'' printed field 3 of *every* output
# line; when SELinux is enabled, sestatus emits many lines, so the multi-line
# result never compared equal to "enabled" and enforcing hosts were silently
# skipped.  Anchor the match to the "SELinux status:" line only.
SESTATUS=$(sestatus | awk '/^SELinux status/{print $3}')
if [[ "$SESTATUS" == "enabled" ]]; then
  echo "SELinux is enabled. Disabling... may require reboot"
  # Set SELinux in permissive mode (effectively disabling it)
  setenforce 0
  sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
else
  echo "SELinux is disabled or permissive. Continuing..."
fi

echo "Disabling swap..."
# Kubernetes (kubelet) refuses to run with swap enabled; turn it off now and
# comment the swap entries out of fstab so it stays off across reboots.
swapoff -a
sed -e '/swap/s/^/#/g' -i /etc/fstab
echo "Installing Docker CE..."
# yum-utils provides yum-config-manager (used below); the device-mapper
# packages back Docker's devicemapper storage driver on EL7.
yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce docker-ce-cli containerd.io -y
systemctl enable --now docker
echo "Installing Kubernetes YUM repo..."
# Write the upstream Kubernetes repo definition.  'exclude=kube*' keeps a
# plain 'yum update' from unexpectedly upgrading kube packages; the install
# below opts back in with --disableexcludes=kubernetes.
# NOTE(review): packages.cloud.google.com yum repos have since been
# deprecated upstream — verify these URLs still resolve.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
echo "Installing Kubelet, Kubeadm, Kubectl..."
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
echo "Enabling Kubelet..."
systemctl enable --now kubelet
echo "Adding sysctl tweaks for EL7..."
# Ensure bridged traffic traverses iptables; CNI plugins rely on this.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
echo "Initializing Kubernetes cluster with 192.168 RFC1918 range for Pod CIDR..."
# The pod CIDR must match what the Calico manifest applied below expects
# (192.168.0.0/16).  Test the command directly rather than inspecting $?.
if ! kubeadm init --pod-network-cidr=192.168.0.0/16; then
  echo "kubeadm init failed" 1>&2
  exit 1
fi

echo "Copying Kubernetes config to root's homedir..."
# kubectl reads ~/.kube/config by default; without this copy every kubectl
# invocation below would fail to reach the new cluster.
mkdir -p ~/.kube/
cp -f /etc/kubernetes/admin.conf ~/.kube/config

echo "Installing Calico networking plugin..."
kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml

echo "Removing Master taint, so we can run pods on a single-node cluster..."
kubectl taint nodes --all node-role.kubernetes.io/master-
echo "Installing MetalLB load balancer..."
kubectl apply -f https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/metallb.yaml
# Render the MetalLB address-pool ConfigMap.  YAML is indentation-sensitive,
# so the nesting below is load-bearing.  Use a private temp file instead of
# a predictable name in the current directory.
MLB_CONFIG=$(mktemp)
cat << EOF > "$MLB_CONFIG"
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - ${IPPOOL}
EOF
kubectl create -f "$MLB_CONFIG"
rm -f "$MLB_CONFIG"
echo "Installing SLATE repository and client..."
# Unsigned repo (gpgcheck=0): packages come straight from the SLATE CI
# artifact server.
cat << EOF > /etc/yum.repos.d/slate.repo
[slate-client]
name=SLATE-client
baseurl=https://jenkins.slateci.io/artifacts/client/
enabled=1
gpgcheck=0
repo_gpgcheck=0
EOF
yum install slate-client -y
# Quote the user-supplied names: unquoted values containing spaces (like the
# shipped placeholders) would be word-split into multiple arguments.
slate cluster create --group "$INITIALGROUP" "$CLUSTERNAME" --org "$ORGNAME" -y
echo "Deploying squid proxy instance"
# Values file for the osg-frontier-squid SLATE application.  YAML nesting is
# significant: Port/ExternalVisibility belong under Service, the cache
# settings under SquidConf.  The IPRange covers all RFC1918 space so any
# in-cluster pod may use the proxy.
cat << EOF > squidconfig
Instance: cvmfs
Service:
  Port: 3128
  ExternalVisibility: ClusterIP
SquidConf:
  CacheMem: 128
  CacheSize: 10000
  IPRange: 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
EOF
# Quote user-supplied values; 'rm -f' suffices for a regular file (-r was
# needless).
slate app install osg-frontier-squid --group "$INITIALGROUP" --cluster "$CLUSTERNAME" --conf squidconfig
rm -f squidconfig
echo "Adding CVMFS"
kubectl create namespace cvmfs
# Discover the ClusterIP of the squid service deployed above so CVMFS can use
# it as its HTTP proxy.  Assign before exporting so a kubectl failure isn't
# masked by 'export' always succeeding.
# NOTE(review): this grabs the first service in the group namespace — confirm
# the squid service is the only one there.
CLUSTER_IP=$(kubectl get --namespace "slate-group-$INITIALGROUP" -o jsonpath="{.items[0].spec.clusterIP}" services)
export CLUSTER_IP
yum -y install git
# Clone the CVMFS-on-Kubernetes manifests directly on the 'slate' branch
# instead of cloning then checking out (same result, fewer steps).
git clone -b slate https://github.com/Mansalu/prp-osg-cvmfs.git
# CVMFS client configuration: OSG stratum-1 servers, GeoAPI server selection,
# and the in-cluster squid as HTTP proxy.
cat << EOF > default.local
CVMFS_SERVER_URL="http://cvmfs-s1bnl.opensciencegrid.org:8000/cvmfs/@fqrn@;http://cvmfs-s1fnal.opensciencegrid.org:8000/cvmfs/@fqrn@;http://cvmfs-s1goc.opensciencegrid.org:8000/cvmfs/@fqrn@"
CVMFS_KEYS_DIR=/etc/cvmfs/keys/opensciencegrid.org/
CVMFS_USE_GEOAPI=yes
CVMFS_HTTP_PROXY="http://$CLUSTER_IP:3128"
CVMFS_QUOTA_LIMIT=5000
CVMFS_REPOSITORIES=atlas.cern.ch,atlas-condb.cern.ch,atlas-nightlies.cern.ch,sft.cern.ch,geant4.cern.ch,grid.cern.ch,cms.cern.ch,oasis.opensciencegrid.org
EOF
kubectl create configmap cvmfs-osg-config -n cvmfs --from-file=default.local
rm -f default.local
# Service accounts, CSI driver processes, and storage classes for CVMFS.
kubectl create -f prp-osg-cvmfs/k8s/cvmfs/accounts/
kubectl create -f prp-osg-cvmfs/k8s/cvmfs/csi-processes/
kubectl create -f prp-osg-cvmfs/k8s/cvmfs/storageclasses/
rm -rf prp-osg-cvmfs
kubectl get all -n cvmfs
echo "Your SLATE cluster has been successfully installed"