name: microk8s
version-script: |
  . build-scripts/prepare-env.sh > /dev/null
  echo $KUBE_VERSION
version: "latest"
summary: Kubernetes for workstations and appliances
description: |-
  MicroK8s is a small, fast, secure, single node Kubernetes that installs on
  just about any Linux box. Use it for offline development, prototyping,
  testing, or use it on a VM as a small, cheap, reliable k8s for CI/CD. It's
  also a great k8s for appliances - develop your IoT apps for k8s and deploy
  them to MicroK8s on your boxes.
grade: stable
confinement: classic
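# The snap version is generated at build time by version-script above; the
# "latest" placeholder is overwritten. A typical local build-and-install cycle
# (illustrative commands; exact flags depend on your snapcraft setup):
#   snapcraft
#   sudo snap install microk8s_*.snap --classic --dangerous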
apps:
  daemon-etcd:
    command: run-etcd-with-args
    daemon: simple
  daemon-docker:
    command: run-docker-with-args
    daemon: simple
  daemon-apiserver:
    command: run-with-config-args kube-apiserver
    daemon: simple
  daemon-apiserver-kicker:
    command: apiservice-kicker
    daemon: simple
  daemon-controller-manager:
    command: run-with-config-args kube-controller-manager
    daemon: simple
  daemon-scheduler:
    command: run-with-config-args kube-scheduler
    daemon: simple
  daemon-kubelet:
    command: run-with-config-args kubelet
    daemon: simple
  daemon-proxy:
    command: run-with-config-args kube-proxy
    daemon: simple
  kubectl:
    command: microk8s-kubectl.wrapper
  docker:
    command: microk8s-docker.wrapper
  inspect:
    command: sudo SNAP_DATA=${SNAP_DATA} ${SNAP}/inspect.sh
  enable:
    command: microk8s-enable.wrapper
  disable:
    command: microk8s-disable.wrapper
  start:
    command: microk8s-start.wrapper
  stop:
    command: microk8s-stop.wrapper
  status:
    command: microk8s-status.wrapper
  config:
    command: microk8s-config.wrapper
  reset:
    command: microk8s-reset.wrapper
  istioctl:
    command: microk8s-istioctl.wrapper
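  # Once the snap is installed, each non-daemon app above is exposed on the
  # host as `microk8s.<app>`, for example:
  #   microk8s.status
  #   microk8s.kubectl get nodes
  #   microk8s.enable dns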
parts:
  libnftnl:
    plugin: autotools
    source: https://www.netfilter.org/projects/libnftnl/files/libnftnl-1.0.9.tar.bz2
    build-packages:
      - libjansson-dev
      - libmnl-dev
  iptables:
    after:
      - libnftnl
    source: https://www.netfilter.org/projects/iptables/files/iptables-1.6.1.tar.bz2
    plugin: autotools
    build-packages:
      - bison
      - flex
      - libmnl-dev
      - libnfnetlink-dev
      - libnetfilter-conntrack3
      - libnetfilter-conntrack-dev
    configflags:
      - "--disable-shared"
      - "--enable-static"
    prime: [ -bin/iptables-xml ]
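  # iptables is built statically (see configflags above) so the binaries carry
  # no dependency on host netfilter libraries; the docker part below excludes
  # the distro's own iptables files from its stage, presumably so they cannot
  # shadow this build.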
  docker:
    after: [iptables]
    plugin: dump
    stage-packages:
      - conntrack
      - docker.io
      - aufs-tools
      - gawk
      - sed
    source: .
    stage:
      - -sbin/xtables-multi
      - -sbin/iptables*
      - -lib/xtables
    override-build: "true"
  microk8s:
    after: [docker]
    plugin: dump
    build-attributes: [no-patchelf]
    build-packages:
      - curl
      - openssl
      - file
    stage-packages:
      - net-tools
      - util-linux
      - zfsutils-linux
      - socat
      - iproute2
      - dpkg
    source: .
    override-build: |
      set -eu
      . build-scripts/prepare-env.sh
      if [ -z "$KUBE_SNAP_BINS" ]; then
        echo "Downloading binaries from upstream"
        # Also sets KUBE_SNAP_BINS
        . build-scripts/fetch-binaries.sh
      fi
      echo "Setting default daemon configs"
      cp -r $KUBE_SNAP_ROOT/microk8s-resources/default-args .
      echo "Building certs"
      build-scripts/build-certs.sh
      cp -r $KUBE_SNAP_ROOT/microk8s-resources/certs-beta .
      echo "Preparing cni"
      mkdir -p opt/cni/bin/
      cp $KUBE_SNAP_BINS/cni/* opt/cni/bin/
      echo "Preparing dockerd"
      cp $KUBE_SNAP_ROOT/microk8s-resources/docker-profile .
      echo "Preparing etcd"
      cp $KUBE_SNAP_BINS/etcd/etcd .
      cp $KUBE_SNAP_BINS/etcd/etcdctl .
      echo "Preparing kube-apiserver"
      cp $KUBE_SNAP_BINS/$KUBE_ARCH/kube-apiserver .
      cp $KUBE_SNAP_ROOT/microk8s-resources/known_token.csv .
      cp $KUBE_SNAP_ROOT/microk8s-resources/basic_auth.csv .
      echo "Preparing kube-controller-manager"
      cp $KUBE_SNAP_BINS/$KUBE_ARCH/kube-controller-manager .
      echo "Preparing kube-scheduler"
      cp $KUBE_SNAP_BINS/$KUBE_ARCH/kube-scheduler .
      echo "Preparing kubelet"
      mkdir -p configs
      cp $KUBE_SNAP_BINS/$KUBE_ARCH/kubelet .
      cp $KUBE_SNAP_ROOT/microk8s-resources/kubelet.config ./configs/
      echo "Preparing kube-proxy"
      cp $KUBE_SNAP_BINS/$KUBE_ARCH/kube-proxy .
      cp $KUBE_SNAP_ROOT/microk8s-resources/kubeproxy.config .
echo "Preparing kubelet"
      cp $KUBE_SNAP_BINS/$KUBE_ARCH/kubectl .
      echo "Preparing user config"
      cp $KUBE_SNAP_ROOT/microk8s-resources/client.config .
      echo "Creating commands and wrappers"
      cp $KUBE_SNAP_ROOT/microk8s-resources/wrappers/* .
      cp -r $KUBE_SNAP_ROOT/microk8s-resources/actions .
      if [ "${ARCH}" = "arm64" ]
      then
        # Some actions are not available on arm64
        # Nvidia support
        rm "actions/enable.gpu.sh"
        rm "actions/disable.gpu.sh"
        rm "actions/gpu.yaml"
        # Istio support
        rm "actions/enable.istio.sh"
        rm "actions/disable.istio.sh"
      else
        # Istio addon
        echo "Preparing istio"
        cp $KUBE_SNAP_BINS/istioctl .
        cp -r $KUBE_SNAP_BINS/istio-yaml ./actions/istio
      fi
      echo "Creating inspect hook"
      cp $KUBE_SNAP_ROOT/scripts/inspect.sh .
      snapcraftctl build
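  # Everything copied into the build directory above is dumped into the snap
  # root by `snapcraftctl build`, which is why the app commands can reference
  # the wrappers without a path prefix.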
  # Unfortunately we cannot add package repositories to our snaps
  # (https://forum.snapcraft.io/t/proposal-additional-package-sources/2199),
  # so we handpick the debs we need.
  # To update these debs, follow the instructions in
  # https://github.com/NVIDIA/nvidia-docker and add the
  # https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list repo.
  # Then use apt-cache show <package> to make sure that
  # a) the dockerd we ship in this snap is supported by nvidia-container-runtime, and
  # b) the version dependencies of each package are met.
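  # For example, an illustrative dependency check (assuming the repository
  # above has been added to apt on a scratch machine):
  #   apt-cache show nvidia-container-runtime | grep -E '^(Package|Version|Depends)'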
  nvidia-runtime:
    plugin: dump
    source: https://nvidia.github.io/nvidia-container-runtime/ubuntu16.04/amd64/nvidia-container-runtime_2.0.0+docker18.06.1-1_amd64.deb
    source-type: deb
    override-build: |
      set -eu
      ARCH=$(dpkg --print-architecture)
      if ! [ "${ARCH}" = "arm64" ]
      then
        snapcraftctl build
      else
        echo "Skipped"
      fi
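  # The same architecture guard is repeated in the three parts below: all of
  # these NVIDIA debs are amd64-only, so their build step is skipped on arm64.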
  nvidia-runtime-hook:
    plugin: dump
    source: https://nvidia.github.io/nvidia-container-runtime/ubuntu16.04/amd64/nvidia-container-runtime-hook_1.4.0-1_amd64.deb
    source-type: deb
    override-build: |
      set -eu
      ARCH=$(dpkg --print-architecture)
      if ! [ "${ARCH}" = "arm64" ]
      then
        snapcraftctl build
      else
        echo "Skipped"
      fi
  libnvidia:
    plugin: dump
    source: https://nvidia.github.io/libnvidia-container/ubuntu16.04/amd64/libnvidia-container1_1.0.0-1_amd64.deb
    source-type: deb
    override-build: |
      set -eu
      ARCH=$(dpkg --print-architecture)
      if ! [ "${ARCH}" = "arm64" ]
      then
        snapcraftctl build
      else
        echo "Skipped"
      fi
  libnvidia-tools:
    plugin: dump
    source: https://nvidia.github.io/libnvidia-container/ubuntu16.04/amd64/libnvidia-container-tools_1.0.0-1_amd64.deb
    source-type: deb
    override-build: |
      set -eu
      ARCH=$(dpkg --print-architecture)
      if ! [ "${ARCH}" = "arm64" ]
      then
        snapcraftctl build
      else
        echo "Skipped"
      fi