# 25_complete_setup.yml
# Forked from kubealex/libvirt-k8s-provisioner
---
- name: Complete cluster setup
  hosts: vm_host
  vars_files:
    - vars/k8s_cluster.yml
  tasks:
    - name: Prepare playbook for cluster deletion
      ansible.builtin.template:
        src: templates/cleanup-playbook.yml.j2
        dest: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/{{ k8s.cluster_name | default('k8s-test', true) }}-cleanup-playbook.yml" # noqa yaml[line-length]
        mode: "0755"
    - name: Delete image file
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      loop:
        - /tmp/{{ image_name }}.qcow2
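
    # The next task adds a worker role label via JSON Patch. In a JSON
    # Pointer, "~1" is the escape for "/", so the path below targets the
    # label key node-role.kubernetes.io/worker (RFC 6901).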
    - name: Label worker nodes
      kubernetes.core.k8s_json_patch:
        kind: Node
        name: "{{ hostvars[item].host_fqdn }}"
        kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig"
        patch:
          - op: add
            path: /metadata/labels/node-role.kubernetes.io~1worker
            value: ""
      loop: "{{ groups['workers'] }}"
    - name: Remove taint from master nodes
      kubernetes.core.k8s_json_patch:
        kind: Node
        name: "{{ hostvars[item].host_fqdn }}"
        kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig"
        patch:
          - op: remove
            path: /spec/taints/0
      loop: "{{ groups['masters'] }}"
      when: k8s.master_schedulable
      register: result
      failed_when:
        - result.status is defined
        - result.status != 422
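
# A quick way to verify the result (a sketch, assuming kubectl is pointed at
# the generated admin.kubeconfig):
#   kubectl get nodes --show-labels
#   kubectl describe nodes | grep -i taint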