diff --git a/packer/docker/centos/Dockerfile b/packer/docker/centos/Dockerfile
new file mode 100644
index 0000000..e8ffeb3
--- /dev/null
+++ b/packer/docker/centos/Dockerfile
@@ -0,0 +1,22 @@
+FROM centos:7
+
+# This Dockerfile is used to build the contiv/centos-systemd image, used by the packer script.
+# This is based on the Dockerfile sample here:
+# https://github.com/docker-library/docs/tree/master/centos#dockerfile-for-systemd-base-image
+LABEL maintainer="Madhav Puri"
+
+ENV container docker
+
+RUN (cd /lib/systemd/system/sysinit.target.wants/; \
+for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
+rm -f /lib/systemd/system/multi-user.target.wants/*;\
+rm -f /etc/systemd/system/*.wants/*;\
+rm -f /lib/systemd/system/local-fs.target.wants/*; \
+rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
+rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
+rm -f /lib/systemd/system/basic.target.wants/*;\
+rm -f /lib/systemd/system/anaconda.target.wants/*;
+
+VOLUME [ "/sys/fs/cgroup" ]
+
+CMD ["/usr/sbin/init"]
diff --git a/packer/docker/centos/Makefile b/packer/docker/centos/Makefile
new file mode 100644
index 0000000..d9433c7
--- /dev/null
+++ b/packer/docker/centos/Makefile
@@ -0,0 +1,13 @@
+all: build start stop
+
+start:
+	docker run -it --name contiv-centos7 contiv/centos7 /bin/bash
+
+stop:
+	docker rm -f contiv-centos7
+
+build:
+	version=$$(cat VERSION) atlas_token="dummy" packer build --only build --force centos.json
+
+release-build:
+	version=$$(cat VERSION) atlas_token=${ATLAS_TOKEN} packer build --only release --force centos.json
diff --git a/packer/docker/centos/VERSION b/packer/docker/centos/VERSION
new file mode 100644
index 0000000..49d5957
--- /dev/null
+++ b/packer/docker/centos/VERSION
@@ -0,0 +1 @@
+0.1
diff --git a/packer/docker/centos/centos.json b/packer/docker/centos/centos.json
new file mode 100644
index 0000000..5d75b20
--- /dev/null
+++ b/packer/docker/centos/centos.json
@@ -0,0 +1,85 @@
+{
+  "builders": [
+    {
+      "name":
"build",
+      "type": "docker",
+      "image": "contiv/centos-systemd:latest",
+      "pull": true,
+      "commit": true,
+      "volumes": {
+        "/sys/fs/cgroup":"/sys/fs/cgroup:ro",
+        "/tmp/$(mktemp -d)":"/run"
+      },
+      "run_command": ["--privileged", "-d", "-i", "-t", "{{.Image}}", "/usr/sbin/init"]
+    },
+    {
+      "name": "release",
+      "type": "docker",
+      "image": "contiv/centos-systemd:latest",
+      "pull": true,
+      "commit": true,
+      "volumes": {
+        "/sys/fs/cgroup":"/sys/fs/cgroup:ro",
+        "/tmp/systemdtest":"/run"
+      },
+      "run_command": ["--privileged", "-d", "-i", "-t", "{{.Image}}", "/usr/sbin/init"]
+    }
+  ],
+  "post-processors": [[
+    {
+      "type": "docker-tag",
+      "repository": "contiv/centos7",
+      "tag": "{{ user `version` }}",
+      "only": ["build", "release"]
+    },
+    {
+      "type": "docker-push",
+      "only": ["release"]
+    }
+  ]],
+  "provisioners": [
+    {
+      "type": "shell",
+      "environment_vars": [
+        "http_proxy={{user `http_proxy`}}",
+        "https_proxy={{user `https_proxy`}}",
+        "ftp_proxy={{user `ftp_proxy`}}",
+        "rsync_proxy={{user `rsync_proxy`}}",
+        "no_proxy={{user `no_proxy`}}"
+      ],
+      "scripts": [
+        "../../centos/script/ansible.sh",
+        "script/packer.sh"
+      ]
+    },
+    {
+      "type": "ansible-local",
+      "playbook_dir": "../../../vendor/ansible",
+      "playbook_file": "../../../vendor/ansible/site.yml",
+      "inventory_groups": "devtest",
+      "extra_arguments": [
+        "--extra-vars",
+        "'{\"env\":{ \"http_proxy\":\"{{user `http_proxy`}}\", \"https_proxy\":\"{{user `https_proxy`}}\", \"no_proxy\":\"{{user `no_proxy`}}\", \"ftp_proxy\":\"{{user `ftp_proxy`}}\", \"rsync_proxy\":\"{{user `rsync_proxy`}}\" }, \"validate_certs\":\"no\", \"docker_version\":\"1.11.1\"}'",
+        "--tags",
+        "prebake-for-dev,prebake-for-test"
+      ]
+    },
+    {
+      "type": "shell",
+      "scripts": [
+        "script/post_provision.sh"
+      ]
+    }
+  ],
+  "variables": {
+    "ftp_proxy": "{{env `ftp_proxy`}}",
+    "http_proxy": "{{env `http_proxy`}}",
+    "https_proxy": "{{env `https_proxy`}}",
+    "no_proxy": "{{env `no_proxy`}}",
+    "rsync_proxy": "{{env `rsync_proxy`}}",
"ssh_password": "vagrant", + "ssh_username": "vagrant", + "atlas_token": "{{ env `atlas_token` }}", + "version": "{{ env `version` }}" + } +} diff --git a/packer/docker/centos/script/packer.sh b/packer/docker/centos/script/packer.sh new file mode 100644 index 0000000..f6178e2 --- /dev/null +++ b/packer/docker/centos/script/packer.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +set -xe + +rm -f /usr/sbin/packer diff --git a/packer/docker/centos/script/post_provision.sh b/packer/docker/centos/script/post_provision.sh new file mode 100644 index 0000000..21d619b --- /dev/null +++ b/packer/docker/centos/script/post_provision.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -xe + +# enable docker for auto-restart, and cleanly shut it down +# before stopping the container +systemctl enable docker +systemctl stop docker + +# cleanup any temp files created as part of provision +rm -rf /tmp/* diff --git a/vendor/ansible/Makefile b/vendor/ansible/Makefile new file mode 100644 index 0000000..9a718b8 --- /dev/null +++ b/vendor/ansible/Makefile @@ -0,0 +1,10 @@ +.PHONY: test-up test-provision test-cleanup + +test-up: + vagrant up + +test-provision: + vagrant provision + +test-cleanup: + CONTIV_ANSIBLE_PLAYBOOK="./cleanup.yml" CONTIV_ANSIBLE_TAGS="all" vagrant provision diff --git a/vendor/ansible/cleanup.yml b/vendor/ansible/cleanup.yml index 4fd7d7b..ddb335a 100644 --- a/vendor/ansible/cleanup.yml +++ b/vendor/ansible/cleanup.yml @@ -4,7 +4,7 @@ # Note: cleanup is not expected to fail, so we set ignore_errors to yes here - hosts: all - sudo: true + become: true tasks: - include_vars: roles/{{ item }}/defaults/main.yml with_items: @@ -20,7 +20,7 @@ - contiv_storage - swarm - ucp - - docker - etcd - ucarp + - docker ignore_errors: yes diff --git a/vendor/ansible/roles/base/tasks/redhat_tasks.yml b/vendor/ansible/roles/base/tasks/redhat_tasks.yml index bd3fb18..78c4154 100644 --- a/vendor/ansible/roles/base/tasks/redhat_tasks.yml +++ b/vendor/ansible/roles/base/tasks/redhat_tasks.yml @@ -10,6 +10,7 @@ 
update_cache: true state: latest with_items: + - yum-utils - ntp - unzip - bzip2 @@ -18,6 +19,8 @@ - bash-completion - kernel #keep kernel up to date - libselinux-python + - e2fsprogs + - openssh-server - name: install and start ntp service: name=ntpd state=started enabled=yes diff --git a/vendor/ansible/roles/base/tasks/ubuntu_tasks.yml b/vendor/ansible/roles/base/tasks/ubuntu_tasks.yml index 0e597f2..1b81a33 100644 --- a/vendor/ansible/roles/base/tasks/ubuntu_tasks.yml +++ b/vendor/ansible/roles/base/tasks/ubuntu_tasks.yml @@ -15,3 +15,5 @@ - python-software-properties - bash-completion - python-selinux + - e2fsprogs + - openssh-server diff --git a/vendor/ansible/roles/contiv_cluster/defaults/main.yml b/vendor/ansible/roles/contiv_cluster/defaults/main.yml index 729ee12..44c58f4 100644 --- a/vendor/ansible/roles/contiv_cluster/defaults/main.yml +++ b/vendor/ansible/roles/contiv_cluster/defaults/main.yml @@ -9,7 +9,7 @@ collins_guest_port: 9000 clusterm_args_file: "clusterm.args" clusterm_conf_file: "clusterm.conf" -contiv_cluster_version: "v0.1-05-12-2016.08-27-16.UTC" +contiv_cluster_version: "v0.1-05-14-2016.00-33-02.UTC" contiv_cluster_tar_file: "cluster-{{ contiv_cluster_version }}.tar.bz2" contiv_cluster_src_file: "https://github.com/contiv/cluster/releases/download/{{ contiv_cluster_version }}/{{ contiv_cluster_tar_file }}" contiv_cluster_dest_file: "/tmp/{{ contiv_cluster_tar_file }}" diff --git a/vendor/ansible/roles/contiv_cluster/tasks/main.yml b/vendor/ansible/roles/contiv_cluster/tasks/main.yml index 717a03f..f9e3740 100644 --- a/vendor/ansible/roles/contiv_cluster/tasks/main.yml +++ b/vendor/ansible/roles/contiv_cluster/tasks/main.yml @@ -45,12 +45,12 @@ - name: copy conf files for clusterm copy: - src: "{{ item }}" - dest: /etc/default/clusterm/{{ item }} - force: yes + src: "{{ item.file }}" + dest: "/etc/default/clusterm/{{ item.file }}" + force: "{{ item.force }}" with_items: - - "{{ clusterm_args_file }}" - - "{{ clusterm_conf_file }}" + - { 
file: "{{ clusterm_args_file }}", force: "yes" } + - { file: "{{ clusterm_conf_file }}", force: "no" } - name: copy systemd units for clusterm template: src=clusterm.j2 dest=/etc/systemd/system/clusterm.service diff --git a/vendor/ansible/roles/contiv_network/defaults/main.yml b/vendor/ansible/roles/contiv_network/defaults/main.yml index 4d6add5..79b9af8 100644 --- a/vendor/ansible/roles/contiv_network/defaults/main.yml +++ b/vendor/ansible/roles/contiv_network/defaults/main.yml @@ -16,15 +16,16 @@ bgp_port: 179 vxlan_port: 4789 netplugin_rule_comment: "contiv network traffic" -contiv_network_version: "v0.1-05-08-2016.20-28-46.UTC" +contiv_network_version: "v0.1-05-16-2016.08-29-25.UTC" contiv_network_tar_file: "netplugin-{{ contiv_network_version }}.tar.bz2" contiv_network_src_file: "https://github.com/contiv/netplugin/releases/download/{{ contiv_network_version }}/{{ contiv_network_tar_file }}" contiv_network_dest_file: "/tmp/{{ contiv_network_tar_file }}" -contivctl_version: "v0.0.0-03-10-2016.22-13-24.UTC" -contivctl_tar_file: "contivctl-{{ contivctl_version }}.tar.bz2" -contivctl_src_file: "https://github.com/contiv/contivctl/releases/download/{{ contivctl_version }}/{{ contivctl_tar_file }}" -contivctl_dest_file: "/tmp/{{ contivctl_tar_file }}" +contivctl_version_no_v: "0.1-05-26-2016.22-31-22.UTC" +contivctl_version: "v{{ contivctl_version_no_v }}" +contivctl_tar_file: "{{ contivctl_version }}.tar.gz" +contivctl_src_file: "https://github.com/contiv/contivctl/archive/{{ contivctl_tar_file }}" +contivctl_dest_file: "/tmp/contivctl-{{ contivctl_tar_file }}" apic_epg_bridge_domain: "not_specified" apic_contracts_unrestricted_mode: "no" diff --git a/vendor/ansible/roles/contiv_network/files/netplugin.service b/vendor/ansible/roles/contiv_network/files/netplugin.service index 8f74594..4b83e22 100644 --- a/vendor/ansible/roles/contiv_network/files/netplugin.service +++ b/vendor/ansible/roles/contiv_network/files/netplugin.service @@ -5,4 +5,5 @@ 
After=auditd.service systemd-user-sessions.service time-sync.target etcd.service [Service] EnvironmentFile=/etc/default/netplugin ExecStart=/usr/bin/netplugin $NETPLUGIN_ARGS +ExecStopPost=/usr/bin/rm -f /run/docker/plugins/netplugin.sock KillMode=control-group diff --git a/vendor/ansible/roles/contiv_network/tasks/aci_tasks.yml b/vendor/ansible/roles/contiv_network/tasks/aci_tasks.yml index f6201fd..c3a1af8 100644 --- a/vendor/ansible/roles/contiv_network/tasks/aci_tasks.yml +++ b/vendor/ansible/roles/contiv_network/tasks/aci_tasks.yml @@ -13,4 +13,4 @@ service: name=aci-gw state=started - name: set aci mode - shell: contivctl net global set --fabric-mode aci + shell: contivctl network global set --fabric-mode aci diff --git a/vendor/ansible/roles/contiv_network/tasks/main.yml b/vendor/ansible/roles/contiv_network/tasks/main.yml index c75d8f5..c3df84b 100644 --- a/vendor/ansible/roles/contiv_network/tasks/main.yml +++ b/vendor/ansible/roles/contiv_network/tasks/main.yml @@ -93,7 +93,7 @@ force: no - name: install contivctl - shell: tar vxjf {{ contivctl_dest_file }} + shell: tar vxzf {{ contivctl_dest_file }} --strip-components=1 contivctl-{{ contivctl_version_no_v }}/contivctl args: chdir: /usr/bin/ diff --git a/vendor/ansible/roles/contiv_network/tasks/ovs.yml b/vendor/ansible/roles/contiv_network/tasks/ovs.yml index 53ad5b7..45101c1 100644 --- a/vendor/ansible/roles/contiv_network/tasks/ovs.yml +++ b/vendor/ansible/roles/contiv_network/tasks/ovs.yml @@ -8,15 +8,15 @@ url: "{{ item.url }}" with_items: - { - url: "https://cisco.box.com/shared/static/51eo9dcw04qx2y1f14n99y4yt5kug3q4.rpm", - dest: /tmp/openvswitch-2.3.1-1.x86_64.rpm + url: "https://cisco.box.com/shared/static/zzmpe1zesdpf270k9pml40rlm4o8fs56.rpm", + dest: /tmp/openvswitch-2.3.1-2.el7.x86_64.rpm } when: ansible_os_family == "RedHat" tags: - prebake-for-dev - name: install ovs (redhat) - yum: name=/tmp/openvswitch-2.3.1-1.x86_64.rpm state=present + yum: name=/tmp/openvswitch-2.3.1-2.el7.x86_64.rpm 
state=present when: ansible_os_family == "RedHat" tags: - prebake-for-dev diff --git a/vendor/ansible/roles/contiv_storage/defaults/main.yml b/vendor/ansible/roles/contiv_storage/defaults/main.yml index ac0adb2..2764763 100644 --- a/vendor/ansible/roles/contiv_storage/defaults/main.yml +++ b/vendor/ansible/roles/contiv_storage/defaults/main.yml @@ -2,7 +2,7 @@ # Role defaults for contiv_storage -contiv_storage_version: "v0.0.0-05-12-2016.08-24-33.UTC" +contiv_storage_version: "v0.0.0-05-12-2016.07-23-53.UTC" contiv_storage_tar_file: "volplugin-{{ contiv_storage_version }}.tar.bz2" contiv_storage_src_file: "https://github.com/contiv/volplugin/releases/download/{{ contiv_storage_version }}/{{ contiv_storage_tar_file }}" contiv_storage_dest_file: "/tmp/{{ contiv_storage_tar_file }}" diff --git a/vendor/ansible/roles/contiv_storage/files/volplugin.service b/vendor/ansible/roles/contiv_storage/files/volplugin.service index 99f0bf0..3047086 100644 --- a/vendor/ansible/roles/contiv_storage/files/volplugin.service +++ b/vendor/ansible/roles/contiv_storage/files/volplugin.service @@ -5,4 +5,5 @@ After=auditd.service systemd-user-sessions.service time-sync.target etcd.service [Service] EnvironmentFile=/etc/default/volplugin ExecStart=/usr/bin/volplugin $VOLPLUGIN_ARGS +ExecStopPost=/usr/bin/rm -f /run/docker/plugins/volplugin.sock KillMode=control-group diff --git a/vendor/ansible/roles/dev/meta/main.yml b/vendor/ansible/roles/dev/meta/main.yml index 568f773..1ffcb3f 100644 --- a/vendor/ansible/roles/dev/meta/main.yml +++ b/vendor/ansible/roles/dev/meta/main.yml @@ -14,8 +14,8 @@ dependencies: - { role: ceph-install, tags: 'prebake-for-dev' } - { role: ansible, tags: 'prebake-for-dev' } -- { role: etcd } - { role: docker } +- { role: etcd } - { role: swarm } - { role: ucp } - { role: contiv_cluster } diff --git a/vendor/ansible/roles/docker/tasks/main.yml b/vendor/ansible/roles/docker/tasks/main.yml index 4564552..337abdc 100644 --- 
a/vendor/ansible/roles/docker/tasks/main.yml +++ b/vendor/ansible/roles/docker/tasks/main.yml @@ -9,20 +9,12 @@ tags: - prebake-for-dev -- name: install docker (debian) - shell: curl https://get.docker.com | sed 's/docker-engine/--force-yes docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}/' | bash +- include: ubuntu_install_tasks.yml when: (ansible_os_family == "Debian") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*")) tags: - prebake-for-dev -- name: remove docker (redhat) - yum: name=docker-engine state=absent - when: (ansible_os_family == "RedHat") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*")) - tags: - - prebake-for-dev - -- name: install docker (redhat) - shell: curl https://get.docker.com | sed 's/docker-engine/docker-engine-{{ docker_version }}/' | bash +- include: redhat_install_tasks.yml when: (ansible_os_family == "RedHat") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*")) tags: - prebake-for-dev @@ -41,7 +33,6 @@ shell: > ( iptables -L INPUT | grep "{{ docker_rule_comment }} ({{ item }})" ) || \ iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ docker_rule_comment }} ({{ item }})" - become: true with_items: - "{{ docker_api_port }}" @@ -67,7 +58,7 @@ # tcp socket service requires docker service to be started after it - name: reload systemd configuration - shell: sudo systemctl daemon-reload + shell: systemctl daemon-reload when: "(docker_tcp_socket | changed) or (docker_tcp_socket_state.stdout != 'Active: active')" - name: stop docker @@ -83,7 +74,7 @@ when: "(docker_tcp_socket | changed) or (docker_tcp_socket_state.stdout != 'Active: active')" - name: check docker service state - shell: sudo systemctl status docker | grep 'Active.*active' -o + shell: systemctl status docker | grep 'Active.*active' -o ignore_errors: true register: docker_service_state 
tags: @@ -97,7 +88,7 @@ # https://github.com/ansible/ansible-modules-core/issues/191 - name: reload docker systemd configuration #service: name=docker state=restarted - shell: sudo systemctl daemon-reload + shell: systemctl daemon-reload when: "(docker_service_state.stderr | match('.*docker.service changed on disk.*')) or (docker_service_state.stdout != 'Active: active')" tags: - prebake-for-dev diff --git a/vendor/ansible/roles/docker/tasks/redhat_install_tasks.yml b/vendor/ansible/roles/docker/tasks/redhat_install_tasks.yml new file mode 100644 index 0000000..1dafbb3 --- /dev/null +++ b/vendor/ansible/roles/docker/tasks/redhat_install_tasks.yml @@ -0,0 +1,22 @@ +--- +# This role contains tasks for installing docker service +# + +- name: add docker's public key for CS-engine (redhat) + rpm_key: + key: "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e" + state: present + validate_certs: "{{ validate_certs }}" + +- name: add docker CS-engine repos (redhat) + shell: yum-config-manager --add-repo https://packages.docker.com/{{ item }}/yum/repo/main/centos/7 + become: true + with_items: + - "1.10" + - "1.11" + +- name: remove docker (redhat) + yum: name=docker-engine state=absent + +- name: install docker (redhat) + shell: curl https://get.docker.com | sed 's/docker-engine/docker-engine-{{ docker_version }}/' | bash diff --git a/vendor/ansible/roles/docker/tasks/ubuntu_install_tasks.yml b/vendor/ansible/roles/docker/tasks/ubuntu_install_tasks.yml new file mode 100644 index 0000000..4172a29 --- /dev/null +++ b/vendor/ansible/roles/docker/tasks/ubuntu_install_tasks.yml @@ -0,0 +1,20 @@ +--- +# This role contains tasks for installing docker service +# + +- name: add docker's public key for CS-engine (debian) + apt_key: + url: "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e" + state: present + validate_certs: "{{ validate_certs }}" + +- name: add docker CS-engine repos (debian) + 
apt_repository: + repo: "deb https://packages.docker.com/{{ item }}/apt/repo ubuntu-{{ ansible_distribution_release }} main" + state: present + with_items: + - "1.10" + - "1.11" + +- name: install docker (debian) + shell: curl https://get.docker.com | sed 's/docker-engine/--force-yes docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}/' | bash diff --git a/vendor/ansible/roles/etcd/files/etcd.service b/vendor/ansible/roles/etcd/files/etcd.service index baf377c..094bd78 100644 --- a/vendor/ansible/roles/etcd/files/etcd.service +++ b/vendor/ansible/roles/etcd/files/etcd.service @@ -1,9 +1,10 @@ [Unit] Description=Etcd -After=auditd.service systemd-user-sessions.service time-sync.target +After=auditd.service systemd-user-sessions.service time-sync.target docker.service [Service] +Restart=on-failure +RestartSec=10s ExecStart=/usr/bin/etcd.sh start ExecStop=/usr/bin/etcd.sh stop KillMode=control-group -ExecStopPost=/usr/bin/etcd.sh post-stop diff --git a/vendor/ansible/roles/etcd/tasks/main.yml b/vendor/ansible/roles/etcd/tasks/main.yml index 0c86888..16f7061 100644 --- a/vendor/ansible/roles/etcd/tasks/main.yml +++ b/vendor/ansible/roles/etcd/tasks/main.yml @@ -1,7 +1,7 @@ --- # This role contains tasks for configuring and starting etcd service -- name: download etcd {{ etcd_version }} +- name: download etcdctl {{ etcd_version }} get_url: validate_certs: "{{ validate_certs }}" url: https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz @@ -9,13 +9,18 @@ tags: - prebake-for-dev -- name: install etcd +- name: install etcdctl shell: > tar vxzf /tmp/etcd-{{ etcd_version }}-linux-amd64.tar.gz && \ mv etcd-{{ etcd_version }}-linux-amd64/etcd* /usr/bin tags: - prebake-for-dev +- name: install etcd {{ etcd_version }} + shell: docker pull quay.io/coreos/etcd:{{ etcd_version }} + tags: + - prebake-for-dev + - name: setup iptables for etcd shell: > ( iptables -L INPUT | grep "{{ etcd_rule_comment }} ({{ 
item }})" ) || \ diff --git a/vendor/ansible/roles/etcd/templates/etcd.j2 b/vendor/ansible/roles/etcd/templates/etcd.j2 index 83b2f72..0368882 100644 --- a/vendor/ansible/roles/etcd/templates/etcd.j2 +++ b/vendor/ansible/roles/etcd/templates/etcd.j2 @@ -15,7 +15,7 @@ http://{{ addr }}:{{ etcd_client_port1 }},http://{{ addr }}:{{ etcd_client_port2 {%- macro get_peer_addr() -%} {# we can't use a simple filter as shown, as it needs python 2.8. # So resorting to loop below to get a peer. - {%- set peer_name=groups[etcd_peers_group]|reject("equalto", node_name)|first -%} #} + #{%- set peer_name=groups[etcd_peers_group]|reject("equalto", node_name)|first -%} #} {%- set peers=[] -%} {%- for host in groups[etcd_peers_group] -%} {%- if host != node_name -%} @@ -124,6 +124,15 @@ export ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }} set -x case $1 in start) + # check if docker is running, else fail early. + # this is done instead of adding a 'Requires' dependency for docker in + # unit file to ensure that the etcd service starts as soon as docker starts + # even after a manual restart of docker. + out=$(/usr/bin/docker ps 2>&1 | grep -o "Cannot connect to the Docker daemon") + if [ "${out}" == "Cannot connect to the Docker daemon" ]; then + echo "docker is not running." 
+ exit 1 + fi {% if run_as == "worker" -%} {{ add_proxy() }} {% elif etcd_init_cluster -%} @@ -135,7 +144,20 @@ start) #start etcd echo "==> starting etcd with environment:" `env` - /usr/bin/etcd + /usr/bin/docker run -t --rm --net=host --name etcd \ + -e ETCD_NAME=${ETCD_NAME} \ + -e ETCD_DATA_DIR=${ETCD_DATA_DIR} \ + -e ETCD_INITIAL_CLUSTER_TOKEN=${ETCD_INITIAL_CLUSTER_TOKEN} \ + -e ETCD_LISTEN_CLIENT_URLS=${ETCD_LISTEN_CLIENT_URLS} \ + -e ETCD_ADVERTISE_CLIENT_URLS=${ETCD_ADVERTISE_CLIENT_URLS} \ + -e ETCD_INITIAL_ADVERTISE_PEER_URLS=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \ + -e ETCD_LISTEN_PEER_URLS=${ETCD_LISTEN_PEER_URLS} \ + -e ETCD_HEARTBEAT_INTERVAL=${ETCD_HEARTBEAT_INTERVAL} \ + -e ETCD_ELECTION_TIMEOUT=${ETCD_ELECTION_TIMEOUT} \ + -e ETCD_INITIAL_CLUSTER=${ETCD_INITIAL_CLUSTER} \ + -e ETCD_INITIAL_CLUSTER_STATE=${ETCD_INITIAL_CLUSTER_STATE} \ + -e ETCD_PROXY=${ETCD_PROXY} \ + quay.io/coreos/etcd:{{ etcd_version }} ;; stop) @@ -145,18 +167,15 @@ stop) {% set peer_addr=get_peer_addr() -%} {% if peer_addr == "" -%} echo "==> no peer found or single member cluster at time of commission" - exit 1 {% else -%} {{ remove_member(peer_addr=peer_addr) }} {% endif %} {% else -%} {{ remove_member(peer_addr=etcd_master_addr) }} {% endif -%} - ;; -post-stop) - #XXX: is there a case when we should not cleanup the data dir on stop? 
- rm -rf $ETCD_DATA_DIR + /usr/bin/docker stop etcd + /usr/bin/docker rm etcd ;; *) diff --git a/vendor/ansible/roles/test/defaults/main.yml b/vendor/ansible/roles/test/defaults/main.yml new file mode 100644 index 0000000..13c2164 --- /dev/null +++ b/vendor/ansible/roles/test/defaults/main.yml @@ -0,0 +1,7 @@ +--- +# role variable for the test environment packages + +vbox_major_version: "5.0.20" +vbox_version: "5.0-{{ vbox_major_version }}_106931" +vagrant_version: "1.8.1" +packer_version: "0.10.0" diff --git a/vendor/ansible/roles/test/tasks/os_agnostic_tasks.yml b/vendor/ansible/roles/test/tasks/os_agnostic_tasks.yml index 9ea05e7..fe64525 100644 --- a/vendor/ansible/roles/test/tasks/os_agnostic_tasks.yml +++ b/vendor/ansible/roles/test/tasks/os_agnostic_tasks.yml @@ -1,18 +1,18 @@ - name: check packer's version shell: packer --version - register: packer_version + register: packer_installed_version ignore_errors: yes - name: download packer get_url: validate_certs: "{{ validate_certs }}" - url: https://releases.hashicorp.com/packer/0.8.6/packer_0.8.6_linux_amd64.zip - dest: /tmp/packer_0.8.6_linux_amd64.zip + url: "https://releases.hashicorp.com/packer/{{ packer_version }}/packer_{{ packer_version }}_linux_amd64.zip" + dest: "/tmp/packer_{{ packer_version }}_linux_amd64.zip" force: no - when: packer_version.stdout != "0.8.6" + when: packer_installed_version.stdout != "{{ packer_version }}" - name: install packer - shell: rm -f packer* && unzip /tmp/packer_0.8.6_linux_amd64.zip + shell: rm -f packer* && unzip /tmp/packer_{{ packer_version }}_linux_amd64.zip args: chdir: /usr/local/bin - when: packer_version.stdout != "0.8.6" + when: packer_installed_version.stdout != "{{ packer_version }}" diff --git a/vendor/ansible/roles/test/tasks/redhat_tasks.yml b/vendor/ansible/roles/test/tasks/redhat_tasks.yml index 48a7a5e..042724f 100644 --- a/vendor/ansible/roles/test/tasks/redhat_tasks.yml +++ b/vendor/ansible/roles/test/tasks/redhat_tasks.yml @@ -1,12 +1,12 @@ - name: 
download VBox (redhat) get_url: validate_certs: "{{ validate_certs }}" - url: http://download.virtualbox.org/virtualbox/5.0.12/VirtualBox-5.0-5.0.12_104815_el7-1.x86_64.rpm - dest: /tmp/VirtualBox-5.0-5.0.12_104815_el7-1.x86_64.rpm + url: http://download.virtualbox.org/virtualbox/{{ vbox_major_version }}/VirtualBox-{{ vbox_version }}_el7-1.x86_64.rpm + dest: /tmp/VirtualBox-{{ vbox_major_version }}.rpm force: no - name: install VBox (redhat) - yum: name=/tmp/VirtualBox-5.0-5.0.12_104815_el7-1.x86_64.rpm state=present + yum: name=/tmp/VirtualBox-{{ vbox_major_version }}.rpm state=present - name: install VBox dkms and dependencies (redhat) yum: name={{ item }} state=latest @@ -25,9 +25,9 @@ - name: download vagrant (redhat) get_url: validate_certs: "{{ validate_certs }}" - url: https://releases.hashicorp.com/vagrant/1.8.1/vagrant_1.8.1_x86_64.rpm - dest: /tmp/vagrant_1.8.1_x86_64.rpm + url: https://releases.hashicorp.com/vagrant/{{ vagrant_version }}/vagrant_{{ vagrant_version }}_x86_64.rpm + dest: /tmp/vagrant_{{ vagrant_version }}.rpm force: no - name: install vagrant (redhat) - yum: name=/tmp/vagrant_1.8.1_x86_64.rpm state=present + yum: name=/tmp/vagrant_{{ vagrant_version }}.rpm state=present diff --git a/vendor/ansible/roles/test/tasks/ubuntu_tasks.yml b/vendor/ansible/roles/test/tasks/ubuntu_tasks.yml index b26941b..d3e238a 100644 --- a/vendor/ansible/roles/test/tasks/ubuntu_tasks.yml +++ b/vendor/ansible/roles/test/tasks/ubuntu_tasks.yml @@ -1,22 +1,22 @@ - name: download VBox (debian) get_url: validate_certs: "{{ validate_certs }}" - url: http://download.virtualbox.org/virtualbox/5.0.12/virtualbox-5.0_5.0.12-104815~Ubuntu~trusty_amd64.deb - dest: /tmp/virtualbox-5.0_5.0.12-104815~Ubuntu~trusty_amd64.deb + url: http://download.virtualbox.org/virtualbox/{{ vbox_major_version }}/virtualbox-{{ vbox_version }}~{{ ansible_distribution }}~{{ ansible_distribution_release }}_amd64.deb + dest: /tmp/virtualbox-{{ vbox_major_version }}.deb force: no - name: install VBox 
(debian)
-  apt: deb=/tmp/virtualbox-5.0_5.0.12-104815~Ubuntu~trusty_amd64.deb state=present
+  apt: deb=/tmp/virtualbox-{{ vbox_major_version }}.deb state=present

 - name: install VBox dkms (debian)
-  apt: name=dkms state=present
+  apt: name=dkms state=latest

 - name: download vagrant (debian)
   get_url:
     validate_certs: "{{ validate_certs }}"
-    url: https://releases.hashicorp.com/vagrant/1.8.1/vagrant_1.8.1_x86_64.deb
-    dest: /tmp/vagrant_1.8.1_x86_64.deb
+    url: https://releases.hashicorp.com/vagrant/{{ vagrant_version }}/vagrant_{{ vagrant_version }}_x86_64.deb
+    dest: /tmp/vagrant_{{ vagrant_version }}.deb
     force: no

 - name: install vagrant (debian)
-  apt: deb=/tmp/vagrant_1.8.1_x86_64.deb state=present
+  apt: deb=/tmp/vagrant_{{ vagrant_version }}.deb state=present
diff --git a/vendor/ansible/roles/ucp/tasks/cleanup.yml b/vendor/ansible/roles/ucp/tasks/cleanup.yml
index 3ce941e..e579439 100644
--- a/vendor/ansible/roles/ucp/tasks/cleanup.yml
+++ b/vendor/ansible/roles/ucp/tasks/cleanup.yml
@@ -9,7 +9,6 @@
   with_items:
   - "{{ ucp_fingerprint_file }}"
   - "{{ ucp_instance_id_file }}"
-  - "{{ ucp_fifo_file }}"

 # XXX: temporary fix for issue with ucp 1.1.0 where it doesn't cleanup this file
 # remove this once it is fixed.
Target fix version is 1.1.2 diff --git a/vendor/ansible/roles/ucp/tasks/main.yml b/vendor/ansible/roles/ucp/tasks/main.yml index 7fafb61..aaa8adf 100644 --- a/vendor/ansible/roles/ucp/tasks/main.yml +++ b/vendor/ansible/roles/ucp/tasks/main.yml @@ -50,6 +50,7 @@ wait_for: path: "{{ ucp_remote_dir }}/{{ item }}" state: present + timeout: 600 with_items: - "{{ ucp_fingerprint_file }}" - "{{ ucp_instance_id_file }}" diff --git a/vendor/ansible/roles/ucp/templates/ucp.j2 b/vendor/ansible/roles/ucp/templates/ucp.j2 index f713ce6..7e2bf65 100644 --- a/vendor/ansible/roles/ucp/templates/ucp.j2 +++ b/vendor/ansible/roles/ucp/templates/ucp.j2 @@ -15,10 +15,10 @@ start) {% if ucp_bootstrap_node_name != node_name -%} # if node is running as a replica or worker make sure fingerprint file # exists before procceeding - for true; do + while [ 1 ]; do if [ ! -f "{{ ucp_remote_dir }}/{{ ucp_fingerprint_file }}" ]; then echo `date` ": waiting for fingerprint file..." - sleep 2s + sleep 5s else break fi @@ -26,7 +26,7 @@ start) {% endif %} {% if ucp_bootstrap_node_name == node_name -%} - out=$(/usr/bin/docker run --rm -t --name ucp \ + /usr/bin/docker run --rm -t --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ {% if ucp_license_file is defined -%} -v {{ ucp_license_remote }}:/docker_subscription.lic \ @@ -36,13 +36,17 @@ start) {% if ucp_swarm_strategy != "spread" -%} --{{ ucp_swarm_strategy }} \ {% endif -%} - --image-version={{ ucp_version }} --fresh-install | tee /tmp/ucp.log) + --image-version={{ ucp_version }} --fresh-install - cat /tmp/ucp.log - rm /tmp/ucp.log # copy out the instance ID - instanceId=$(echo ${out} | egrep -o 'UCP instance ID: [a-zA-Z0-9:_]*' | \ - awk --field-separator='UCP instance ID: ' '{print $2}') + out=$(/usr/bin/docker run --rm --name ucp \ + -v /var/run/docker.sock:/var/run/docker.sock \ + docker/ucp id) + instanceId=$(echo ${out} | egrep -o '[A-Z0-9:]*') + if [ "${instanceId}" == "" ]; then + echo failed to parse instance-id + exit 1 + fi 
echo instance-id: ${instanceId} echo ${instanceId} > "{{ ucp_remote_dir }}/{{ ucp_instance_id_file }}" @@ -57,8 +61,12 @@ start) out=$(/usr/bin/docker run --rm --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ docker/ucp fingerprint) - fingerprint=$(echo ${out} | egrep -o 'Fingerprint=[a-zA-Z0-9:]*' | \ + fingerprint=$(echo ${out} | egrep -o 'Fingerprint=[A-Z0-9:]*' | \ awk --field-separator='=' '{print $2}') + if [ "${fingerprint}" == "" ]; then + echo failed to parse fingerprint + exit 1 + fi echo fingerprint: ${fingerprint} echo ${fingerprint} > "{{ ucp_remote_dir }}/{{ ucp_fingerprint_file }}" {% else -%} @@ -84,6 +92,9 @@ start) stop) # don't `set -e` as we shouldn't stop on error + #remove the fifo file + rm -f "{{ ucp_remote_dir }}/{{ ucp_fifo_file }}" + #stop the ucp containers and associated volumes docker ps -a | grep 'ucp-' | awk '{print $1}' | xargs docker stop diff --git a/vendor/ansible/site.yml b/vendor/ansible/site.yml index f7cc569..c921a5a 100644 --- a/vendor/ansible/site.yml +++ b/vendor/ansible/site.yml @@ -8,7 +8,7 @@ # - pre-bake some binaries that are otherwise installed as part of contiv # service deployments like collins, sky-dns, ceph etc. 
- hosts: devtest - sudo: true + become: true environment: '{{ env }}' roles: - { role: base } @@ -16,15 +16,15 @@ - { role: test } - hosts: volplugin-test - sudo: true + become: true environment: '{{ env }}' roles: - { role: base } - { role: nfs } - { role: vagrant } - { role: ucarp } - - { role: etcd, run_as: master } - { role: docker, etcd_client_port1: 2379 } + - { role: etcd, run_as: master } - { role: ceph-mon, mon_group_name: volplugin-test } - { role: ceph-osd, mon_group_name: volplugin-test, osd_group_name: volplugin-test } - { role: scheduler_stack, run_as: master } @@ -34,7 +34,7 @@ # This host group shall provision a host with all required packages needed to make # the node ready to be managed by cluster-manager - hosts: cluster-node - sudo: true + become: true environment: '{{ env }}' roles: - { role: base } @@ -43,7 +43,7 @@ # cluster-control hosts corresponds to the first machine in the cluster that is provisioned # to bootstrap the cluster by starting cluster manager and inventory database (collins) - hosts: cluster-control - sudo: true + become: true environment: '{{ env }}' roles: - { role: base } @@ -54,7 +54,7 @@ # service-master hosts correspond to cluster machines that run the master/controller # logic of the infra services - hosts: service-master - sudo: true + become: true environment: '{{ env }}' roles: - { role: base } @@ -70,7 +70,7 @@ # service-worker hosts correspond to cluster machines that run the worker/driver # logic of the infra services. - hosts: service-worker - sudo: true + become: true environment: '{{ env }}' roles: - { role: base } @@ -83,11 +83,11 @@ # netplugin-node hosts set up netmast/netplugin in a cluster - hosts: netplugin-node - sudo: true + become: true environment: '{{ env }}' roles: - { role: base } - - { role: etcd, run_as: master } - { role: docker, etcd_client_port1: 2379 } + - { role: etcd, run_as: master } - { role: scheduler_stack, run_as: master } - { role: contiv_network, run_as: master }