author     leonwang <wanghui71@huawei.com>    2018-01-10 03:44:46 +0000
committer  leonwang <wanghui71@huawei.com>    2018-01-10 08:43:38 +0000
commit     64df7bc3bc70d49153409436b411fb327691a4d5 (patch)
tree       c078dda45831938f0268e66f774390b4079309c7 /ci/ansible/roles
parent     0786fde30eba926b097617dea9ca4683ac2fa1b7 (diff)
Push Zealand version of OpenSDS Ansible as base code of Stor4NFV
As we discussed in the last meeting, the installer script for Stor4NFV will be based on the OpenSDS Ansible scripts, so this patch downloads the first OpenSDS release (Zealand) and pushes its Ansible scripts into the Stor4NFV repo, which means we do not need to modify the OpenSDS code itself. Please feel free to ask if you have any questions.

Change-Id: I7b50729977b195fa64e8d9a09f415d9f3329d71f
Signed-off-by: leonwang <wanghui71@huawei.com>
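For reviewers who want to try the roles out, a typical invocation would look like the sketch below; the playbook and inventory names (site.yml, clean.yml, local.hosts) follow the usual OpenSDS Ansible layout and are assumptions, not files added by this patch:

    # Hypothetical invocation; playbook and inventory names are assumptions
    cd ci/ansible
    ansible-playbook site.yml -i local.hosts     # deploy: common, osdsdb, osdsdock, osdslet roles
    ansible-playbook clean.yml -i local.hosts    # tear down via the cleaner role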
Diffstat (limited to 'ci/ansible/roles')
-rw-r--r--  ci/ansible/roles/cleaner/tasks/main.yml                     142
-rw-r--r--  ci/ansible/roles/common/tasks/main.yml                      135
-rw-r--r--  ci/ansible/roles/osdsdb/scenarios/etcd.yml                   38
-rw-r--r--  ci/ansible/roles/osdsdb/tasks/main.yml                        4
-rw-r--r--  ci/ansible/roles/osdsdock/scenarios/ceph.yml                 69
-rw-r--r--  ci/ansible/roles/osdsdock/scenarios/cinder.yml                1
-rw-r--r--  ci/ansible/roles/osdsdock/scenarios/cinder_standalone.yml   141
-rw-r--r--  ci/ansible/roles/osdsdock/scenarios/lvm.yml                  22
-rw-r--r--  ci/ansible/roles/osdsdock/tasks/main.yml                     31
-rw-r--r--  ci/ansible/roles/osdslet/tasks/main.yml                      15
10 files changed, 598 insertions, 0 deletions
diff --git a/ci/ansible/roles/cleaner/tasks/main.yml b/ci/ansible/roles/cleaner/tasks/main.yml
new file mode 100644
index 0000000..93aeb59
--- /dev/null
+++ b/ci/ansible/roles/cleaner/tasks/main.yml
@@ -0,0 +1,142 @@
+---
+- name: kill etcd daemon service
+ shell: killall etcd
+ ignore_errors: yes
+ when: db_driver == "etcd"
+
+- name: remove etcd service data
+ file:
+ path: "{{ etcd_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+ when: db_driver == "etcd"
+
+- name: remove etcd tarball
+ file:
+ path: "/tmp/{{ etcd_tarball }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+ when: db_driver == "etcd"
+
+- name: kill osdslet daemon service
+ shell: killall osdslet
+ ignore_errors: yes
+
+- name: kill osdsdock daemon service
+ shell: killall osdsdock
+ ignore_errors: yes
+
+- name: clean all opensds build files
+ file:
+ path: "{{ opensds_build_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+
+- name: clean all opensds configuration files
+ file:
+ path: "{{ opensds_config_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+
+- name: clean all opensds log files
+ file:
+ path: "{{ opensds_log_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+
+- name: check if it existed before cleaning a volume group
+ shell: vgdisplay {{ vg_name }}
+ ignore_errors: yes
+ register: vg_existed
+ when: enabled_backend == "lvm"
+
+- name: remove a volume group if lvm backend specified
+ shell: vgremove {{ vg_name }}
+ when: enabled_backend == "lvm" and vg_existed.rc == 0
+
+- name: check if it existed before cleaning a physical volume
+ shell: pvdisplay {{ pv_device }}
+ ignore_errors: yes
+ register: pv_existed
+ when: enabled_backend == "lvm"
+
+- name: remove a physical volume if lvm backend specified
+ shell: pvremove {{ pv_device }}
+ when: enabled_backend == "lvm" and pv_existed.rc == 0
+
+- name: stop cinder-standalone service
+ shell: docker-compose down
+ become: true
+ args:
+ chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
+ when: enabled_backend == "cinder"
+
+- name: clean the volume group of cinder
+ shell:
+ _raw_params: |
+
+ # _clean_lvm_volume_group removes all default LVM volumes
+ #
+ # Usage: _clean_lvm_volume_group $vg
+ function _clean_lvm_volume_group {
+ local vg=$1
+
+ # Clean out existing volumes
+ sudo lvremove -f $vg
+ }
+
+ # _remove_lvm_volume_group removes the volume group
+ #
+ # Usage: _remove_lvm_volume_group $vg
+ function _remove_lvm_volume_group {
+ local vg=$1
+
+ # Remove the volume group
+ sudo vgremove -f $vg
+ }
+
+ # _clean_lvm_backing_file() removes the backing file of the
+ # volume group
+ #
+ # Usage: _clean_lvm_backing_file() $backing_file
+ function _clean_lvm_backing_file {
+ local backing_file=$1
+
+ # If the backing physical device is a loop device, it was probably set up by DevStack
+ if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
+ local vg_dev
+ vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'.img'/ { print $1}')
+ if [[ -n "$vg_dev" ]]; then
+ sudo losetup -d $vg_dev
+ fi
+ rm -f $backing_file
+ fi
+ }
+
+ # clean_lvm_volume_group() cleans up the volume group and removes the
+ # backing file
+ #
+ # Usage: clean_lvm_volume_group $vg
+ function clean_lvm_volume_group {
+ local vg=$1
+
+ _clean_lvm_volume_group $vg
+ _remove_lvm_volume_group $vg
+ # if there is no logical volume left, it's safe to attempt a cleanup
+ # of the backing file
+ if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
+ _clean_lvm_backing_file {{ cinder_data_dir }}/${vg}.img
+ fi
+ }
+
+ clean_lvm_volume_group {{cinder_volume_group}}
+
+ args:
+ executable: /bin/bash
+ become: true
+ when: enabled_backend == "cinder"
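The LVM teardown above registers the return code of vgdisplay/pvdisplay and only removes objects that actually exist. An illustrative shell equivalent of that guard, with vg_name and pv_device replaced by assumed values that are not defined in this file:

    # Sketch of the "check, then remove" pattern used by the tasks above
    if vgdisplay opensds-volumes >/dev/null 2>&1; then    # vg_name assumed
        vgremove opensds-volumes
    fi
    if pvdisplay /dev/sdb >/dev/null 2>&1; then           # pv_device assumed
        pvremove /dev/sdb
    fi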
diff --git a/ci/ansible/roles/common/tasks/main.yml b/ci/ansible/roles/common/tasks/main.yml
new file mode 100644
index 0000000..20f5381
--- /dev/null
+++ b/ci/ansible/roles/common/tasks/main.yml
@@ -0,0 +1,135 @@
+---
+# If we can't get golang installed before any module is used we will fail
+# so just try what we can to get it installed
+- name: check for golang
+ stat:
+ path: /usr/local/go
+ ignore_errors: yes
+ register: systemgolang
+
+- name: install golang for debian based systems
+ shell:
+ cmd: |
+ set -e
+ set -x
+
+ wget https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz
+ tar xvf go1.9.linux-amd64.tar.gz -C /usr/local/
+ cat >> /etc/profile <<GOLANG__CONFIG_DOC
+ export GOROOT=/usr/local/go
+ export GOPATH=\$HOME/gopath
+ export PATH=\$PATH:\$GOROOT/bin:\$GOPATH/bin
+ GOLANG__CONFIG_DOC
+
+ executable: /bin/bash
+ ignore_errors: yes
+ when:
+ - systemgolang.stat.exists is undefined or systemgolang.stat.exists == false
+
+- name: Run the equivalent of "apt-get update" as a separate step
+ apt:
+ update_cache: yes
+
+- name: install librados-dev external package
+ apt:
+ name: librados-dev
+
+- name: install librbd-dev external package
+ apt:
+ name: librbd-dev
+
+- name: check for opensds source code existed
+ stat:
+ path: "{{ opensds_root_dir }}"
+ ignore_errors: yes
+ register: opensdsexisted
+
+- name: download opensds source code
+ git:
+ repo: "{{ remote_url }}"
+ dest: "{{ opensds_root_dir }}"
+ when:
+ - opensdsexisted.stat.exists is undefined or opensdsexisted.stat.exists == false
+
+- name: check for opensds binary file existed
+ stat:
+ path: "{{ opensds_build_dir }}"
+ ignore_errors: yes
+ register: opensdsbuilt
+
+- name: build opensds binary file
+ shell: . /etc/profile; make
+ args:
+ chdir: "{{ opensds_root_dir }}"
+ when:
+ - opensdsbuilt.stat.exists is undefined or opensdsbuilt.stat.exists == false
+
+- name: create opensds global config directory if it doesn't exist
+ file:
+ path: "{{ opensds_config_dir }}/driver"
+ state: directory
+ mode: 0755
+
+- name: create opensds log directory if it doesn't exist
+ file:
+ path: "{{ opensds_log_dir }}"
+ state: directory
+ mode: 0755
+
+- name: configure opensds global info
+ shell: |
+ cat > opensds.conf <<OPENSDS_GLOABL_CONFIG_DOC
+ [osdslet]
+ api_endpoint = {{ controller_endpoint }}
+ graceful = True
+ log_file = {{ controller_log_file }}
+ socket_order = inc
+
+ [osdsdock]
+ api_endpoint = {{ dock_endpoint }}
+ log_file = {{ dock_log_file }}
+ # Specify which backends should be enabled: sample, ceph, cinder, lvm and so on.
+ enabled_backends = {{ enabled_backend }}
+
+ [lvm]
+ name = {{ lvm_name }}
+ description = {{ lvm_description }}
+ driver_name = {{ lvm_driver_name }}
+ config_path = {{ lvm_config_path }}
+
+ [ceph]
+ name = {{ ceph_name }}
+ description = {{ ceph_description }}
+ driver_name = {{ ceph_driver_name }}
+ config_path = {{ ceph_config_path }}
+
+ [cinder]
+ name = {{ cinder_name }}
+ description = {{ cinder_description }}
+ driver_name = {{ cinder_driver_name }}
+ config_path = {{ cinder_config_path }}
+
+ [database]
+ endpoint = {{ db_endpoint }}
+ driver = {{ db_driver }}
+ OPENSDS_GLOABL_CONFIG_DOC
+ args:
+ chdir: "{{ opensds_config_dir }}"
+ ignore_errors: yes
+
+- name: copy opensds lvm backend file if specify lvm backend
+ copy:
+ src: ../../../group_vars/lvm/lvm.yaml
+ dest: "{{ lvm_config_path }}"
+ when: enabled_backend == "lvm"
+
+- name: copy opensds ceph backend file if specify ceph backend
+ copy:
+ src: ../../../group_vars/ceph/ceph.yaml
+ dest: "{{ ceph_config_path }}"
+ when: enabled_backend == "ceph"
+
+- name: copy opensds cinder backend file if specify cinder backend
+ copy:
+ src: ../../../group_vars/cinder/cinder.yaml
+ dest: "{{ cinder_config_path }}"
+ when: enabled_backend == "cinder"
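Once the common role has run, the Go toolchain and the generated configuration can be spot-checked by hand; a minimal sketch, assuming the /usr/local/go install path used above and /etc/opensds as opensds_config_dir:

    . /etc/profile
    go version                      # expect go1.9 linux/amd64
    cat /etc/opensds/opensds.conf   # opensds_config_dir is assumed to be /etc/opensds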
diff --git a/ci/ansible/roles/osdsdb/scenarios/etcd.yml b/ci/ansible/roles/osdsdb/scenarios/etcd.yml
new file mode 100644
index 0000000..79dc444
--- /dev/null
+++ b/ci/ansible/roles/osdsdb/scenarios/etcd.yml
@@ -0,0 +1,38 @@
+---
+- name: check for etcd existed
+ stat:
+ path: "{{ etcd_dir }}/etcd"
+ ignore_errors: yes
+ register: etcdexisted
+
+- name: download etcd
+ get_url:
+ url={{ etcd_download_url }}
+ dest=/tmp/{{ etcd_tarball }}
+ when:
+ - etcdexisted.stat.exists is undefined or etcdexisted.stat.exists == false
+
+- name: extract the etcd tarball
+ unarchive:
+ src=/tmp/{{ etcd_tarball }}
+ dest=/tmp/
+ when:
+ - etcdexisted.stat.exists is undefined or etcdexisted.stat.exists == false
+
+- name: Check if etcd is running
+ shell: ps aux | grep etcd | grep -v grep
+ ignore_errors: true
+ register: service_etcd_status
+
+- name: run etcd daemon service
+ shell: nohup ./etcd &>>etcd.log &
+ become: true
+ args:
+ chdir: "{{ etcd_dir }}"
+ when: service_etcd_status.rc != 0
+
+- name: check etcd cluster health
+ shell: ./etcdctl cluster-health
+ become: true
+ args:
+ chdir: "{{ etcd_dir }}"
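After these tasks complete, etcd can be exercised manually with the bundled etcdctl; a quick sanity check, assuming etcd_dir points at the extracted release directory under /tmp:

    cd /tmp/etcd-v3.2.0-linux-amd64    # etcd_dir; exact version and path are assumptions
    ./etcdctl cluster-health           # same check the last task performs
    ./etcdctl set /stor4nfv/test ok && ./etcdctl get /stor4nfv/test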
diff --git a/ci/ansible/roles/osdsdb/tasks/main.yml b/ci/ansible/roles/osdsdb/tasks/main.yml
new file mode 100644
index 0000000..41cbd09
--- /dev/null
+++ b/ci/ansible/roles/osdsdb/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- name: include scenarios/etcd.yml
+ include: scenarios/etcd.yml
+ when: db_driver == "etcd" \ No newline at end of file
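The include pattern keeps the role open to other database drivers; a hypothetical scenario include (driver name and file invented purely for illustration) would follow the same shape:

    - name: include scenarios/container.yml
      include: scenarios/container.yml
      when: db_driver == "container"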
diff --git a/ci/ansible/roles/osdsdock/scenarios/ceph.yml b/ci/ansible/roles/osdsdock/scenarios/ceph.yml
new file mode 100644
index 0000000..2f6348e
--- /dev/null
+++ b/ci/ansible/roles/osdsdock/scenarios/ceph.yml
@@ -0,0 +1,69 @@
+---
+- name: install ceph-common external package when ceph backend enabled
+ apt:
+ name: ceph-common
+ when: enabled_backend == "ceph"
+
+- name: check for ceph-ansible source code existed
+ stat:
+ path: /tmp/ceph-ansible
+ ignore_errors: yes
+ register: cephansibleexisted
+
+- name: download ceph-ansible source code
+ git:
+ repo: https://github.com/ceph/ceph-ansible.git
+ dest: /tmp/ceph-ansible
+ when:
+ - cephansibleexisted.stat.exists is undefined or cephansibleexisted.stat.exists == false
+
+- name: copy ceph inventory host into ceph-ansible directory
+ copy:
+ src: ../../../group_vars/ceph/ceph.hosts
+ dest: /tmp/ceph-ansible/ceph.hosts
+
+- name: copy ceph all.yml file into ceph-ansible group_vars directory
+ copy:
+ src: ../../../group_vars/ceph/all.yml
+ dest: /tmp/ceph-ansible/group_vars/all.yml
+
+- name: copy ceph osds.yml file into ceph-ansible group_vars directory
+ copy:
+ src: ../../../group_vars/ceph/osds.yml
+ dest: /tmp/ceph-ansible/group_vars/osds.yml
+
+- name: copy site.yml.sample to site.yml in ceph-ansible
+ copy:
+ src: /tmp/ceph-ansible/site.yml.sample
+ dest: /tmp/ceph-ansible/site.yml
+
+- name: ping all hosts
+ shell: ansible all -m ping -i ceph.hosts
+ become: true
+ args:
+ chdir: /tmp/ceph-ansible
+
+- name: run ceph-ansible playbook
+ shell: ansible-playbook site.yml -i ceph.hosts
+ become: true
+ args:
+ chdir: /tmp/ceph-ansible
+
+- name: Check if ceph osd is running
+ shell: ps aux | grep ceph-osd | grep -v grep
+ ignore_errors: false
+ changed_when: false
+ register: service_ceph_osd_status
+
+- name: Check if ceph mon is running
+ shell: ps aux | grep ceph-mon | grep -v grep
+ ignore_errors: false
+ changed_when: false
+ register: service_ceph_mon_status
+
+- name: Create a pool and initialize it.
+ shell: ceph osd pool create {{ ceph_pool_name }} 100 && ceph osd pool set {{ ceph_pool_name }} size 1
+ ignore_errors: yes
+ changed_when: false
+ register: ceph_init_pool
+ when: service_ceph_mon_status.rc == 0 and service_ceph_osd_status.rc == 0 \ No newline at end of file
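When both ceph-mon and ceph-osd report as running, the pool created above can be inspected directly; a manual check, with "rbd" standing in for whatever ceph_pool_name resolves to:

    ceph -s                       # overall cluster health
    ceph osd pool ls              # the configured ceph_pool_name should be listed
    ceph osd pool get rbd size    # pool name is an assumption; expect size 1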
diff --git a/ci/ansible/roles/osdsdock/scenarios/cinder.yml b/ci/ansible/roles/osdsdock/scenarios/cinder.yml
new file mode 100644
index 0000000..7313caa
--- /dev/null
+++ b/ci/ansible/roles/osdsdock/scenarios/cinder.yml
@@ -0,0 +1 @@
+---
diff --git a/ci/ansible/roles/osdsdock/scenarios/cinder_standalone.yml b/ci/ansible/roles/osdsdock/scenarios/cinder_standalone.yml
new file mode 100644
index 0000000..4ad5cea
--- /dev/null
+++ b/ci/ansible/roles/osdsdock/scenarios/cinder_standalone.yml
@@ -0,0 +1,141 @@
+---
+
+- name: install python-pip
+ apt:
+ name: python-pip
+
+- name: install lvm2
+ apt:
+ name: lvm2
+
+- name: install thin-provisioning-tools
+ apt:
+ name: thin-provisioning-tools
+
+- name: install docker-compose
+ pip:
+ name: docker-compose
+
+- name: create directory to save source code and volume group file
+ file:
+ path: "{{ cinder_data_dir }}"
+ state: directory
+ recurse: yes
+
+- name: create volume group in thin mode
+ shell:
+ _raw_params: |
+ function _create_lvm_volume_group {
+ local vg=$1
+ local size=$2
+
+ local backing_file={{ cinder_data_dir }}/${vg}.img
+ if ! sudo vgs $vg; then
+ # Only create the backing file if it doesn't already exist
+ [[ -f $backing_file ]] || truncate -s $size $backing_file
+ local vg_dev
+ vg_dev=`sudo losetup -f --show $backing_file`
+
+ # Only create volume group if it doesn't already exist
+ if ! sudo vgs $vg; then
+ sudo vgcreate $vg $vg_dev
+ fi
+ fi
+ }
+ modprobe dm_thin_pool
+ _create_lvm_volume_group {{ cinder_volume_group }} 10G
+ args:
+ executable: /bin/bash
+ become: true
+
+- name: check for python-cinderclient source code existed
+ stat:
+ path: "{{ cinder_data_dir }}/python-cinderclient"
+ ignore_errors: yes
+ register: cinderclient_existed
+
+- name: download python-cinderclient source code
+ git:
+ repo: https://github.com/openstack/python-cinderclient.git
+ dest: "{{ cinder_data_dir }}/python-cinderclient"
+ when:
+ - cinderclient_existed.stat.exists is undefined or cinderclient_existed.stat.exists == false
+
+# Tested successfully in this version `ab0185bfc6e8797a35a2274c2a5ee03afb03dd60`
+# git checkout -b ab0185bfc6e8797a35a2274c2a5ee03afb03dd60
+- name: pip install cinderclient
+ shell: |
+ pip install -e .
+ become: true
+ args:
+ chdir: "{{ cinder_data_dir }}/python-cinderclient"
+
+- name: check for python-brick-cinderclient-ext source code existed
+ stat:
+ path: "{{ cinder_data_dir }}/python-brick-cinderclient-ext"
+ ignore_errors: yes
+ register: brick_existed
+
+- name: download python-brick-cinderclient-ext source code
+ git:
+ repo: https://github.com/openstack/python-brick-cinderclient-ext.git
+ dest: "{{ cinder_data_dir }}/python-brick-cinderclient-ext"
+ when:
+ - brick_existed.stat.exists is undefined or brick_existed.stat.exists == false
+
+# Tested successfully in this version `a281e67bf9c12521ea5433f86cec913854826a33`
+# git checkout -b a281e67bf9c12521ea5433f86cec913854826a33
+- name: pip install python-brick-cinderclient-ext
+ shell: |
+ pip install -e .
+ become: true
+ args:
+ chdir: "{{ cinder_data_dir }}/python-brick-cinderclient-ext"
+
+
+- name: check for cinder source code existed
+ stat:
+ path: "{{ cinder_data_dir }}/cinder"
+ ignore_errors: yes
+ register: cinder_existed
+
+- name: download cinder source code
+ git:
+ repo: https://github.com/openstack/cinder.git
+ dest: "{{ cinder_data_dir }}/cinder"
+ when:
+ - cinder_existed.stat.exists is undefined or cinder_existed.stat.exists == false
+
+# Tested successfully in this version `7bbc95344d3961d0bf059252723fa40b33d4b3fe`
+# git checkout -b 7bbc95344d3961d0bf059252723fa40b33d4b3fe
+- name: update blockbox configuration
+ shell: |
+ sed -i "s/PLATFORM ?= debian:stretch/PLATFORM ?= {{ cinder_container_platform }}/g" Makefile
+ sed -i "s/TAG ?= debian-cinder:latest/TAG ?= {{ cinder_image_tag }}:latest/g" Makefile
+
+ sed -i "s/image: debian-cinder/image: {{ cinder_image_tag }}/g" docker-compose.yml
+ sed -i "s/image: lvm-debian-cinder/image: {{ cinder_image_tag }}/g" docker-compose.yml
+
+ sed -i "s/volume_group = cinder-volumes /volume_group = {{ cinder_volume_group }}/g" etc/cinder.conf
+ become: true
+ args:
+ chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
+
+- name: make blockbox
+ shell: make blockbox
+ become: true
+ args:
+ chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
+
+- name: start cinder-standalone service
+ shell: docker-compose up -d
+ become: true
+ args:
+ chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
+
+- name: wait for cinder service to start normally
+ wait_for:
+ host: 127.0.0.1
+ port: 8776
+ delay: 2
+ timeout: 120
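Once docker-compose is up, the wait_for task polls port 8776; the same endpoint can be probed by hand (IP and port come from the task above, while the compose directory depends on cinder_data_dir, whose value is assumed here):

    curl -s http://127.0.0.1:8776/ | python -m json.tool   # Cinder API should list its versions
    cd /opt/stack/cinder/contrib/block-box                  # cinder_data_dir assumed to be /opt/stack
    docker-compose ps                                       # all block-box containers should be "Up"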
diff --git a/ci/ansible/roles/osdsdock/scenarios/lvm.yml b/ci/ansible/roles/osdsdock/scenarios/lvm.yml
new file mode 100644
index 0000000..d1d7b36
--- /dev/null
+++ b/ci/ansible/roles/osdsdock/scenarios/lvm.yml
@@ -0,0 +1,22 @@
+---
+- name: install lvm2 external package when lvm backend enabled
+ apt:
+ name: lvm2
+
+- name: check if physical volume existed
+ shell: pvdisplay {{ pv_device }}
+ ignore_errors: yes
+ register: pv_existed
+
+- name: create a physical volume
+ shell: pvcreate {{ pv_device }}
+ when: pv_existed is undefined or pv_existed.rc != 0
+
+- name: check if volume group existed
+ shell: vgdisplay {{ vg_name }}
+ ignore_errors: yes
+ register: vg_existed
+
+- name: create a volume group
+ shell: vgcreate {{ vg_name }} {{ pv_device }}
+ when: vg_existed is undefined or vg_existed.rc != 0
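The check/create pairs above make the LVM preparation idempotent; re-running them amounts to the guarded shell sketch below, with the device and group names assumed rather than taken from this file:

    pvdisplay /dev/sdb >/dev/null 2>&1 || pvcreate /dev/sdb                          # pv_device assumed
    vgdisplay opensds-volumes >/dev/null 2>&1 || vgcreate opensds-volumes /dev/sdb   # vg_name assumed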
diff --git a/ci/ansible/roles/osdsdock/tasks/main.yml b/ci/ansible/roles/osdsdock/tasks/main.yml
new file mode 100644
index 0000000..2462905
--- /dev/null
+++ b/ci/ansible/roles/osdsdock/tasks/main.yml
@@ -0,0 +1,31 @@
+---
+- name: include scenarios/lvm.yml
+ include: scenarios/lvm.yml
+ when: enabled_backend == "lvm"
+
+- name: include scenarios/ceph.yml
+ include: scenarios/ceph.yml
+ when: enabled_backend == "ceph"
+
+- name: include scenarios/cinder.yml
+ include: scenarios/cinder.yml
+ when: enabled_backend == "cinder" and use_cinder_standalone == false
+
+- name: include scenarios/cinder_standalone.yml
+ include: scenarios/cinder_standalone.yml
+ when: enabled_backend == "cinder" and use_cinder_standalone == true
+
+- name: run osdsdock daemon service
+ shell:
+ cmd: |
+ i=0
+ while
+ i="$((i+1))"
+ [ "$i" -lt 4 ]
+ do
+ nohup bin/osdsdock &>/dev/null &
+ sleep 5
+ ps aux | grep osdsdock | grep -v grep && break
+ done
+ args:
+ chdir: "{{ opensds_build_dir }}/out"
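The start loop retries the launch up to three times, checking after each attempt whether osdsdock stayed alive; an equivalent, slightly more conventional sketch of the same logic:

    for i in 1 2 3; do
        nohup bin/osdsdock &>/dev/null &
        sleep 5
        ps aux | grep osdsdock | grep -v grep && break
    done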
diff --git a/ci/ansible/roles/osdslet/tasks/main.yml b/ci/ansible/roles/osdslet/tasks/main.yml
new file mode 100644
index 0000000..2c3e0aa
--- /dev/null
+++ b/ci/ansible/roles/osdslet/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+- name: run osdslet daemon service
+ shell:
+ cmd: |
+ i=0
+ while
+ i="$((i+1))"
+ [ "$i" -lt 4 ]
+ do
+ nohup bin/osdslet > osdslet.out 2> osdslet.err < /dev/null &
+ sleep 5
+ ps aux | grep osdslet | grep -v grep && break
+ done
+ args:
+ chdir: "{{ opensds_build_dir }}/out"
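osdslet is started with the same retry pattern, but its stdout/stderr go to files instead of /dev/null; once it is up, a quick check from {{ opensds_build_dir }}/out (the 50040 port is only the customary OpenSDS default, an assumption here):

    ps aux | grep osdslet | grep -v grep   # confirm the daemon stayed up
    tail osdslet.err                       # startup errors land here per the redirection above
    curl http://127.0.0.1:50040/           # controller_endpoint assumed to be 127.0.0.1:50040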