diff options
author | leonwang <wanghui71@huawei.com> | 2018-03-15 08:25:05 +0000 |
---|---|---|
committer | leonwang <wanghui71@huawei.com> | 2018-03-15 08:39:22 +0000 |
commit | 6bc7e08cc5d80941c80e8d36d3a2b1373f147a05 (patch) | |
tree | 3e236cfc1f4ce35ad8ab09843010d16010da4054 /ci/ansible/roles | |
parent | 6018fcdd41c2074b2c94d8033f1434be028b054b (diff) |
Merge nbp installation into opensds ansible script
In this update, the nbp-ansible is removed from stor4nfv repo and
all code has been merged into ansible repo. Besides, the latest
update reduces a lot of work to download and build opensds source
code. And some installation docs are also updated.
Remove license statement for the moment.
Change-Id: Ib8504d96e2d41e1c3ab7e0c94689111679d56abd
Signed-off-by: leonwang <wanghui71@huawei.com>
Diffstat (limited to 'ci/ansible/roles')
-rw-r--r--[-rwxr-xr-x] | ci/ansible/roles/cleaner/tasks/main.yml | 355 | ||||
-rw-r--r--[-rwxr-xr-x] | ci/ansible/roles/common/tasks/main.yml | 242 | ||||
-rw-r--r-- | ci/ansible/roles/nbp-installer/scenarios/csi.yml | 0 | ||||
-rw-r--r-- | ci/ansible/roles/nbp-installer/scenarios/flexvolume.yml | 11 | ||||
-rw-r--r-- | ci/ansible/roles/nbp-installer/tasks/main.yml | 8 | ||||
-rw-r--r-- | ci/ansible/roles/osdsdb/scenarios/container.yml | 20 | ||||
-rw-r--r--[-rwxr-xr-x] | ci/ansible/roles/osdsdb/scenarios/etcd.yml | 78 | ||||
-rw-r--r--[-rwxr-xr-x] | ci/ansible/roles/osdsdb/tasks/main.yml | 16 | ||||
-rw-r--r--[-rwxr-xr-x] | ci/ansible/roles/osdsdock/scenarios/ceph.yml | 151 | ||||
-rw-r--r--[-rwxr-xr-x] | ci/ansible/roles/osdsdock/scenarios/cinder.yml | 10 | ||||
-rw-r--r-- | ci/ansible/roles/osdsdock/scenarios/cinder_standalone.yml | 291 | ||||
-rw-r--r--[-rwxr-xr-x] | ci/ansible/roles/osdsdock/scenarios/lvm.yml | 47 | ||||
-rw-r--r--[-rwxr-xr-x] | ci/ansible/roles/osdsdock/tasks/main.yml | 88 | ||||
-rw-r--r--[-rwxr-xr-x] | ci/ansible/roles/osdslet/tasks/main.yml | 52 |
14 files changed, 702 insertions, 667 deletions
diff --git a/ci/ansible/roles/cleaner/tasks/main.yml b/ci/ansible/roles/cleaner/tasks/main.yml index c1c465c..4b3b0c2 100755..100644 --- a/ci/ansible/roles/cleaner/tasks/main.yml +++ b/ci/ansible/roles/cleaner/tasks/main.yml @@ -1,167 +1,188 @@ ---- -- name: remove golang tarball - file: - path: "/opt/{{ golang_tarball }}" - state: absent - force: yes - ignore_errors: yes - -- name: kill etcd daemon service - shell: killall etcd - ignore_errors: yes - when: db_driver == "etcd" and container_enabled == false - -- name: kill etcd containerized service - docker: - image: quay.io/coreos/etcd:latest - state: stopped - when: container_enabled == true - -- name: remove etcd service data - file: - path: "{{ etcd_dir }}" - state: absent - force: yes - ignore_errors: yes - when: db_driver == "etcd" - -- name: remove etcd tarball - file: - path: "/opt/{{ etcd_tarball }}" - state: absent - force: yes - ignore_errors: yes - when: db_driver == "etcd" - -- name: kill osdslet daemon service - shell: killall osdslet - ignore_errors: yes - when: container_enabled == false - -- name: kill osdslet containerized service - docker: - image: opensdsio/opensds-controller:latest - state: stopped - when: container_enabled == true - -- name: kill osdsdock daemon service - shell: killall osdsdock - ignore_errors: yes - when: container_enabled == false - -- name: kill osdsdock containerized service - docker: - image: opensdsio/opensds-dock:latest - state: stopped - when: container_enabled == true - -- name: clean all opensds build files - shell: . 
/etc/profile; make clean - args: - chdir: "{{ opensds_root_dir }}" - -- name: clean all opensds configuration files - file: - path: "{{ opensds_config_dir }}" - state: absent - force: yes - ignore_errors: yes - -- name: clean all opensds log files - file: - path: "{{ opensds_log_dir }}" - state: absent - force: yes - ignore_errors: yes - -- name: check if it existed before cleaning a volume group - shell: vgdisplay {{ vg_name }} - ignore_errors: yes - register: vg_existed - when: enabled_backend == "lvm" - -- name: remove a volume group if lvm backend specified - shell: vgremove {{ vg_name }} - when: enabled_backend == "lvm" and vg_existed.rc == 0 - -- name: check if it existed before cleaning a physical volume - shell: pvdisplay {{ pv_device }} - ignore_errors: yes - register: pv_existed - when: enabled_backend == "lvm" - -- name: remove a physical volume if lvm backend specified - shell: pvremove {{ pv_device }} - when: enabled_backend == "lvm" and pv_existed.rc == 0 - -- name: stop cinder-standalone service - shell: docker-compose down - become: true - args: - chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box" - when: enabled_backend == "cinder" - -- name: clean the volume group of cinder - shell: - _raw_params: | - - # _clean_lvm_volume_group removes all default LVM volumes - # - # Usage: _clean_lvm_volume_group $vg - function _clean_lvm_volume_group { - local vg=$1 - - # Clean out existing volumes - sudo lvremove -f $vg - } - - # _remove_lvm_volume_group removes the volume group - # - # Usage: _remove_lvm_volume_group $vg - function _remove_lvm_volume_group { - local vg=$1 - - # Remove the volume group - sudo vgremove -f $vg - } - - # _clean_lvm_backing_file() removes the backing file of the - # volume group - # - # Usage: _clean_lvm_backing_file() $backing_file - function _clean_lvm_backing_file { - local backing_file=$1 - - # If the backing physical device is a loop device, it was probably setup by DevStack - if [[ -n "$backing_file" ]] && [[ -e 
"$backing_file" ]]; then - local vg_dev - vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'.img'/ { print $1}') - if [[ -n "$vg_dev" ]]; then - sudo losetup -d $vg_dev - fi - rm -f $backing_file - fi - } - - # clean_lvm_volume_group() cleans up the volume group and removes the - # backing file - # - # Usage: clean_lvm_volume_group $vg - function clean_lvm_volume_group { - local vg=$1 - - _clean_lvm_volume_group $vg - _remove_lvm_volume_group $vg - # if there is no logical volume left, it's safe to attempt a cleanup - # of the backing file - if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then - _clean_lvm_backing_file {{ cinder_data_dir }}/${vg}.img - fi - } - - clean_lvm_volume_group {{cinder_volume_group}} - - args: - executable: /bin/bash - become: true - when: enabled_backend == "cinder" +---
+- name: kill osdslet daemon service
+ shell: killall osdslet
+ ignore_errors: yes
+ when: container_enabled == false
+
+- name: kill osdslet containerized service
+ docker:
+ image: opensdsio/opensds-controller:latest
+ state: stopped
+ when: container_enabled == true
+
+- name: kill osdsdock daemon service
+ shell: killall osdsdock
+ ignore_errors: yes
+ when: container_enabled == false
+
+- name: kill osdsdock containerized service
+ docker:
+ image: opensdsio/opensds-dock:latest
+ state: stopped
+ when: container_enabled == true
+
+- name: kill etcd daemon service
+ shell: killall etcd
+ ignore_errors: yes
+ when: db_driver == "etcd" and container_enabled == false
+
+- name: kill etcd containerized service
+ docker:
+ image: quay.io/coreos/etcd:latest
+ state: stopped
+ when: db_driver == "etcd" and container_enabled == true
+
+- name: remove etcd service data
+ file:
+ path: "{{ etcd_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+ when: db_driver == "etcd"
+
+- name: remove etcd tarball
+ file:
+ path: "/opt/{{ etcd_tarball }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+ when: db_driver == "etcd"
+
+- name: clean opensds release files
+ file:
+ path: "{{ opensds_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+
+- name: clean opensds release tarball file
+ file:
+ path: "{{ opensds_tarball_url }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+
+- name: clean opensds flexvolume plugins binary file
+ file:
+ path: "{{ flexvolume_plugin_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+ when: nbp_plugin_type == "flexvolume"
+
+- name: clean nbp release files
+ file:
+ path: "{{ nbp_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+
+- name: clean nbp release tarball file
+ file:
+ path: "{{ nbp_tarball_url }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+
+- name: clean all opensds configuration files
+ file:
+ path: "{{ opensds_config_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+
+- name: clean all opensds log files
+ file:
+ path: "{{ opensds_log_dir }}"
+ state: absent
+ force: yes
+ ignore_errors: yes
+
+- name: check if it existed before cleaning a volume group
+ shell: vgdisplay {{ vg_name }}
+ ignore_errors: yes
+ register: vg_existed
+ when: enabled_backend == "lvm"
+
+- name: remove a volume group if lvm backend specified
+ lvg:
+ vg: "{{ vg_name }}"
+ state: absent
+ when: enabled_backend == "lvm" and vg_existed.rc == 0
+
+- name: remove physical volumes if lvm backend specified
+ shell: pvremove {{ item }}
+ with_items: "{{ pv_devices }}"
+ when: enabled_backend == "lvm"
+
+- name: stop cinder-standalone service
+ shell: docker-compose down
+ become: true
+ args:
+ chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
+ when: enabled_backend == "cinder"
+
+- name: clean the volume group of cinder
+ shell:
+ _raw_params: |
+
+ # _clean_lvm_volume_group removes all default LVM volumes
+ #
+ # Usage: _clean_lvm_volume_group $vg
+ function _clean_lvm_volume_group {
+ local vg=$1
+
+ # Clean out existing volumes
+ sudo lvremove -f $vg
+ }
+
+ # _remove_lvm_volume_group removes the volume group
+ #
+ # Usage: _remove_lvm_volume_group $vg
+ function _remove_lvm_volume_group {
+ local vg=$1
+
+ # Remove the volume group
+ sudo vgremove -f $vg
+ }
+
+ # _clean_lvm_backing_file() removes the backing file of the
+ # volume group
+ #
+ # Usage: _clean_lvm_backing_file() $backing_file
+ function _clean_lvm_backing_file {
+ local backing_file=$1
+
+ # If the backing physical device is a loop device, it was probably setup by DevStack
+ if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
+ local vg_dev
+ vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'.img'/ { print $1}')
+ if [[ -n "$vg_dev" ]]; then
+ sudo losetup -d $vg_dev
+ fi
+ rm -f $backing_file
+ fi
+ }
+
+ # clean_lvm_volume_group() cleans up the volume group and removes the
+ # backing file
+ #
+ # Usage: clean_lvm_volume_group $vg
+ function clean_lvm_volume_group {
+ local vg=$1
+
+ _clean_lvm_volume_group $vg
+ _remove_lvm_volume_group $vg
+ # if there is no logical volume left, it's safe to attempt a cleanup
+ # of the backing file
+ if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
+ _clean_lvm_backing_file {{ cinder_data_dir }}/${vg}.img
+ fi
+ }
+
+ clean_lvm_volume_group {{cinder_volume_group}}
+
+ args:
+ executable: /bin/bash
+ become: true
+ when: enabled_backend == "cinder"
diff --git a/ci/ansible/roles/common/tasks/main.yml b/ci/ansible/roles/common/tasks/main.yml index d6bef82..7ae2234 100755..100644 --- a/ci/ansible/roles/common/tasks/main.yml +++ b/ci/ansible/roles/common/tasks/main.yml @@ -1,121 +1,121 @@ ---- -# If we can't get golang installed before any module is used we will fail -# so just try what we can to get it installed -- name: check for golang - stat: - path: /usr/local/go - ignore_errors: yes - register: systemgolang - -- name: install golang for debian based systems - shell: - cmd: | - set -e - set -x - - wget {{ golang_download_url }} -P /opt/ - tar xvf /opt/{{ golang_tarball }} -C /usr/local/ - cat >> /etc/profile <<GOLANG__CONFIG_DOC - export GOROOT=/usr/local/go - export GOPATH=\$HOME/gopath - export PATH=\$PATH:\$GOROOT/bin:\$GOPATH/bin - GOLANG__CONFIG_DOC - - executable: /bin/bash - ignore_errors: yes - when: - - systemgolang.stat.exists is undefined or systemgolang.stat.exists == false - -- name: Run the equivalent of "apt-get update" as a separate step - apt: - update_cache: yes - -- name: install librados-dev external package - apt: - name: librados-dev - -- name: install librbd-dev external package - apt: - name: librbd-dev - -- pip: - name: docker-py - when: container_enabled == true - -- name: check for opensds source code existed - stat: - path: "{{ opensds_root_dir }}" - ignore_errors: yes - register: opensdsexisted - -- name: download opensds source code - git: - repo: "{{ remote_url }}" - dest: "{{ opensds_root_dir }}" - when: - - opensdsexisted.stat.exists is undefined or opensdsexisted.stat.exists == false - -- name: check for opensds binary file existed - stat: - path: "{{ opensds_build_dir }}" - ignore_errors: yes - register: opensdsbuilt - -- name: build opensds binary file - shell: . 
/etc/profile; make - args: - chdir: "{{ opensds_root_dir }}" - when: - - opensdsbuilt.stat.exists is undefined or opensdsbuilt.stat.exists == false - -- name: create opensds global config directory if it doesn't exist - file: - path: "{{ opensds_config_dir }}/driver" - state: directory - mode: 0755 - -- name: create opensds log directory if it doesn't exist - file: - path: "{{ opensds_log_dir }}" - state: directory - mode: 0755 - -- name: configure opensds global info - shell: | - cat > opensds.conf <<OPENSDS_GLOABL_CONFIG_DOC - [osdslet] - api_endpoint = {{ controller_endpoint }} - graceful = True - log_file = {{ controller_log_file }} - socket_order = inc - - [osdsdock] - api_endpoint = {{ dock_endpoint }} - log_file = {{ dock_log_file }} - # Specify which backends should be enabled, sample,ceph,cinder,lvm and so on. - enabled_backends = {{ enabled_backend }} - - [lvm] - name = {{ lvm_name }} - description = {{ lvm_description }} - driver_name = {{ lvm_driver_name }} - config_path = {{ lvm_config_path }} - - [ceph] - name = {{ ceph_name }} - description = {{ ceph_description }} - driver_name = {{ ceph_driver_name }} - config_path = {{ ceph_config_path }} - - [cinder] - name = {{ cinder_name }} - description = {{ cinder_description }} - driver_name = {{ cinder_driver_name }} - config_path = {{ cinder_config_path }} - - [database] - endpoint = {{ db_endpoint }} - driver = {{ db_driver }} - args: - chdir: "{{ opensds_config_dir }}" - ignore_errors: yes +---
+- name: run the equivalent of "apt-get update" as a separate step
+ apt:
+ update_cache: yes
+
+- name: install librados-dev and librbd-dev external packages
+ apt:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - librados-dev
+ - librbd-dev
+
+- name: install docker-py package with pip when enabling containerized deployment
+ pip:
+ name: docker-py
+ when: container_enabled == true
+
+- name: check for opensds release files existed
+ stat:
+ path: "{{ opensds_dir }}"
+ ignore_errors: yes
+ register: opensdsreleasesexisted
+
+- name: download opensds release files
+ get_url:
+ url={{ opensds_download_url }}
+ dest={{ opensds_tarball_url }}
+ when:
+ - opensdsreleasesexisted.stat.exists is undefined or opensdsreleasesexisted.stat.exists == false
+
+- name: extract the opensds release tarball
+ unarchive:
+ src={{ opensds_tarball_url }}
+ dest=/opt/
+ when:
+ - opensdsreleasesexisted.stat.exists is undefined or opensdsreleasesexisted.stat.exists == false
+
+- name: check for nbp release files existed
+ stat:
+ path: "{{ nbp_dir }}"
+ ignore_errors: yes
+ register: nbpreleasesexisted
+
+- name: download nbp release files
+ get_url:
+ url={{ nbp_download_url }}
+ dest={{ nbp_tarball_url }}
+ when:
+ - nbpreleasesexisted.stat.exists is undefined or nbpreleasesexisted.stat.exists == false
+
+- name: extract the nbp release tarball
+ unarchive:
+ src={{ nbp_tarball_url }}
+ dest=/opt/
+ when:
+ - nbpreleasesexisted.stat.exists is undefined or nbpreleasesexisted.stat.exists == false
+
+- name: change the mode of all binary files in opensds release
+ file:
+ path: "{{ opensds_dir }}/bin"
+ mode: 0755
+ recurse: yes
+
+- name: change the mode of all binary files in nbp release
+ file:
+ path: "{{ nbp_dir }}/flexvolume"
+ mode: 0755
+ recurse: yes
+
+- name: create opensds global config directory if it doesn't exist
+ file:
+ path: "{{ opensds_config_dir }}/driver"
+ state: directory
+ mode: 0755
+
+- name: create opensds log directory if it doesn't exist
+ file:
+ path: "{{ opensds_log_dir }}"
+ state: directory
+ mode: 0755
+
+- name: configure opensds global info
+ shell: |
+ cat > opensds.conf <<OPENSDS_GLOABL_CONFIG_DOC
+ [osdslet]
+ api_endpoint = {{ controller_endpoint }}
+ graceful = True
+ log_file = {{ controller_log_file }}
+ socket_order = inc
+
+ [osdsdock]
+ api_endpoint = {{ dock_endpoint }}
+ log_file = {{ dock_log_file }}
+ # Specify which backends should be enabled, sample,ceph,cinder,lvm and so on.
+ enabled_backends = {{ enabled_backend }}
+
+ [lvm]
+ name = {{ lvm_name }}
+ description = {{ lvm_description }}
+ driver_name = {{ lvm_driver_name }}
+ config_path = {{ lvm_config_path }}
+
+ [ceph]
+ name = {{ ceph_name }}
+ description = {{ ceph_description }}
+ driver_name = {{ ceph_driver_name }}
+ config_path = {{ ceph_config_path }}
+
+ [cinder]
+ name = {{ cinder_name }}
+ description = {{ cinder_description }}
+ driver_name = {{ cinder_driver_name }}
+ config_path = {{ cinder_config_path }}
+
+ [database]
+ endpoint = {{ db_endpoint }}
+ driver = {{ db_driver }}
+ args:
+ chdir: "{{ opensds_config_dir }}"
+ ignore_errors: yes
diff --git a/ci/ansible/roles/nbp-installer/scenarios/csi.yml b/ci/ansible/roles/nbp-installer/scenarios/csi.yml new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/ci/ansible/roles/nbp-installer/scenarios/csi.yml diff --git a/ci/ansible/roles/nbp-installer/scenarios/flexvolume.yml b/ci/ansible/roles/nbp-installer/scenarios/flexvolume.yml new file mode 100644 index 0000000..0bba93b --- /dev/null +++ b/ci/ansible/roles/nbp-installer/scenarios/flexvolume.yml @@ -0,0 +1,11 @@ +---
+- name: Create flexvolume plugin directory if not existed
+ file:
+ path: "{{ flexvolume_plugin_dir }}"
+ state: directory
+ mode: 0755
+
+- name: Copy opensds flexvolume plugin binary file into flexvolume plugin dir
+ copy:
+ src: "{{ nbp_dir }}/flexvolume/opensds"
+ dest: "{{ flexvolume_plugin_dir }}/opensds"
diff --git a/ci/ansible/roles/nbp-installer/tasks/main.yml b/ci/ansible/roles/nbp-installer/tasks/main.yml new file mode 100644 index 0000000..58057f1 --- /dev/null +++ b/ci/ansible/roles/nbp-installer/tasks/main.yml @@ -0,0 +1,8 @@ +---
+- name: include scenarios/flexvolume.yml
+ include: scenarios/flexvolume.yml
+ when: nbp_plugin_type == "flexvolume"
+
+- name: include scenarios/csi.yml
+ include: scenarios/csi.yml
+ when: nbp_plugin_type == "csi"
diff --git a/ci/ansible/roles/osdsdb/scenarios/container.yml b/ci/ansible/roles/osdsdb/scenarios/container.yml index 8a75ef2..afbd15b 100644 --- a/ci/ansible/roles/osdsdb/scenarios/container.yml +++ b/ci/ansible/roles/osdsdb/scenarios/container.yml @@ -1,10 +1,10 @@ ---- -- name: run etcd containerized service - docker: - name: myetcd - image: quay.io/coreos/etcd:latest - command: /usr/local/bin/etcd --advertise-client-urls http://{{ etcd_host }}:{{ etcd_port }} --listen-client-urls http://{{ etcd_host }}:{{ etcd_port }} -advertise-client-urls http://{{ etcd_host }}:{{ etcd_peer_port }} -listen-peer-urls http://{{ etcd_host }}:{{ etcd_peer_port }} - state: started - net: host - volumes: - - "/usr/share/ca-certificates/:/etc/ssl/certs" +---
+- name: run etcd containerized service
+ docker:
+ name: myetcd
+ image: quay.io/coreos/etcd:latest
+ command: /usr/local/bin/etcd --advertise-client-urls http://{{ etcd_host }}:{{ etcd_port }} --listen-client-urls http://{{ etcd_host }}:{{ etcd_port }} -advertise-client-urls http://{{ etcd_host }}:{{ etcd_peer_port }} -listen-peer-urls http://{{ etcd_host }}:{{ etcd_peer_port }}
+ state: started
+ net: host
+ volumes:
+ - "/usr/share/ca-certificates/:/etc/ssl/certs"
diff --git a/ci/ansible/roles/osdsdb/scenarios/etcd.yml b/ci/ansible/roles/osdsdb/scenarios/etcd.yml index b05f0e7..9c3352b 100755..100644 --- a/ci/ansible/roles/osdsdb/scenarios/etcd.yml +++ b/ci/ansible/roles/osdsdb/scenarios/etcd.yml @@ -1,39 +1,39 @@ ---- -- name: check for etcd existed - stat: - path: "{{ etcd_dir }}/etcd" - ignore_errors: yes - register: etcdexisted - -- name: download etcd - get_url: - url={{ etcd_download_url }} - dest=/opt/{{ etcd_tarball }} - when: - - etcdexisted.stat.exists is undefined or etcdexisted.stat.exists == false - -- name: extract the etcd tarball - unarchive: - src=/opt/{{ etcd_tarball }} - dest=/opt/ - when: - - etcdexisted.stat.exists is undefined or etcdexisted.stat.exists == false - -- name: Check if etcd is running - shell: ps aux | grep etcd | grep -v grep - ignore_errors: true - register: service_etcd_status - -- name: run etcd daemon service - shell: nohup ./etcd --advertise-client-urls http://{{ etcd_host }}:{{ etcd_port }} --listen-client-urls http://{{ etcd_host }}:{{ etcd_port }} -advertise-client-urls http://{{ etcd_host }}:{{ etcd_peer_port }} -listen-peer-urls http://{{ etcd_host }}:{{ etcd_peer_port }} &>>etcd.log & - become: true - args: - chdir: "{{ etcd_dir }}" - when: service_etcd_status.rc != 0 - -- name: check etcd cluster health - shell: ./etcdctl cluster-health - become: true - ignore_errors: true - args: - chdir: "{{ etcd_dir }}" +---
+- name: check for etcd existed
+ stat:
+ path: "{{ etcd_dir }}/etcd"
+ ignore_errors: yes
+ register: etcdexisted
+
+- name: download etcd
+ get_url:
+ url={{ etcd_download_url }}
+ dest=/opt/{{ etcd_tarball }}
+ when:
+ - etcdexisted.stat.exists is undefined or etcdexisted.stat.exists == false
+
+- name: extract the etcd tarball
+ unarchive:
+ src=/opt/{{ etcd_tarball }}
+ dest=/opt/
+ when:
+ - etcdexisted.stat.exists is undefined or etcdexisted.stat.exists == false
+
+- name: Check if etcd is running
+ shell: ps aux | grep etcd | grep -v grep
+ ignore_errors: true
+ register: service_etcd_status
+
+- name: run etcd daemon service
+ shell: nohup ./etcd --advertise-client-urls http://{{ etcd_host }}:{{ etcd_port }} --listen-client-urls http://{{ etcd_host }}:{{ etcd_port }} -advertise-client-urls http://{{ etcd_host }}:{{ etcd_peer_port }} -listen-peer-urls http://{{ etcd_host }}:{{ etcd_peer_port }} &>>etcd.log &
+ become: true
+ args:
+ chdir: "{{ etcd_dir }}"
+ when: service_etcd_status.rc != 0
+
+- name: check etcd cluster health
+ shell: ./etcdctl cluster-health
+ become: true
+ ignore_errors: true
+ args:
+ chdir: "{{ etcd_dir }}"
diff --git a/ci/ansible/roles/osdsdb/tasks/main.yml b/ci/ansible/roles/osdsdb/tasks/main.yml index 03530b4..efbfba9 100755..100644 --- a/ci/ansible/roles/osdsdb/tasks/main.yml +++ b/ci/ansible/roles/osdsdb/tasks/main.yml @@ -1,8 +1,8 @@ ---- -- name: include scenarios/etcd.yml - include: scenarios/etcd.yml - when: db_driver == "etcd" and container_enabled == false - -- name: include scenarios/container.yml - include: scenarios/container.yml - when: db_driver == "etcd" and container_enabled == true +---
+- name: include scenarios/etcd.yml
+ include: scenarios/etcd.yml
+ when: db_driver == "etcd" and container_enabled == false
+
+- name: include scenarios/container.yml
+ include: scenarios/container.yml
+ when: db_driver == "etcd" and container_enabled == true
diff --git a/ci/ansible/roles/osdsdock/scenarios/ceph.yml b/ci/ansible/roles/osdsdock/scenarios/ceph.yml index 2b6196c..b844a29 100755..100644 --- a/ci/ansible/roles/osdsdock/scenarios/ceph.yml +++ b/ci/ansible/roles/osdsdock/scenarios/ceph.yml @@ -1,74 +1,77 @@ ---- -- name: install ceph-common external package when ceph backend enabled - apt: - name: ceph-common - when: enabled_backend == "ceph" - -- name: copy opensds ceph backend file if specify ceph backend - copy: - src: ../../../group_vars/ceph/ceph.yaml - dest: "{{ ceph_config_path }}" - -- name: check for ceph-ansible source code existed - stat: - path: /opt/ceph-ansible - ignore_errors: yes - register: cephansibleexisted - -- name: download ceph-ansible source code - git: - repo: https://github.com/ceph/ceph-ansible.git - dest: /opt/ceph-ansible - when: - - cephansibleexisted.stat.exists is undefined or cephansibleexisted.stat.exists == false - -- name: copy ceph inventory host into ceph-ansible directory - copy: - src: ../../../group_vars/ceph/ceph.hosts - dest: /opt/ceph-ansible/ceph.hosts - -- name: copy ceph all.yml file into ceph-ansible group_vars directory - copy: - src: ../../../group_vars/ceph/all.yml - dest: /opt/ceph-ansible/group_vars/all.yml - -- name: copy ceph osds.yml file into ceph-ansible group_vars directory - copy: - src: ../../../group_vars/ceph/osds.yml - dest: /opt/ceph-ansible/group_vars/osds.yml - -- name: copy site.yml.sample to site.yml in ceph-ansible - copy: - src: /opt/ceph-ansible/site.yml.sample - dest: /opt/ceph-ansible/site.yml - -- name: ping all hosts - shell: ansible all -m ping -i ceph.hosts - become: true - args: - chdir: /opt/ceph-ansible - -- name: run ceph-ansible playbook - shell: ansible-playbook site.yml -i ceph.hosts | tee /var/log/ceph_ansible.log - become: true - args: - chdir: /opt/ceph-ansible - -#- name: Check if ceph osd is running -# shell: ps aux | grep ceph-osd | grep -v grep -# ignore_errors: false -# changed_when: false -# register: 
service_ceph_osd_status - -- name: Check if ceph mon is running - shell: ps aux | grep ceph-mon | grep -v grep - ignore_errors: false - changed_when: false - register: service_ceph_mon_status - -- name: Create a pool and initialize it. - shell: ceph osd pool create {{ ceph_pool_name }} 100 && ceph osd pool set {{ ceph_pool_name }} size 1 - ignore_errors: yes - changed_when: false - register: ceph_init_pool - when: service_ceph_mon_status.rc == 0 # and service_ceph_osd_status.rc == 0 +---
+- name: install ceph-common external package when ceph backend enabled
+ apt:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - ceph-common
+ when: enabled_backend == "ceph"
+
+- name: copy opensds ceph backend file if specify ceph backend
+ copy:
+ src: ../../../group_vars/ceph/ceph.yaml
+ dest: "{{ ceph_config_path }}"
+
+- name: check for ceph-ansible source code existed
+ stat:
+ path: /opt/ceph-ansible
+ ignore_errors: yes
+ register: cephansibleexisted
+
+- name: download ceph-ansible source code
+ git:
+ repo: https://github.com/ceph/ceph-ansible.git
+ dest: /opt/ceph-ansible
+ when:
+ - cephansibleexisted.stat.exists is undefined or cephansibleexisted.stat.exists == false
+
+- name: copy ceph inventory host into ceph-ansible directory
+ copy:
+ src: ../../../group_vars/ceph/ceph.hosts
+ dest: /opt/ceph-ansible/ceph.hosts
+
+- name: copy ceph all.yml file into ceph-ansible group_vars directory
+ copy:
+ src: ../../../group_vars/ceph/all.yml
+ dest: /opt/ceph-ansible/group_vars/all.yml
+
+- name: copy ceph osds.yml file into ceph-ansible group_vars directory
+ copy:
+ src: ../../../group_vars/ceph/osds.yml
+ dest: /opt/ceph-ansible/group_vars/osds.yml
+
+- name: copy site.yml.sample to site.yml in ceph-ansible
+ copy:
+ src: /opt/ceph-ansible/site.yml.sample
+ dest: /opt/ceph-ansible/site.yml
+
+- name: ping all hosts
+ shell: ansible all -m ping -i ceph.hosts
+ become: true
+ args:
+ chdir: /opt/ceph-ansible
+
+- name: run ceph-ansible playbook
+ shell: ansible-playbook site.yml -i ceph.hosts | tee /var/log/ceph_ansible.log
+ become: true
+ args:
+ chdir: /opt/ceph-ansible
+
+#- name: Check if ceph osd is running
+# shell: ps aux | grep ceph-osd | grep -v grep
+# ignore_errors: false
+# changed_when: false
+# register: service_ceph_osd_status
+
+- name: Check if ceph mon is running
+ shell: ps aux | grep ceph-mon | grep -v grep
+ ignore_errors: false
+ changed_when: false
+ register: service_ceph_mon_status
+
+- name: Create specified pools and initialize them with default pool size.
+ shell: ceph osd pool create {{ item }} 100 && ceph osd pool set {{ item }} size 1
+ ignore_errors: yes
+ changed_when: false
+ with_items: "{{ ceph_pools }}"
+ when: service_ceph_mon_status.rc == 0 # and service_ceph_osd_status.rc == 0
diff --git a/ci/ansible/roles/osdsdock/scenarios/cinder.yml b/ci/ansible/roles/osdsdock/scenarios/cinder.yml index 333c5c0..6136f25 100755..100644 --- a/ci/ansible/roles/osdsdock/scenarios/cinder.yml +++ b/ci/ansible/roles/osdsdock/scenarios/cinder.yml @@ -1,5 +1,5 @@ ---- -- name: copy opensds cinder backend file if specify cinder backend - copy: - src: ../../../group_vars/cinder/cinder.yaml - dest: "{{ cinder_config_path }}" +---
+- name: copy opensds cinder backend file if specify cinder backend
+ copy:
+ src: ../../../group_vars/cinder/cinder.yaml
+ dest: "{{ cinder_config_path }}"
diff --git a/ci/ansible/roles/osdsdock/scenarios/cinder_standalone.yml b/ci/ansible/roles/osdsdock/scenarios/cinder_standalone.yml index 7bf2b97..49f4063 100644 --- a/ci/ansible/roles/osdsdock/scenarios/cinder_standalone.yml +++ b/ci/ansible/roles/osdsdock/scenarios/cinder_standalone.yml @@ -1,146 +1,145 @@ ---- - -- name: install python-pip - apt: - name: python-pip - -- name: install lvm2 - apt: - name: lvm2 - -- name: install thin-provisioning-tools - apt: - name: thin-provisioning-tools - -- name: install docker-compose - pip: - name: docker-compose - -- name: copy opensds cinder backend file if specify cinder backend - copy: - src: ../../../group_vars/cinder/cinder.yaml - dest: "{{ cinder_config_path }}" - -- name: create directory to save source code and volume group file - file: - path: "{{ cinder_data_dir }}" - state: directory - recurse: yes - -- name: create volume group in thin mode - shell: - _raw_params: | - function _create_lvm_volume_group { - local vg=$1 - local size=$2 - - local backing_file={{ cinder_data_dir }}/${vg}.img - if ! sudo vgs $vg; then - # Only create if the file doesn't already exists - [[ -f $backing_file ]] || truncate -s $size $backing_file - local vg_dev - vg_dev=`sudo losetup -f --show $backing_file` - - # Only create volume group if it doesn't already exist - if ! 
sudo vgs $vg; then - sudo vgcreate $vg $vg_dev - fi - fi - } - modprobe dm_thin_pool - _create_lvm_volume_group {{ cinder_volume_group }} 10G - args: - executable: /bin/bash - become: true - -- name: check for python-cinderclient source code existed - stat: - path: "{{ cinder_data_dir }}/python-cinderclient" - ignore_errors: yes - register: cinderclient_existed - -- name: download python-cinderclient source code - git: - repo: https://github.com/openstack/python-cinderclient.git - dest: "{{ cinder_data_dir }}/python-cinderclient" - when: - - cinderclient_existed.stat.exists is undefined or cinderclient_existed.stat.exists == false - -# Tested successfully in this version `ab0185bfc6e8797a35a2274c2a5ee03afb03dd60` -# git checkout -b ab0185bfc6e8797a35a2274c2a5ee03afb03dd60 -- name: pip install cinderclinet - shell: | - pip install -e . - become: true - args: - chdir: "{{ cinder_data_dir }}/python-cinderclient" - -- name: check for python-brick-cinderclient-ext source code existed - stat: - path: "{{ cinder_data_dir }}/python-brick-cinderclient-ext" - ignore_errors: yes - register: brick_existed - -- name: download python-brick-cinderclient-ext source code - git: - repo: https://github.com/openstack/python-brick-cinderclient-ext.git - dest: "{{ cinder_data_dir }}/python-brick-cinderclient-ext" - when: - - brick_existed.stat.exists is undefined or brick_existed.stat.exists == false - -# Tested successfully in this version `a281e67bf9c12521ea5433f86cec913854826a33` -# git checkout -b a281e67bf9c12521ea5433f86cec913854826a33 -- name: pip install python-brick-cinderclient-ext - shell: | - pip install -e . 
- become: true - args: - chdir: "{{ cinder_data_dir }}/python-brick-cinderclient-ext" - - -- name: check for cinder source code existed - stat: - path: "{{ cinder_data_dir }}/cinder" - ignore_errors: yes - register: cinder_existed - -- name: download cinder source code - git: - repo: https://github.com/openstack/cinder.git - dest: "{{ cinder_data_dir }}/cinder" - when: - - cinder_existed.stat.exists is undefined or cinder_existed.stat.exists == false - -# Tested successfully in this version `7bbc95344d3961d0bf059252723fa40b33d4b3fe` -# git checkout -b 7bbc95344d3961d0bf059252723fa40b33d4b3fe -- name: update blockbox configuration - shell: | - sed -i "s/PLATFORM ?= debian:stretch/PLATFORM ?= {{ cinder_container_platform }}/g" Makefile - sed -i "s/TAG ?= debian-cinder:latest/TAG ?= {{ cinder_image_tag }}:latest/g" Makefile - - sed -i "s/image: debian-cinder/image: {{ cinder_image_tag }}/g" docker-compose.yml - sed -i "s/image: lvm-debian-cinder/image: lvm-{{ cinder_image_tag }}/g" docker-compose.yml - - sed -i "s/volume_group = cinder-volumes /volume_group = {{ cinder_volume_group }}/g" etc/cinder.conf - become: true - args: - chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box" - -- name: make blockbox - shell: make blockbox - become: true - args: - chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box" - -- name: start cinder-standalone service - shell: docker-compose up -d - become: true - args: - chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box" - -- name: wait for cinder service to start normally - wait_for: - host: 127.0.0.1 - port: 8776 - delay: 2 - timeout: 120 +---
# Host prerequisites for cinder-standalone: python-pip, lvm2 and the thin
# provisioning tools come from apt (Debian/Ubuntu host assumed), while
# docker-compose is installed via pip. No `state` is given, so the Ansible
# default (present) applies.
- name: install python-pip
  apt:
    name: python-pip

- name: install lvm2
  apt:
    name: lvm2

- name: install thin-provisioning-tools
  apt:
    name: thin-provisioning-tools

- name: install docker-compose
  pip:
    name: docker-compose
+
# Push the cinder backend definition from the playbook's group_vars into the
# path OpenSDS reads (cinder_config_path). `src` is resolved relative to this
# role — NOTE(review): confirm the ../../../ hop matches the repo layout.
- name: copy opensds cinder backend file if specify cinder backend
  copy:
    src: ../../../group_vars/cinder/cinder.yaml
    dest: "{{ cinder_config_path }}"

# Working directory for the cloned source trees and the loopback VG backing
# file created by the next task.
- name: create directory to save source code and volume group file
  file:
    path: "{{ cinder_data_dir }}"
    state: directory
    recurse: yes
+
# Create the LVM volume group cinder will carve volumes from, backed by a
# sparse loopback file (cinder_data_dir/<vg>.img, truncated to 10G).
# dm_thin_pool is loaded first so the VG can host thin pools. Safe to
# re-run: the helper skips creation when `vgs` already reports the group.
# NOTE(review): the inner `if ! sudo vgs $vg` duplicates the outer check —
# harmless but redundant. The task already runs with become, so the
# embedded sudo calls are belt-and-braces.
- name: create volume group in thin mode
  shell:
    _raw_params: |
      function _create_lvm_volume_group {
          local vg=$1
          local size=$2

          local backing_file={{ cinder_data_dir }}/${vg}.img
          if ! sudo vgs $vg; then
              # Only create if the file doesn't already exists
              [[ -f $backing_file ]] || truncate -s $size $backing_file
              local vg_dev
              vg_dev=`sudo losetup -f --show $backing_file`

              # Only create volume group if it doesn't already exist
              if ! sudo vgs $vg; then
                  sudo vgcreate $vg $vg_dev
              fi
          fi
      }
      modprobe dm_thin_pool
      _create_lvm_volume_group {{ cinder_volume_group }} 10G
  args:
    executable: /bin/bash
  become: true
+
# Probe for an existing checkout so the clone below only happens once.
# NOTE(review): stat does not fail on a missing path, so ignore_errors is
# redundant here; kept to match the sibling checks in this file.
- name: check for python-cinderclient source code existed
  stat:
    path: "{{ cinder_data_dir }}/python-cinderclient"
  ignore_errors: yes
  register: cinderclient_existed

# Clone only on first run; the stat result above gates the download.
- name: download python-cinderclient source code
  git:
    repo: https://github.com/openstack/python-cinderclient.git
    dest: "{{ cinder_data_dir }}/python-cinderclient"
  when:
    - cinderclient_existed.stat.exists is undefined or cinderclient_existed.stat.exists == false
+
# Tested successfully in this version `ab0185bfc6e8797a35a2274c2a5ee03afb03dd60`
# git checkout -b ab0185bfc6e8797a35a2274c2a5ee03afb03dd60
# Install the freshly cloned python-cinderclient in editable (develop) mode
# so the checkout is importable system-wide.
- name: pip install cinderclient  # fixed typo: task name was "cinderclinet"
  shell: |
    pip install -e .
  become: true
  args:
    chdir: "{{ cinder_data_dir }}/python-cinderclient"
+
# Same check-clone-install pattern as python-cinderclient above, for the
# brick extension (adds local volume attach/detach to cinderclient).
- name: check for python-brick-cinderclient-ext source code existed
  stat:
    path: "{{ cinder_data_dir }}/python-brick-cinderclient-ext"
  ignore_errors: yes
  register: brick_existed

- name: download python-brick-cinderclient-ext source code
  git:
    repo: https://github.com/openstack/python-brick-cinderclient-ext.git
    dest: "{{ cinder_data_dir }}/python-brick-cinderclient-ext"
  when:
    - brick_existed.stat.exists is undefined or brick_existed.stat.exists == false

# Tested successfully in this version `a281e67bf9c12521ea5433f86cec913854826a33`
# git checkout -b a281e67bf9c12521ea5433f86cec913854826a33
# Editable install, same rationale as the cinderclient install above.
- name: pip install python-brick-cinderclient-ext
  shell: |
    pip install -e .
  become: true
  args:
    chdir: "{{ cinder_data_dir }}/python-brick-cinderclient-ext"
+
+
# Probe-then-clone for the cinder tree itself; its contrib/block-box
# directory is what the remaining tasks build and run.
- name: check for cinder source code existed
  stat:
    path: "{{ cinder_data_dir }}/cinder"
  ignore_errors: yes
  register: cinder_existed

- name: download cinder source code
  git:
    repo: https://github.com/openstack/cinder.git
    dest: "{{ cinder_data_dir }}/cinder"
  when:
    - cinder_existed.stat.exists is undefined or cinder_existed.stat.exists == false
+
# Tested successfully in this version `7bbc95344d3961d0bf059252723fa40b33d4b3fe`
# git checkout -b 7bbc95344d3961d0bf059252723fa40b33d4b3fe
# Rewrite the upstream block-box defaults in place so the container
# platform, image tag and LVM volume group all follow this deployment's
# variables instead of the debian-cinder defaults.
# NOTE(review): the last sed pattern keeps a space after "cinder-volumes " —
# confirm etc/cinder.conf really contains that trailing space, otherwise
# the substitution silently matches nothing.
- name: update blockbox configuration
  shell: |
    sed -i "s/PLATFORM ?= debian:stretch/PLATFORM ?= {{ cinder_container_platform }}/g" Makefile
    sed -i "s/TAG ?= debian-cinder:latest/TAG ?= {{ cinder_image_tag }}:latest/g" Makefile

    sed -i "s/image: debian-cinder/image: {{ cinder_image_tag }}/g" docker-compose.yml
    sed -i "s/image: lvm-debian-cinder/image: lvm-{{ cinder_image_tag }}/g" docker-compose.yml

    sed -i "s/volume_group = cinder-volumes /volume_group = {{ cinder_volume_group }}/g" etc/cinder.conf
  become: true
  args:
    chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
+
# Build the block-box container images (retagged by the sed task above),
# then bring the standalone cinder services up detached via docker-compose.
- name: make blockbox
  shell: make blockbox
  become: true
  args:
    chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"

- name: start cinder-standalone service
  shell: docker-compose up -d
  become: true
  args:
    chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
+
# Block until the cinder API port answers on localhost:8776 (2s initial
# delay; the task fails the play if the port is not open within 120s).
- name: wait for cinder service to start normally
  wait_for:
    host: 127.0.0.1
    port: 8776
    delay: 2
    timeout: 120
diff --git a/ci/ansible/roles/osdsdock/scenarios/lvm.yml b/ci/ansible/roles/osdsdock/scenarios/lvm.yml index 5847aa3..743fe3b 100755..100644 --- a/ci/ansible/roles/osdsdock/scenarios/lvm.yml +++ b/ci/ansible/roles/osdsdock/scenarios/lvm.yml @@ -1,27 +1,20 @@ ---- -- name: install lvm2 external package when lvm backend enabled - apt: - name: lvm2 - -- name: copy opensds lvm backend file if specify lvm backend - copy: - src: ../../../group_vars/lvm/lvm.yaml - dest: "{{ lvm_config_path }}" - -- name: check if physical volume existed - shell: pvdisplay {{ pv_device }} - ignore_errors: yes - register: pv_existed - -- name: create a physical volume - shell: pvcreate {{ pv_device }} - when: pv_existed is undefined or pv_existed.rc != 0 - -- name: check if volume group existed - shell: vgdisplay {{ vg_name }} - ignore_errors: yes - register: vg_existed - -- name: create a volume group - shell: vgcreate {{ vg_name }} {{ pv_device }} - when: vg_existed is undefined or vg_existed.rc != 0 +---
# LVM backend scenario: install lvm2, drop the backend config where OpenSDS
# expects it, then ensure the volume group exists.
- name: install lvm2 external package when lvm backend enabled
  apt:
    name: lvm2

- name: copy opensds lvm backend file if specify lvm backend
  copy:
    src: ../../../group_vars/lvm/lvm.yaml
    dest: "{{ lvm_config_path }}"

# Probe for an existing VG; a non-zero rc means vgdisplay did not find it.
- name: check if volume group existed
  shell: vgdisplay {{ vg_name }}
  ignore_errors: yes
  register: vg_existed

# lvg is itself idempotent, so the vg_existed guard is belt-and-braces.
# NOTE(review): `vg_existed is undefined` can never be true right after the
# register above; kept as-is. pv_devices is expected to list the physical
# volumes/devices backing the group — confirm it is defined in group_vars.
- name: create a volume group and initialize it
  lvg:
    vg: "{{ vg_name }}"
    pvs: "{{ pv_devices }}"
  when: vg_existed is undefined or vg_existed.rc != 0
diff --git a/ci/ansible/roles/osdsdock/tasks/main.yml b/ci/ansible/roles/osdsdock/tasks/main.yml index 68f9fdb..215cf00 100755..100644 --- a/ci/ansible/roles/osdsdock/tasks/main.yml +++ b/ci/ansible/roles/osdsdock/tasks/main.yml @@ -1,44 +1,44 @@ ---- -- name: include scenarios/lvm.yml - include: scenarios/lvm.yml - when: enabled_backend == "lvm" - -- name: include scenarios/ceph.yml - include: scenarios/ceph.yml - when: enabled_backend == "ceph" - -- name: include scenarios/cinder.yml - include: scenarios/cinder.yml - when: enabled_backend == "cinder" and use_cinder_standalone == false - -- name: include scenarios/cinder_standalone.yml - include: scenarios/cinder_standalone.yml - when: enabled_backend == "cinder" and use_cinder_standalone == true - -- name: run osdsdock daemon service - shell: - cmd: | - i=0 - while - i="$((i+1))" - [ "$i" -lt 4 ] - do - nohup bin/osdsdock &>/dev/null & - sleep 5 - ps aux | grep osdsdock | grep -v grep && break - done - args: - chdir: "{{ opensds_build_dir }}/out" - when: container_enabled == false - -- name: run osdsdock containerized service - docker: - name: osdsdock - image: opensdsio/opensds-dock:latest - state: started - net: host - privileged: true - volumes: - - "/etc/opensds/:/etc/opensds" - - "/etc/ceph/:/etc/ceph" - when: container_enabled == true +---
# Backend selection: exactly one scenario file is included, keyed on
# enabled_backend (and, for cinder, on whether standalone mode is wanted).
- name: include scenarios/lvm.yml
  include: scenarios/lvm.yml
  when: enabled_backend == "lvm"

- name: include scenarios/ceph.yml
  include: scenarios/ceph.yml
  when: enabled_backend == "ceph"

- name: include scenarios/cinder.yml
  include: scenarios/cinder.yml
  when: enabled_backend == "cinder" and use_cinder_standalone == false

- name: include scenarios/cinder_standalone.yml
  include: scenarios/cinder_standalone.yml
  when: enabled_backend == "cinder" and use_cinder_standalone == true
+
# Start osdsdock as a background daemon with up to three attempts: each
# pass backgrounds the binary (stdout/stderr captured to osdsdock.out/.err,
# stdin detached), waits 5s, and breaks as soon as a running osdsdock
# process is visible. `grep -v grep` drops the grep process itself from the
# ps match.
# NOTE(review): the task still reports success if osdsdock never comes up —
# consider failing explicitly after the last attempt.
- name: run osdsdock daemon service
  shell:
    cmd: |
      i=0
      while
        i="$((i+1))"
        [ "$i" -lt 4 ]
      do
        nohup bin/osdsdock > osdsdock.out 2> osdsdock.err < /dev/null &
        sleep 5
        ps aux | grep osdsdock | grep -v grep && break
      done
  args:
    chdir: "{{ opensds_dir }}"
  when: container_enabled == false
+
# Containerized alternative to the daemon task above; the two are mutually
# exclusive via container_enabled. Uses the legacy `docker` module (the
# pre-Ansible-2.x name for docker_container). Host networking and
# privileged mode are enabled — presumably for block-device access by the
# storage backends; verify both are actually required. /etc/ceph is
# mounted for the ceph backend scenario.
- name: run osdsdock containerized service
  docker:
    name: osdsdock
    image: opensdsio/opensds-dock:latest
    state: started
    net: host
    privileged: true
    volumes:
      - "/etc/opensds/:/etc/opensds"
      - "/etc/ceph/:/etc/ceph"
  when: container_enabled == true
diff --git a/ci/ansible/roles/osdslet/tasks/main.yml b/ci/ansible/roles/osdslet/tasks/main.yml index 14ab40e..02b71fc 100755..100644 --- a/ci/ansible/roles/osdslet/tasks/main.yml +++ b/ci/ansible/roles/osdslet/tasks/main.yml @@ -1,26 +1,26 @@ ---- -- name: run osdslet daemon service - shell: - cmd: | - i=0 - while - i="$((i+1))" - [ "$i" -lt 4 ] - do - nohup bin/osdslet > osdslet.out 2> osdslet.err < /dev/null & - sleep 5 - ps aux | grep osdslet | grep -v grep && break - done - args: - chdir: "{{ opensds_build_dir }}/out" - when: container_enabled == false - -- name: run osdslet containerized service - docker: - name: osdslet - image: opensdsio/opensds-controller:latest - state: started - net: host - volumes: - - "/etc/opensds/:/etc/opensds" - when: container_enabled == true +---
# Start the osdslet controller as a background daemon with up to three
# attempts, mirroring the osdsdock role: background the binary (output to
# osdslet.out/.err, stdin detached), wait 5s, break once a running osdslet
# process shows up in ps.
# NOTE(review): succeeds even if osdslet never starts — consider failing
# explicitly after the last attempt.
- name: run osdslet daemon service
  shell:
    cmd: |
      i=0
      while
        i="$((i+1))"
        [ "$i" -lt 4 ]
      do
        nohup bin/osdslet > osdslet.out 2> osdslet.err < /dev/null &
        sleep 5
        ps aux | grep osdslet | grep -v grep && break
      done
  args:
    chdir: "{{ opensds_dir }}"
  when: container_enabled == false
+
# Containerized alternative to the daemon task above (mutually exclusive
# via container_enabled); legacy `docker` module, host networking, and the
# OpenSDS config directory mounted read-write into the controller image.
- name: run osdslet containerized service
  docker:
    name: osdslet
    image: opensdsio/opensds-controller:latest
    state: started
    net: host
    volumes:
      - "/etc/opensds/:/etc/opensds"
  when: container_enabled == true
|