summary refs log tree commit diff stats
path: root/ci/ansible/roles/cleaner/tasks/main.yml
diff options
context:
space:
mode:
Diffstat (limited to 'ci/ansible/roles/cleaner/tasks/main.yml')
-rw-r--r--ci/ansible/roles/cleaner/tasks/main.yml211
1 file changed, 53 insertions, 158 deletions
diff --git a/ci/ansible/roles/cleaner/tasks/main.yml b/ci/ansible/roles/cleaner/tasks/main.yml
index fcfb79b..8399b08 100644
--- a/ci/ansible/roles/cleaner/tasks/main.yml
+++ b/ci/ansible/roles/cleaner/tasks/main.yml
@@ -1,68 +1,44 @@
----
+# Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
- name: kill osdslet daemon service
- shell: killall osdslet
- ignore_errors: yes
+ shell: killall osdslet osdsdock
when: container_enabled == false
+ ignore_errors: true
- name: kill osdslet containerized service
- docker:
- image: opensdsio/opensds-controller:latest
+ docker_container:
+ name: osdslet
+ image: "{{ controller_docker_image }}"
state: stopped
when: container_enabled == true
-- name: kill osdsdock daemon service
- shell: killall osdsdock
- ignore_errors: yes
- when: container_enabled == false
-
- name: kill osdsdock containerized service
- docker:
- image: opensdsio/opensds-dock:latest
+ docker_container:
+ name: osdsdock
+ image: "{{ dock_docker_image }}"
state: stopped
when: container_enabled == true
-- name: kill etcd daemon service
- shell: killall etcd
- ignore_errors: yes
- when: db_driver == "etcd" and container_enabled == false
-
-- name: kill etcd containerized service
- docker:
- image: "{{ etcd_docker_image }}"
+- name: stop container where dashboard is located
+ docker_container:
+ name: dashboard
+ image: "{{ dashboard_docker_image }}"
state: stopped
- when: db_driver == "etcd" and container_enabled == true
-
-- name: remove etcd service data
- file:
- path: "{{ etcd_dir }}"
- state: absent
- force: yes
- ignore_errors: yes
- when: db_driver == "etcd"
-
-- name: remove etcd tarball
- file:
- path: "/opt/{{ etcd_tarball }}"
- state: absent
- force: yes
- ignore_errors: yes
- when: db_driver == "etcd"
-
-- name: clean opensds release files
- file:
- path: "{{ opensds_dir }}"
- state: absent
- force: yes
- ignore_errors: yes
+ when: dashboard_installation_type == "container"
-- name: clean opensds release tarball file
- file:
- path: "{{ opensds_tarball_url }}"
- state: absent
- force: yes
- ignore_errors: yes
-
-- name: clean opensds flexvolume plugins binary file
+- name: clean opensds flexvolume plugins binary file if flexvolume specified
file:
path: "{{ flexvolume_plugin_dir }}"
state: absent
@@ -70,119 +46,38 @@
ignore_errors: yes
when: nbp_plugin_type == "flexvolume"
-- name: clean nbp release files
- file:
- path: "{{ nbp_dir }}"
- state: absent
- force: yes
- ignore_errors: yes
-
-- name: clean nbp release tarball file
- file:
- path: "{{ nbp_tarball_url }}"
- state: absent
- force: yes
- ignore_errors: yes
-
-- name: clean all opensds configuration files
- file:
- path: "{{ opensds_config_dir }}"
- state: absent
- force: yes
+- name: clean opensds csi plugin if csi plugin specified
+ shell: |
+ . /etc/profile
+ kubectl delete -f deploy/kubernetes
+ args:
+ chdir: "{{ nbp_work_dir }}/csi"
ignore_errors: yes
+ when: nbp_plugin_type == "csi"
-- name: clean all opensds log files
+- name: clean all configuration and log files in opensds and nbp work directory
file:
- path: "{{ opensds_log_dir }}"
+ path: "{{ item }}"
state: absent
force: yes
+ with_items:
+ - "{{ opensds_work_dir }}"
+ - "{{ nbp_work_dir }}"
+ - "{{ opensds_config_dir }}"
+ - "{{ opensds_log_dir }}"
ignore_errors: yes
-- name: check if it existed before cleaning a volume group
- shell: vgdisplay {{ vg_name }}
- ignore_errors: yes
- register: vg_existed
- when: enabled_backend == "lvm"
-
-- name: remove a volume group if lvm backend specified
- lvg:
- vg: "{{ vg_name }}"
- state: absent
- when: enabled_backend == "lvm" and vg_existed.rc == 0
-
-- name: remove physical volumes if lvm backend specified
- shell: pvremove {{ item }}
- with_items: "{{ pv_devices }}"
- when: enabled_backend == "lvm"
-
-- name: stop cinder-standalone service
- shell: docker-compose down
- become: true
- args:
- chdir: "{{ cinder_data_dir }}/cinder/contrib/block-box"
- when: enabled_backend == "cinder"
-
-- name: clean the volume group of cinder
- shell:
- _raw_params: |
-
- # _clean_lvm_volume_group removes all default LVM volumes
- #
- # Usage: _clean_lvm_volume_group $vg
- function _clean_lvm_volume_group {
- local vg=$1
+- name: include scenarios/auth-keystone.yml when specifies keystone
+ include_tasks: scenarios/auth-keystone.yml
+ when: opensds_auth_strategy == "keystone"
- # Clean out existing volumes
- sudo lvremove -f $vg
- }
+- name: include scenarios/repository.yml if installed from repository
+ include_tasks: scenarios/repository.yml
+ when: install_from == "repository" or dashboard_installation_type == "source_code"
- # _remove_lvm_volume_group removes the volume group
- #
- # Usage: _remove_lvm_volume_group $vg
- function _remove_lvm_volume_group {
- local vg=$1
+- name: include scenarios/release.yml if installed from release
+ include_tasks: scenarios/release.yml
+ when: install_from == "release"
- # Remove the volume group
- sudo vgremove -f $vg
- }
-
- # _clean_lvm_backing_file() removes the backing file of the
- # volume group
- #
- # Usage: _clean_lvm_backing_file() $backing_file
- function _clean_lvm_backing_file {
- local backing_file=$1
-
- # If the backing physical device is a loop device, it was probably setup by DevStack
- if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
- local vg_dev
- vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'.img'/ { print $1}')
- if [[ -n "$vg_dev" ]]; then
- sudo losetup -d $vg_dev
- fi
- rm -f $backing_file
- fi
- }
-
- # clean_lvm_volume_group() cleans up the volume group and removes the
- # backing file
- #
- # Usage: clean_lvm_volume_group $vg
- function clean_lvm_volume_group {
- local vg=$1
-
- _clean_lvm_volume_group $vg
- _remove_lvm_volume_group $vg
- # if there is no logical volume left, it's safe to attempt a cleanup
- # of the backing file
- if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
- _clean_lvm_backing_file {{ cinder_data_dir }}/${vg}.img
- fi
- }
-
- clean_lvm_volume_group {{cinder_volume_group}}
-
- args:
- executable: /bin/bash
- become: true
- when: enabled_backend == "cinder"
+- name: include scenarios/backend.yml for cleaning up storage backend service
+ include_tasks: scenarios/backend.yml