From e4a8e01da5834049f210186610a291e9c5f6747d Mon Sep 17 00:00:00 2001
From: wutianwei
Date: Fri, 27 Oct 2017 11:25:11 +0800
Subject: fix ceph reboot issue

When storage nodes reboot or shut down, the partitions of the loop
devices are lost. We add the command "partprobe -s {{ loopdevice }}"
to rc.local so that the partitions are reloaded when the storage
nodes boot up.

Change-Id: I31dfca953aa254fa516421a494318b01cd39675c
Signed-off-by: wutianwei
---
 deploy/adapters/ansible/roles/storage/tasks/ceph.yml | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/deploy/adapters/ansible/roles/storage/tasks/ceph.yml b/deploy/adapters/ansible/roles/storage/tasks/ceph.yml
index e024c671..50476c7b 100644
--- a/deploy/adapters/ansible/roles/storage/tasks/ceph.yml
+++ b/deploy/adapters/ansible/roles/storage/tasks/ceph.yml
@@ -43,3 +43,10 @@
     line: "losetup -f /var/{{ item }}.img"
     insertbefore: "{{ rc_local_insert_before }}"
   with_items: "{{ ceph_osd_images }}"
+
+- name: Create ceph partitions at boot time
+  lineinfile:
+    dest: "{{ rc_local }}"
+    line: "partprobe -s {{ item }}"
+    insertbefore: "{{ rc_local_insert_before }}"
+  with_items: "{{ ceph_loopback.results | map(attribute='stdout') | list }}"
--
cgit 1.2.3-korg
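
Note (not part of the patch): a minimal sketch of how the ceph_loopback
variable consumed above might be registered earlier in ceph.yml, so that
"ceph_loopback.results | map(attribute='stdout') | list" yields the loop
device paths. The task name and shell command here are illustrative
assumptions, not the repository's actual task.

    # Hypothetical register step assumed by the new with_items expression:
    # losetup -f --show attaches the image to the first free loop device
    # and prints the device path on stdout.
    - name: associate loopback devices with ceph osd images
      shell: "losetup -f --show /var/{{ item }}.img"
      register: ceph_loopback
      with_items: "{{ ceph_osd_images }}"

    # Each entry in ceph_loopback.results then carries the created device
    # in stdout (e.g. /dev/loop0), so the new lineinfile task would render
    # rc.local lines such as:
    #   partprobe -s /dev/loop0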