author     grakiss <grakiss.wanglei@huawei.com>  2015-10-13 14:38:31 +0800
committer  grakiss <grakiss.wanglei@huawei.com>  2015-10-13 20:04:12 +0800
commit     cb004d80668a6bf63e208e81bf3c4fa86d889dcf (patch)
tree       e4f7047e1f3c6c7192257a6a521ddb0ceebaa875 /deploy/adapters/ansible/roles/ceph-deploy
parent     6cea13a2abbc7292c74de0bdf667d285e746845e (diff)
fix hanging due to nested sudo
JIRA: COMPASS-93 - clear all the sudo through scripts

Change-Id: I2d750858658b09de17cc32df5359ef04263ff1f4
Signed-off-by: grakiss <grakiss.wanglei@huawei.com>
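Background on the hang (context added here, not part of the original commit): these scripts and tasks already run with root privileges, so an inner sudo gains nothing, and on a session with no TTY attached sudo cannot prompt for a password and simply blocks. A minimal sketch of the failure mode, assuming a remote host where passwordless sudo is not configured; the hostname is a placeholder:

    #!/bin/bash
    # Hypothetical repro of the hang this commit removes. The deploy already
    # connects with root privileges, so an inner sudo is redundant; with no
    # TTY it would sit waiting for a password. `sudo -n` turns that silent
    # hang into a visible failure for demonstration purposes.
    ssh -o StrictHostKeyChecking=no some-compute-node \
        'sudo -n true 2>/dev/null \
            || echo "inner sudo would prompt for a password and hang here"'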
Diffstat (limited to 'deploy/adapters/ansible/roles/ceph-deploy')
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh            | 4
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml  | 6
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml   | 4
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
index 13e5fd8a..f2a40663 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
+++ b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
@@ -33,9 +33,9 @@ fi
 losetup -d /dev/loop0
 echo "vgcreate"
-vgcreate -y ceph-volumes $(sudo losetup --show -f /ceph/images/ceph-volumes.img)
+vgcreate -y ceph-volumes $(losetup --show -f /ceph/images/ceph-volumes.img)
 echo "lvcreate"
-sudo lvcreate -l 100%FREE -nceph0 ceph-volumes
+lvcreate -l 100%FREE -nceph0 ceph-volumes
 echo "mkfs"
 mkfs.xfs -f /dev/ceph-volumes/ceph0
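For readers unfamiliar with the script being patched: create_osd.sh builds a file-backed LVM volume for the Ceph OSD. A standalone sketch of that flow, assuming the backing image /ceph/images/ceph-volumes.img already exists and the whole script runs as root (which is exactly why the inner sudo was dropped):

    #!/bin/bash
    # File-backed LVM volume for a Ceph OSD, per create_osd.sh. Runs as
    # root end to end, so no sudo is needed anywhere in the script.
    losetup -d /dev/loop0 || true                # detach a stale loop device, if any
    # Attach the image to the first free loop device and print its name.
    loopdev=$(losetup --show -f /ceph/images/ceph-volumes.img)
    vgcreate -y ceph-volumes "$loopdev"          # volume group on the loop device
    lvcreate -l 100%FREE -n ceph0 ceph-volumes   # one LV spanning the whole VG
    mkfs.xfs -f /dev/ceph-volumes/ceph0          # XFS filesystem for the OSD data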
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
index b437d4a6..cb95cf07 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
@@ -1,21 +1,21 @@
 ---
 - name: modify glance-api.conf for ceph
-  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf && sudo glance-control api restart"
+  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf && glance-control api restart"
   with_items:
     - "{{ groups['controller'] }}"
   tags:
     - ceph_conf_glance
 - name: modify cinder.conf for ceph
-  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && sudo service {{ cinder_service }} restart"
+  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && service {{ cinder_service }} restart"
   with_items:
     - "{{ groups['compute'] }}"
   tags:
     - ceph_conf_cinder
 - name: modify nova.conf for ceph
-  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && sudo service {{ nova_service }} restart"
+  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && service {{ nova_service }} restart"
   with_items:
     - "{{ groups['compute'] }}"
   tags:
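The three tasks above share one sed idiom: rewrite an existing key in place, then append new keys immediately after an INI section header using sed's a (append) command. A local sketch of that idiom, assuming GNU sed (the \n escapes inside the appended text are a GNU extension; BSD sed needs a different a\ syntax) and a scratch copy rather than a live config:

    #!/bin/bash
    # Demonstrates the sed pattern the playbook runs over ssh, on a scratch file.
    cat > /tmp/demo.conf <<'EOF'
    [DEFAULT]
    default_store = file
    [glance_store]
    EOF
    # Replace the value of an existing key, keeping the key name (\1).
    sed -i 's/^\(default_store\).*/\1 = rbd/' /tmp/demo.conf
    # Append new keys immediately after the section header line.
    sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images' /tmp/demo.conf
    cat /tmp/demo.conf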
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
index a2ff030b..52e54cbe 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
@@ -12,12 +12,12 @@
   shell: ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' && ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
 - name: send glance key to controller nodes
-  shell: ceph auth get-or-create client.glance | ssh {{ item }} sudo tee /etc/ceph/ceph.client.glance.keyring && ssh {{ item }} sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
+  shell: ceph auth get-or-create client.glance | ssh {{ item }} tee /etc/ceph/ceph.client.glance.keyring && ssh {{ item }} chown glance:glance /etc/ceph/ceph.client.glance.keyring
   with_items:
     - "{{ groups['controller'] }}"
 - name: send cinder key to compute nodes
-  shell: ceph auth get-or-create client.cinder | ssh {{ item }} sudo tee /etc/ceph/ceph.client.cinder.keyring && ssh {{ item }} sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
+  shell: ceph auth get-or-create client.cinder | ssh {{ item }} tee /etc/ceph/ceph.client.cinder.keyring && ssh {{ item }} chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
   with_items:
     - "{{ groups['compute'] }}"
   tags:
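The key-distribution pattern in this last file streams a Ceph keyring over ssh instead of copying files around. A minimal sketch, assuming the deploy user already lands on the target as root (the premise that makes dropping sudo safe here) and using a placeholder hostname:

    #!/bin/bash
    # Generate (or fetch, if it already exists) the glance key on the Ceph
    # admin node and write it straight into the remote keyring file via tee.
    # chown succeeds without sudo only because the ssh login is privileged.
    ceph auth get-or-create client.glance \
        | ssh controller01 tee /etc/ceph/ceph.client.glance.keyring >/dev/null
    ssh controller01 chown glance:glance /etc/ceph/ceph.client.glance.keyring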