Diffstat (limited to 'deploy/adapters/ansible')
-rw-r--r--   deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml            21
-rw-r--r--   deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml             13
-rw-r--r--   deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh                     3
-rw-r--r--   deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh                   6
-rw-r--r--   deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh                        7
-rw-r--r--   deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml                         20
-rw-r--r--   deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml                         40
-rw-r--r--   deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml                         10
-rw-r--r--   deploy/adapters/ansible/roles/database/tasks/mongodb.yml                            2
-rw-r--r--   deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py  1
10 files changed, 82 insertions, 41 deletions
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
index 2c194fae..ab010266 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
@@ -1,21 +1,36 @@
---
+- name: chown of glance/api.log
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "chown -R glance:glance /var/log/glance"
+ with_items:
+ - "{{ groups['controller'] }}"
+ tags:
+ - ceph_conf_glance
+ ignore_errors: True
- name: modify glance-api.conf for ceph
- shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf && service {{ glance_service }} restart"
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/g' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf"
+ with_items:
+ - "{{ groups['controller'] }}"
+ tags:
+ - ceph_conf_glance
+
+- name: restart glance
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "rm -f /var/log/glance/api.log && chown -R glance:glance /var/log/glance && service {{ glance_service }} restart"
with_items:
- "{{ groups['controller'] }}"
tags:
- ceph_conf_glance
+ ignore_errors: True
- name: modify cinder.conf for ceph
- shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && service {{ cinder_service }} restart"
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/g' /etc/cinder/cinder.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid.stdout_lines[0] }}/g' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && service {{ cinder_service }} restart"
with_items:
- "{{ groups['compute'] }}"
tags:
- ceph_conf_cinder
- name: modify nova.conf for ceph
- shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && service {{ nova_service }} restart"
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/g' /etc/nova/nova-compute.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid.stdout_lines[0] }}/g' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && service {{ nova_service }} restart"
with_items:
- "{{ groups['compute'] }}"
tags:
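
For reference, once the two sed expressions above have run, the resulting glance settings can be spot-checked from the deploy host. The sketch below is only a manual verification aid: "controller1" is a placeholder for one of the controller hosts, and the expected values follow directly from the sed patterns in the task.

    ssh -o StrictHostKeyChecking=no controller1 \
        "grep -E '^(default_store|stores|rbd_store_pool|rbd_store_user|rbd_store_ceph_conf)' /etc/glance/glance-api.conf"
    # expected, per the sed expressions above:
    #   default_store = rbd
    #   stores = rbd
    #   rbd_store_pool = images
    #   rbd_store_user = glance
    #   rbd_store_ceph_conf = /etc/ceph/ceph.conf
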
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
index 52e54cbe..d0ee7e2e 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
@@ -1,7 +1,7 @@
---
- name: create pool
- shell: ceph osd pool create {{ item }} 80
+ shell: ceph osd pool create {{ item }} 50
with_items:
- volumes
- images
@@ -48,8 +48,17 @@
tags:
- ceph_copy_secret
+- name: undefine libvirt secret in case ceph_deploy is executed repeatedly
+ shell: ssh -o StrictHostKeyChecking=no -t {{ item }} "virsh secret-list | awk '\$1 ~ /[0-9]+/ {print \$1}' | xargs virsh secret-undefine"
+ with_items:
+ - "{{ groups['compute'] }}"
+ tags:
+ - ceph_copy_secret
+ ignore_errors: True
+
+
- name: create key for libvirt on compute nodes
- shell: ssh -o StrictHostKeyChecking=no -t {{ item }} "virsh secret-define --file secret.xml && virsh secret-set-value --secret {{ ceph_uuid.stdout_lines[0] }} --base64 \$(cat client.cinder.key) && rm client.cinder.key secret.xml"
+ shell: ssh -o StrictHostKeyChecking=no -t {{ item }} "virsh secret-define --file secret.xml && virsh secret-set-value --secret {{ ceph_uuid.stdout_lines[0] }} --base64 $(cat client.cinder.key)"
with_items:
- "{{ groups['compute'] }}"
tags:
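
Run by hand on a compute node, the secret handling added here amounts to the sketch below. <ceph_uuid> stands in for ceph_uuid.stdout_lines[0]; on a first run the secret list can be empty, which is why the undefine task carries ignore_errors.

    # drop any previously defined secrets so a re-run of ceph_deploy starts clean
    virsh secret-list | awk '$1 ~ /[0-9]+/ {print $1}' | xargs virsh secret-undefine
    # register the cinder key under the UUID referenced in cinder.conf and nova-compute.conf
    virsh secret-define --file secret.xml
    virsh secret-set-value --secret <ceph_uuid> --base64 "$(cat client.cinder.key)"
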
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh b/deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh
new file mode 100644
index 00000000..b69db0ed
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh
@@ -0,0 +1,3 @@
+if [[ ! -f /var/cinder.img ]]; then
+ dd if=/dev/zero of=/var/cinder.img bs=1 count=1 seek=$1
+fi
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh b/deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh
new file mode 100644
index 00000000..9670da12
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh
@@ -0,0 +1,6 @@
+size=`df /var | awk '$3 ~ /[0-9]+/ { print $4 }'`;
+if [[ $size -gt 2000000000 ]]; then
+ echo -n 2000000000000;
+else
+ echo -n $((size * 1000));
+fi
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh b/deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh
new file mode 100644
index 00000000..d0e6c776
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh
@@ -0,0 +1,7 @@
+loop_dev=`losetup -a |grep "/var/cinder.img"|awk -F':' '{print $1}'`
+if [[ -z $loop_dev ]]; then
+ losetup -f --show /var/cinder.img
+else
+ echo $loop_dev
+fi
+
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml
new file mode 100644
index 00000000..b44253c4
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml
@@ -0,0 +1,20 @@
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: get available /var partition size
+ script: get_var_size.sh
+ register: part_size
+
+- name: create cinder file if it does not exist
+ script: create_img.sh {{ part_size.stdout }}
+
+- name: do a losetup on /var/cinder.img
+ script: losetup.sh
+ register: loop_device
+
+- name: debug loop device
+ debug: msg={{ loop_device.stdout }}
+
+- name: create physical and group volumes
+ lvg: vg=cinder-volumes pvs={{ loop_device.stdout }}
+ vg_options=--force
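
Taken together with the three helper scripts above, this include boils down to roughly the following shell sequence; vgcreate stands in for the lvg module, and the paths and the ~2 TB cap come from the scripts themselves.

    size=$(df /var | awk '$3 ~ /[0-9]+/ { print $4 }')            # free 1K blocks on /var
    bytes=$(( size > 2000000000 ? 2000000000000 : size * 1000 ))  # cap at ~2 TB, as in get_var_size.sh
    [ -f /var/cinder.img ] || dd if=/dev/zero of=/var/cinder.img bs=1 count=1 seek=$bytes
    loop_dev=$(losetup -a | grep /var/cinder.img | awk -F':' '{print $1}')
    [ -n "$loop_dev" ] || loop_dev=$(losetup -f --show /var/cinder.img)
    vgcreate cinder-volumes "$loop_dev"
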
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
index b64024da..a258a0cf 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
@@ -13,43 +13,13 @@
stat: path={{ physical_device }}
register: status
-- name: replace physical_device if st is false
- local_action: copy src=loop.yml dest=/tmp/loop.yml
- when: status.stat.exists == False
-
- name: load loop.yml
- include_vars: /tmp/loop.yml
- when: status.stat.exists == False
-
-- name: check if cinder-volumes is mounted
- shell: ls /mnt
- register: cindervolumes
-
-- name: get available partition size
- shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }'
- register: partition_size
-
-- name: if not mounted, mount it
- shell: dd if=/dev/zero of=/mnt/cinder-volumes
- bs=1 count=0 seek={{ partition_size.stdout }}
- when: cindervolumes.stdout != 'cinder-volumes'
-
-- name: get first lo device
- shell: losetup -f
- register: first_lo
- when: cindervolumes.stdout != 'cinder-volumes'
-
-- name: do a losetup on /mnt/cinder-volumes
- shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes
- when: cindervolumes.stdout != 'cinder-volumes'
-
-- name: destroy GPT lable
- shell: dd if=/dev/urandom of=/dev/sdb bs=4M count=1
- ignore_errors: True
+ include: loop.yml
+ when: status.stat.isblk == False
-- name: create physical and group volumes
- lvg: vg=cinder-volumes pvs={{ physical_device }}
- vg_options=--force
+- name: load real.yml
+ include: real.yml
+ when: status.stat.isblk == True
- name: upload cinder-volume configuration
template: src=cinder.conf dest=/etc/cinder/cinder.conf
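
The isblk test above splits the role into two paths. In shell terms the decision is simply whether the configured physical_device (for example /dev/sdb, used here purely as an illustrative assumption) is an existing block device:

    if [ -b "$physical_device" ]; then
        echo "use real.yml: wipe the GPT label and build cinder-volumes on the device"
    else
        echo "use loop.yml: back cinder-volumes with a loop device over /var/cinder.img"
    fi
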
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml
new file mode 100644
index 00000000..19ef828b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml
@@ -0,0 +1,10 @@
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: destroy GPT label
+ shell: dd if=/dev/urandom of={{ physical_device }} bs=4M count=1
+ ignore_errors: True
+
+- name: create physical and group volumes
+ lvg: vg=cinder-volumes pvs={{ physical_device }}
+ vg_options=--force
diff --git a/deploy/adapters/ansible/roles/database/tasks/mongodb.yml b/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
index ca61e905..5ca23a19 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
@@ -19,7 +19,7 @@
- name: manually restart mongodb server
service: name=mongodb state=restarted
-- wait_for: port=27017 delay=3 timeout=30 host={{ internal_vip.ip }}
+- wait_for: port=27017 delay=3 timeout=60 host={{ internal_vip.ip }}
- name: create mongodb user
run_once: True
diff --git a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py
index 94c7c652..e58d6c72 100644
--- a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py
+++ b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py
@@ -53,6 +53,7 @@ def setup_ips(ip_settings, sys_intf_mappings):
cmd = "ip addr add %s/%s brd %s dev %s;" \
% (intf_info["ip"], intf_info["netmask"], str(network.broadcast),intf_name)
if "gw" in intf_info:
+ cmd += "route del default;"
cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name)
LOG.info("setup_ips: cmd=%s" % cmd)
os.system(cmd)
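
With the added route deletion, the command string built for an interface that carries a gateway now looks like the line below (addresses are illustrative placeholders); clearing any pre-existing default route first keeps it from blocking the subsequent ip route add on repeated runs.

    ip addr add 10.1.0.50/24 brd 10.1.0.255 dev eth1; route del default; ip route add default via 10.1.0.1 dev eth1
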