Diffstat (limited to 'deploy/adapters/ansible')
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2           |  1
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2        |  1
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2            |  1
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml            | 21
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh                    |  3
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh                  |  6
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh                       |  7
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml                         | 20
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml                         | 40
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml                         | 10
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mongodb.yml                           |  2
-rw-r--r--  deploy/adapters/ansible/roles/ha/templates/haproxy.cfg                             |  1
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml                   |  7
-rw-r--r--  deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py |  1
14 files changed, 77 insertions, 44 deletions
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2 b/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2
index d3ee52ab..716317da 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2
@@ -2,6 +2,7 @@
verbose = True
rpc_backend = rabbit
rabbit_host = {{ internal_vip.ip }}
+rabbit_userid = {{ RABBIT_USER }}
rabbit_password = {{ RABBIT_PASS }}
[publisher]
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2
index 09ca5272..c2993885 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2
@@ -1,6 +1,7 @@
[DEFAULT]
rpc_backend = rabbit
rabbit_host = {{ internal_vip.ip }}
+rabbit_userid = {{ RABBIT_USER }}
rabbit_password = {{ RABBIT_PASS }}
auth_strategy = keystone
verbose = True
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2
index c34f5d81..616e7e05 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2
@@ -2,4 +2,5 @@
notification_driver = messagingv2
rpc_backend = rabbit
rabbit_host = {{ internal_vip.ip }}
+rabbit_userid = {{ RABBIT_USER }}
rabbit_password = {{ RABBIT_PASS }}
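
The three template changes above add rabbit_userid alongside the existing rabbit_host and rabbit_password, so ceilometer and the glance notification settings authenticate against RabbitMQ as the configured user instead of oslo.messaging's default guest account. A quick spot-check of the rendered files on a controller (assuming the usual config paths, which may differ per deployment):

    grep -E '^rabbit_(host|userid|password)' /etc/ceilometer/ceilometer.conf
    grep -E '^rabbit_(host|userid|password)' /etc/glance/glance-api.conf
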
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
index 2c194fae..ab010266 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
@@ -1,21 +1,36 @@
---
+- name: chown of glance/api.log
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "chown -R glance:glance /var/log/glance"
+ with_items:
+ - "{{ groups['controller'] }}"
+ tags:
+ - ceph_conf_glance
+ ignore_errors: True
- name: modify glance-api.conf for ceph
- shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf && service {{ glance_service }} restart"
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/g' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf"
+ with_items:
+ - "{{ groups['controller'] }}"
+ tags:
+ - ceph_conf_glance
+
+- name: restart glance
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "rm -f /var/log/glance/api.log && chown -R glance:glance /var/log/glance && service {{ glance_service }} restart"
with_items:
- "{{ groups['controller'] }}"
tags:
- ceph_conf_glance
+ ignore_errors: True
- name: modify cinder.conf for ceph
- shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && service {{ cinder_service }} restart"
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/g' /etc/cinder/cinder.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid.stdout_lines[0] }}/g' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && service {{ cinder_service }} restart"
with_items:
- "{{ groups['compute'] }}"
tags:
- ceph_conf_cinder
- name: modify nova.conf for ceph
- shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && service {{ nova_service }} restart"
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/g' /etc/nova/nova-compute.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid.stdout_lines[0] }}/g' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && service {{ nova_service }} restart"
with_items:
- "{{ groups['compute'] }}"
tags:
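
The reworked ceph tasks split the glance log ownership fix and the service restart out of the single sed one-liner, and the cinder and nova tasks now also rewrite any pre-existing rbd_secret_uuid line so reruns pick up the current UUID. A minimal manual check of the result, with illustrative host names:

    ssh -o StrictHostKeyChecking=no controller-1 "grep -A6 '^\[glance_store' /etc/glance/glance-api.conf"
    ssh -o StrictHostKeyChecking=no compute-1 "grep '^rbd_secret_uuid' /etc/cinder/cinder.conf /etc/nova/nova-compute.conf"
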
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh b/deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh
new file mode 100644
index 00000000..b69db0ed
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh
@@ -0,0 +1,3 @@
+if [[ ! -f /var/cinder.img ]]; then
+ dd if=/dev/zero of=/var/cinder.img bs=1 count=1 seek=$1
+fi
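
Because dd writes a single byte at the seek offset, /var/cinder.img ends up as a sparse file: its apparent size matches the requested size while almost no blocks are allocated until data is actually written. Both numbers can be compared with du:

    du -h --apparent-size /var/cinder.img   # reported file size (the seek target)
    du -h /var/cinder.img                   # blocks actually allocated on disk
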
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh b/deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh
new file mode 100644
index 00000000..9670da12
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh
@@ -0,0 +1,6 @@
+size=`df /var | awk '$3 ~ /[0-9]+/ { print $4 }'`;
+if [[ $size -gt 2000000000 ]]; then
+ echo -n 2000000000000;
+else
+ echo -n $((size * 1000));
+fi
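
The awk filter skips the df header line (whose third field, "Used", is not numeric) and prints the fourth field, i.e. the space available to /var in 1K blocks; the script then multiplies by 1000 for an approximate byte count and caps the result at about 2 TB. An illustrative run (numbers made up):

    $ df /var
    Filesystem     1K-blocks     Used Available Use% Mounted on
    /dev/sda1      103081248 10466812  87337352  11% /
    $ df /var | awk '$3 ~ /[0-9]+/ { print $4 }'
    87337352
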
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh b/deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh
new file mode 100644
index 00000000..d0e6c776
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh
@@ -0,0 +1,7 @@
+loop_dev=`losetup -a |grep "/var/cinder.img"|awk -F':' '{print $1}'`
+if [[ -z $loop_dev ]]; then
+ losetup -f --show /var/cinder.img
+else
+ echo $loop_dev
+fi
+
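The script keeps the setup idempotent: if /var/cinder.img is already attached, the existing loop device name is echoed; otherwise losetup -f --show attaches the image to the first free loop device and prints that name. Illustrative session (output format varies slightly between util-linux versions):

    $ losetup -a | grep "/var/cinder.img"
    /dev/loop0: []: (/var/cinder.img)
    $ losetup -a | grep "/var/cinder.img" | awk -F':' '{print $1}'
    /dev/loop0
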
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml
new file mode 100644
index 00000000..b44253c4
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml
@@ -0,0 +1,20 @@
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: get available /var partition size
+ script: get_var_size.sh
+ register: part_size
+
+- name: create cinder file if it does not exist

+ script: create_img.sh {{ part_size.stdout }}
+
+- name: do a losetup on /var/cinder.img
+ script: losetup.sh
+ register: loop_device
+
+- name: debug loop device
+ debug: msg={{ loop_device.stdout }}
+
+- name: create physical and group volumes
+ lvg: vg=cinder-volumes pvs={{ loop_device.stdout }}
+ vg_options=--force
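
The lvg task builds the volume group on whatever loop device losetup.sh reported. A rough shell sketch of what it ends up doing, assuming the registered device was /dev/loop0:

    pvcreate /dev/loop0                          # initialise the loop device as an LVM physical volume
    vgcreate --force cinder-volumes /dev/loop0   # create the cinder-volumes volume group on it
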
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
index b64024da..a258a0cf 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
@@ -13,43 +13,13 @@
stat: path={{ physical_device }}
register: status
-- name: replace physical_device if st is false
- local_action: copy src=loop.yml dest=/tmp/loop.yml
- when: status.stat.exists == False
-
- name: load loop.yml
- include_vars: /tmp/loop.yml
- when: status.stat.exists == False
-
-- name: check if cinder-volumes is mounted
- shell: ls /mnt
- register: cindervolumes
-
-- name: get available partition size
- shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }'
- register: partition_size
-
-- name: if not mounted, mount it
- shell: dd if=/dev/zero of=/mnt/cinder-volumes
- bs=1 count=0 seek={{ partition_size.stdout }}
- when: cindervolumes.stdout != 'cinder-volumes'
-
-- name: get first lo device
- shell: losetup -f
- register: first_lo
- when: cindervolumes.stdout != 'cinder-volumes'
-
-- name: do a losetup on /mnt/cinder-volumes
- shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes
- when: cindervolumes.stdout != 'cinder-volumes'
-
-- name: destroy GPT lable
- shell: dd if=/dev/urandom of=/dev/sdb bs=4M count=1
- ignore_errors: True
+ include: loop.yml
+ when: status.stat.isblk == False
-- name: create physical and group volumes
- lvg: vg=cinder-volumes pvs={{ physical_device }}
- vg_options=--force
+- name: load real.yml
+ include: real.yml
+ when: status.stat.isblk == True
- name: upload cinder-volume configuration
template: src=cinder.conf dest=/etc/cinder/cinder.conf
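
main.yml now branches on status.stat.isblk: when {{ physical_device }} is a real block device the role provisions it directly via real.yml, otherwise it falls back to the loop-file setup in loop.yml. The same test expressed in shell, with /dev/sdb only as an illustrative device path:

    if [ -b /dev/sdb ]; then
        echo "block device present -> real.yml"
    else
        echo "no usable block device -> loop.yml"
    fi
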
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml
new file mode 100644
index 00000000..19ef828b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml
@@ -0,0 +1,10 @@
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: destroy GPT label
+ shell: dd if=/dev/urandom of={{ physical_device }} bs=4M count=1
+ ignore_errors: True
+
+- name: create physical and group volumes
+ lvg: vg=cinder-volumes pvs={{ physical_device }}
+ vg_options=--force
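
real.yml overwrites the first 4 MiB of the device so stale GPT or LVM metadata cannot get in the way of vgcreate, then builds the volume group. After the play the result can be confirmed with the standard LVM listing tools:

    pvs | grep cinder-volumes   # the physical device should show up as the VG's PV
    vgs cinder-volumes          # the volume group itself
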
diff --git a/deploy/adapters/ansible/roles/database/tasks/mongodb.yml b/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
index ca61e905..5ca23a19 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
@@ -19,7 +19,7 @@
- name: manually restart mongodb server
service: name=mongodb state=restarted
-- wait_for: port=27017 delay=3 timeout=30 host={{ internal_vip.ip }}
+- wait_for: port=27017 delay=3 timeout=60 host={{ internal_vip.ip }}
- name: create mongodb user
run_once: True
diff --git a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
index 721c9731..c8065f05 100644
--- a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
+++ b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
@@ -39,7 +39,6 @@ listen proxy-glance_api_cluster
bind {{ internal_vip.ip }}:9292
bind {{ public_vip.ip }}:9292
option tcpka
- option httpchk
option tcplog
balance source
{% for host,ip in haproxy_hosts.items() %}
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
index 7658d90d..5bea0ae2 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
@@ -9,10 +9,9 @@
- name: Stop the Open vSwitch service and clear existing OVSDB
shell: >
- service openvswitch-switch stop ;
- rm -rf /var/log/openvswitch/* ;
- rm -rf /etc/openvswitch/conf.db ;
- service openvswitch-switch start ;
+ ovs-vsctl del-br br-int ;
+ ovs-vsctl del-br br-tun ;
+ ovs-vsctl del-manager ;
#- name: get image http server
# shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
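
Rather than stopping openvswitch-switch and wiping the whole OVSDB, the task now deletes only the Neutron bridges and the manager registration, leaving the daemon running. Whether the cleanup took effect can be checked with:

    ovs-vsctl list-br       # br-int and br-tun should no longer be listed
    ovs-vsctl get-manager   # prints nothing once the manager has been deleted
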
diff --git a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py
index 5e6520af..bf784bb3 100644
--- a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py
+++ b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py
@@ -56,6 +56,7 @@ def setup_ips(ip_settings, sys_intf_mappings):
cmd = "ip addr add %s/%s brd %s dev %s;" \
% (intf_info["ip"], intf_info["netmask"], str(network.broadcast),intf_name)
if "gw" in intf_info:
+ cmd += "route del default;"
cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name)
LOG.info("setup_ips: cmd=%s" % cmd)
os.system(cmd)
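
With the added "route del default;", the command assembled for an interface that carries a gw entry first drops any existing default route before installing the new one, avoiding the "RTNETLINK answers: File exists" failure from ip route add. An illustrative expansion of cmd (addresses and interface name are made up):

    ip addr add 10.1.0.50/24 brd 10.1.0.255 dev eth_mgmt;route del default;ip route add default via 10.1.0.1 dev eth_mgmt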