Diffstat (limited to 'deploy/adapters')
-rw-r--r--  deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml | 36
-rw-r--r--  deploy/adapters/ansible/openstack/allinone.yml | 8
-rw-r--r--  deploy/adapters/ansible/openstack/compute.yml | 2
-rw-r--r--  deploy/adapters/ansible/openstack/controller.yml | 2
-rw-r--r--  deploy/adapters/ansible/openstack/multinodes.yml | 24
-rw-r--r--  deploy/adapters/ansible/openstack/network.yml | 2
-rw-r--r--  deploy/adapters/ansible/openstack/single-controller.yml | 10
-rw-r--r--  deploy/adapters/ansible/openstack/storage.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/aodh/handlers/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/boot-recovery/tasks/main.yml | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/boot-recovery/vars/RedHat.yml | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/boot-recovery/vars/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml | 9
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml | 3
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-config/tasks/create_config.yml | 26
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-config/tasks/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml | 15
-rw-r--r--  deploy/adapters/ansible/roles/ceph-mon/tasks/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/ceph-mon/vars/main.yml | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml | 98
-rw-r--r--  deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml | 32
-rw-r--r--  deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-openstack/vars/main.yml | 7
-rw-r--r--  deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml | 27
-rw-r--r--  deploy/adapters/ansible/roles/ceph-osd/tasks/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/ceph-purge/tasks/main.yml | 8
-rw-r--r--  deploy/adapters/ansible/roles/cinder-controller/handlers/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/cinder-controller/vars/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml | 5
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/vars/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/common/tasks/main.yml | 16
-rw-r--r--  deploy/adapters/ansible/roles/common/vars/Debian.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/compute-recovery/tasks/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/compute-recovery/vars/Debian.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/compute-recovery/vars/RedHat.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/congress/tasks/congress_config.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/congress/tasks/congress_db.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/congress/tasks/congress_install.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/congress/vars/Debian.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/controller-recovery/tasks/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/controller-recovery/vars/Debian.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/controller-recovery/vars/RedHat.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/controller-recovery/vars/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/tasks/main.yml | 8
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml | 24
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_redhat.yml | 20
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml | 11
-rwxr-xr-x  deploy/adapters/ansible/roles/database/tasks/mongodb_config.yml | 4
-rwxr-xr-x  deploy/adapters/ansible/roles/database/tasks/mongodb_install.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/database/vars/Debian.yml | 30
-rw-r--r--  deploy/adapters/ansible/roles/ext-network/handlers/main.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/ext-network/tasks/main.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/ext-network/vars/Debian.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/glance/tasks/glance_config.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/glance/tasks/nfs.yml | 24
-rw-r--r--  deploy/adapters/ansible/roles/heat/handlers/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/heat/tasks/heat_config.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/heat/tasks/heat_install.yml | 9
-rw-r--r--  deploy/adapters/ansible/roles/heat/vars/Debian.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/heat/vars/RedHat.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/heat/vars/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml | 23
-rw-r--r--  deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml | 22
-rw-r--r--  deploy/adapters/ansible/roles/keystone/vars/main.yml | 20
-rw-r--r--  deploy/adapters/ansible/roles/kvmfornfv/defaults/Debian.yml (renamed from deploy/adapters/ansible/roles/kvmfornfv/defaults/Debian.yaml) | 1
-rw-r--r--  deploy/adapters/ansible/roles/kvmfornfv/defaults/RedHat.yml (renamed from deploy/adapters/ansible/roles/kvmfornfv/defaults/RedHat.yaml) | 2
-rw-r--r--  deploy/adapters/ansible/roles/kvmfornfv/defaults/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/kvmfornfv/tasks/main.yml | 5
-rw-r--r--  deploy/adapters/ansible/roles/memcached/vars/Debian.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/monitor/tasks/main.yml | 6
-rw-r--r--  deploy/adapters/ansible/roles/monitor/vars/Debian.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml | 211
-rw-r--r--  deploy/adapters/ansible/roles/moon/vars/main.yml | 39
-rw-r--r--  deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml | 3
-rwxr-xr-x  deploy/adapters/ansible/roles/mq/tasks/rabbitmq_install.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/mq/vars/Debian.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/mq/vars/RedHat.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/neutron-common/handlers/main.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml | 20
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml | 6
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml | 10
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/vars/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/handlers/main.yml | 4
-rwxr-xr-x  deploy/adapters/ansible/roles/neutron-network/tasks/firewall.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/tasks/main.yml | 14
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/tasks/odl.yml | 8
-rwxr-xr-x  deploy/adapters/ansible/roles/neutron-network/tasks/vpn.yml | 10
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/vars/RedHat.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/nova-compute/tasks/main.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml | 2
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/handlers/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml | 25
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_02_unarchive_odl_and_jdk.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_07_start_check_odl.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml | 29
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/03_00_switch_off_neutron_openvswitch_agent.yml | 6
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml | 19
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_00_recover_external_network_l3.yml | 13
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_01_recover_external_network_l2.yml | 5
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_recover_external_network.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/03_04_setup_ml2.yml | 9
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/04_odl_l3_nova.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml | 16
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml | 18
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml | 4
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml | 4
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/vars/main.yml | 33
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml | 10
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml | 70
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml | 39
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/vars/main.yml | 26
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/files/recover_network_opencontrail.py | 36
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/files/setup_networks_opencontrail.py | 121
-rw-r--r--  deploy/adapters/ansible/roles/open-contrail/tasks/ext-net.yml | 47
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-collector.yml | 24
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-common.yml | 104
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-compute.yml | 55
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-config.yml | 51
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-control.yml | 32
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-database.yml | 25
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-interface.yml | 34
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-kernel.yml | 60
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-webui.yml | 26
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/main.yml | 151
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/-node-common.yml | 28
-rw-r--r--  deploy/adapters/ansible/roles/open-contrail/tasks/provision/-rabbitmq-stop.yml | 30
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/-redis-setup.yml | 34
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml | 115
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml | 91
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-collector.yml | 106
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml | 269
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml | 350
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-control.yml | 69
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-database.yml | 209
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-increase-limits.yml | 60
-rw-r--r--  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml | 87
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-route.yml | 50
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml | 85
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml | 104
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-webui.yml | 75
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml | 46
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/install/override.j2 | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2 | 31
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2 | 29
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-supervisord-conf.j2 | 12
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2 | 86
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-control-conf.j2 | 15
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2 | 16
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2 | 43
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-supervisord-conf.j2 | 12
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2 | 15
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-keystone-auth-conf.j2 | 9
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2 | 18
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2 | 22
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-sudoers.j2 | 5
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2 | 31
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2 | 111
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-ini.j2 | 12
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-vnc-api-lib-ini.j2 | 11
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-vrouter-agent-conf.j2 | 177
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/default-pmac.j2 | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2 | 78
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-authorization-properties.j2 | 2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-basicauthusers-properties.j2 | 30
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-log4j-properties.j2 | 26
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-publisher-properties.j2 | 16
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/keepalived-conf.j2 | 29
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/neutron-contrail-plugin-ini.j2 | 15
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/nova.j2 | 58
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/qemu-device-acl-conf.j2 | 6
-rw-r--r--  deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-conf-single.j2 | 6
-rw-r--r--  deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-conf.j2 | 25
-rw-r--r--  deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-cookie.j2 | 1
-rw-r--r--  deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2 | 2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2 | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/zookeeper-unique-id.j2 | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/vrouter-functions.sh | 223
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/vars/Debian.yml | 48
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/vars/RedHat.yml | 9
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/vars/main.yml | 89
-rw-r--r--  deploy/adapters/ansible/roles/secgroup/handlers/main.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/secgroup/tasks/secgroup.yml | 9
-rw-r--r--  deploy/adapters/ansible/roles/setup-network/tasks/main.yml | 8
-rwxr-xr-x  deploy/adapters/ansible/roles/storage/files/loop.yml | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/storage/tasks/main.yml | 5
-rwxr-xr-x  deploy/adapters/ansible/roles/storage/tasks/real.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml | 22
-rw-r--r--  deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml | 39
-rw-r--r--  deploy/adapters/ansible/roles/swift/tasks/swift.yml | 6
-rwxr-xr-x  deploy/adapters/ansible/roles/tacker/tasks/main.yml | 3
-rwxr-xr-x  deploy/adapters/ansible/roles/tacker/tasks/tacker_controller.yml | 120
-rwxr-xr-x  deploy/adapters/ansible/roles/tacker/vars/main.yml | 1
202 files changed, 913 insertions, 4519 deletions
diff --git a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
index 4e85568c..ef833224 100644
--- a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
@@ -27,14 +27,19 @@
group: root
- name: generate ssh keys
- shell: if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "already gen ssh key!"; fi;
+ shell: if [ ! -f ~/.ssh/id_rsa.pub ]; \
+ then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; \
+ else echo "already gen ssh key!"; fi;
- name: fetch ssh keys
- fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes
+ fetch:
+ src: /root/.ssh/id_rsa.pub
+ dest: /tmp/ssh-keys-{{ ansible_hostname }}
+ flat: "yes"
- authorized_key:
user: root
- key: "{{ lookup('file', item) }}"
+ key: "{{ lookup('file', item) }}"
with_fileglob:
- /tmp/ssh-keys-*
max_fail_percentage: 0
@@ -88,10 +93,10 @@
- cinder-volume
- ceilometer_compute
-#- hosts: all
-# remote_user: root
-## max_fail_percentage: 0
-# roles:
+- hosts: all
+ remote_user: root
+ max_fail_percentage: 0
+ roles: []
# - moon
- hosts: all
@@ -104,7 +109,7 @@
remote_user: root
max_fail_percentage: 0
roles: []
- # - ceph-deploy
+# - ceph-deploy
- hosts: ceph
remote_user: root
@@ -144,8 +149,8 @@
tasks:
- name: set bash to nova
user:
- name: nova
- shell: /bin/bash
+ name: nova
+ shell: /bin/bash
- name: make sure ssh dir exist
file:
@@ -176,14 +181,13 @@
- authorized_key:
user: nova
- key: "{{ lookup('file', item) }}"
+ key: "{{ lookup('file', item) }}"
with_fileglob:
- /tmp/ssh-keys-*
- name: chown ssh file
shell: chown -R nova:nova /var/lib/nova/.ssh;
-
- hosts: all
remote_user: root
max_fail_percentage: 0
@@ -198,13 +202,6 @@
- hosts: all
remote_user: root
- sudo: True
- max_fail_percentage: 0
- roles:
- - open-contrail
-
-- hosts: all
- remote_user: root
serial: 1
max_fail_percentage: 0
roles:
@@ -239,4 +236,3 @@
max_fail_percentage: 0
roles:
- compute-recovery
-
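Note: the play above wires up passwordless SSH between nodes by generating a key on each host, fetching every public key back to the deploy machine, and then authorizing all collected keys everywhere. A condensed sketch of that pattern, with an illustrative play scope (not taken verbatim from the repository):

- hosts: all
  remote_user: root
  tasks:
    - name: generate an ssh keypair when none exists yet
      shell: |
        if [ ! -f ~/.ssh/id_rsa.pub ]; then
          ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N "";
        fi

    - name: pull each host's public key back to the deploy machine
      fetch:
        src: /root/.ssh/id_rsa.pub
        dest: /tmp/ssh-keys-{{ ansible_hostname }}
        flat: "yes"

    - name: authorize every collected key on every host in the play
      authorized_key:
        user: root
        key: "{{ lookup('file', item) }}"
      with_fileglob:
        - /tmp/ssh-keys-*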
diff --git a/deploy/adapters/ansible/openstack/allinone.yml b/deploy/adapters/ansible/openstack/allinone.yml
index fabc6eb4..e8597434 100644
--- a/deploy/adapters/ansible/openstack/allinone.yml
+++ b/deploy/adapters/ansible/openstack/allinone.yml
@@ -1,6 +1,6 @@
---
- hosts: controller
- sudo: True
+ sudo: "True"
roles:
- common
- kvmfornfv
@@ -14,19 +14,19 @@
- glance
- hosts: network
- sudo: True
+ sudo: "True"
roles:
- common
- neutron-network
- hosts: storage
- sudo: True
+ sudo: "True"
roles:
- common
- cinder-volume
- hosts: compute
- sudo: True
+ sudo: "True"
roles:
- common
- nova-compute
diff --git a/deploy/adapters/ansible/openstack/compute.yml b/deploy/adapters/ansible/openstack/compute.yml
index d0c41c3e..7a1488f3 100644
--- a/deploy/adapters/ansible/openstack/compute.yml
+++ b/deploy/adapters/ansible/openstack/compute.yml
@@ -1,7 +1,7 @@
---
- hosts: all
remote_user: vagrant
- sudo: True
+ sudo: "True"
roles:
- common
- kvmfornfv
diff --git a/deploy/adapters/ansible/openstack/controller.yml b/deploy/adapters/ansible/openstack/controller.yml
index 0269281e..e6c52733 100644
--- a/deploy/adapters/ansible/openstack/controller.yml
+++ b/deploy/adapters/ansible/openstack/controller.yml
@@ -1,7 +1,7 @@
---
- hosts: controller
remote_user: root
- sudo: True
+ sudo: "True"
roles:
- common
- database
diff --git a/deploy/adapters/ansible/openstack/multinodes.yml b/deploy/adapters/ansible/openstack/multinodes.yml
index 5b43a696..4bd22936 100644
--- a/deploy/adapters/ansible/openstack/multinodes.yml
+++ b/deploy/adapters/ansible/openstack/multinodes.yml
@@ -1,66 +1,66 @@
---
- hosts: database
- sudo: True
+ sudo: "True"
roles:
- common
- database
- hosts: messaging
- sudo: True
+ sudo: "True"
roles:
- common
- mq
- hosts: identity
- sudo: True
+ sudo: "True"
roles:
- common
- keystone
- hosts: compute-controller
- sudo: True
+ sudo: "True"
roles:
- common
- nova-controller
- hosts: network-server
- sudo: True
+ sudo: "True"
roles:
- common
- neutron-controller
- hosts: storage-controller
- sudo: True
+ sudo: "True"
roles:
- common
- cinder-controller
- hosts: image
- sudo: True
+ sudo: "True"
roles:
- common
- glance
- hosts: dashboard
- sudo: True
+ sudo: "True"
roles:
- common
- dashboard
- hosts: network-worker
- sudo: True
+ sudo: "True"
roles:
- common
- neutron-network
- hosts: storage-volume
- sudo: True
+ sudo: "True"
roles:
- common
- cinder-volume
- hosts: compute-worker
- sudo: True
+ sudo: "True"
roles:
- common
- nova-compute
@@ -68,6 +68,6 @@
- hosts: odl
remote_user: root
- sudo: True
+ sudo: "True"
roles:
- odl
diff --git a/deploy/adapters/ansible/openstack/network.yml b/deploy/adapters/ansible/openstack/network.yml
index 77752e45..8e4ec439 100644
--- a/deploy/adapters/ansible/openstack/network.yml
+++ b/deploy/adapters/ansible/openstack/network.yml
@@ -1,7 +1,7 @@
---
- hosts: all
remote_user: vagrant
- sudo: True
+ sudo: "True"
roles:
- common
- neutron-network
diff --git a/deploy/adapters/ansible/openstack/single-controller.yml b/deploy/adapters/ansible/openstack/single-controller.yml
index 96ec0a6a..e7f6900d 100644
--- a/deploy/adapters/ansible/openstack/single-controller.yml
+++ b/deploy/adapters/ansible/openstack/single-controller.yml
@@ -1,6 +1,6 @@
---
- hosts: controller
- sudo: True
+ sudo: "True"
roles:
- common
- database
@@ -13,19 +13,19 @@
- glance
- hosts: network
- sudo: True
+ sudo: "True"
roles:
- common
- neutron-network
- hosts: storage
- sudo: True
+ sudo: "True"
roles:
- common
- cinder-volume
- hosts: compute
- sudo: True
+ sudo: "True"
roles:
- common
- nova-compute
@@ -33,6 +33,6 @@
- hosts: odl
remote_user: root
- sudo: True
+ sudo: "True"
roles:
- odl
diff --git a/deploy/adapters/ansible/openstack/storage.yml b/deploy/adapters/ansible/openstack/storage.yml
index f0be1381..6a6bd34c 100644
--- a/deploy/adapters/ansible/openstack/storage.yml
+++ b/deploy/adapters/ansible/openstack/storage.yml
@@ -1,7 +1,7 @@
---
- hosts: all
remote_user: vagrant
- sudo: True
+ sudo: "True"
roles:
- common
- cinder-volume
diff --git a/deploy/adapters/ansible/roles/aodh/handlers/main.yml b/deploy/adapters/ansible/roles/aodh/handlers/main.yml
index a652ea4b..4ff2a4f9 100644
--- a/deploy/adapters/ansible/roles/aodh/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/aodh/handlers/main.yml
@@ -10,4 +10,3 @@
- name: restart aodh services
service: name={{ item }} state=restarted enabled=yes
with_items: "{{ services | union(services_noarch) }}"
-
diff --git a/deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml b/deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml
index e60d5338..e165121f 100644
--- a/deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml
+++ b/deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml
@@ -11,4 +11,3 @@
shell: su -s /bin/sh -c "aodh-dbsync" aodh
notify:
- restart aodh services
-
diff --git a/deploy/adapters/ansible/roles/boot-recovery/tasks/main.yml b/deploy/adapters/ansible/roles/boot-recovery/tasks/main.yml
index 695779f7..1521f2ff 100755
--- a/deploy/adapters/ansible/roles/boot-recovery/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/boot-recovery/tasks/main.yml
@@ -23,4 +23,3 @@
when: RECOVERY_ENV
tags:
- recovery-stop-service
-
diff --git a/deploy/adapters/ansible/roles/boot-recovery/vars/RedHat.yml b/deploy/adapters/ansible/roles/boot-recovery/vars/RedHat.yml
index c46f79c8..42d9aed9 100755
--- a/deploy/adapters/ansible/roles/boot-recovery/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/boot-recovery/vars/RedHat.yml
@@ -12,4 +12,3 @@ controller_services:
- neutron-openvswitch-agent
- openstack-aodh-expirer
- mysql
-
diff --git a/deploy/adapters/ansible/roles/boot-recovery/vars/main.yml b/deploy/adapters/ansible/roles/boot-recovery/vars/main.yml
index 22af29f4..89c1e8f0 100755
--- a/deploy/adapters/ansible/roles/boot-recovery/vars/main.yml
+++ b/deploy/adapters/ansible/roles/boot-recovery/vars/main.yml
@@ -8,4 +8,3 @@
##############################################################################
---
controller_services_noarch: []
-
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml
index 9b487def..a5a78281 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml
@@ -14,14 +14,14 @@
src: "{{ item }}"
dest: /opt/os_templates
with_items:
- - ceilometer.conf.j2
- - nova.conf.j2
+ - ceilometer.conf.j2
+ - nova.conf.j2
- name: update ceilometer configs
shell: crudini --merge {{ item.dest }} < /opt/os_templates/{{ item.src }}
with_items:
- - src: nova.conf.j2
- dest: /etc/nova/nova.conf
+ - src: nova.conf.j2
+ dest: /etc/nova/nova.conf
notify: restart nova service
- name: delete config
@@ -34,4 +34,3 @@
with_items: "{{ ceilometer_services }}"
- meta: flush_handlers
-
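Note: the tasks above stage a rendered template under /opt/os_templates and fold it into the live configuration with crudini --merge, which overlays only the options present in the fragment instead of overwriting the whole file. A minimal sketch of that pattern, with illustrative file names:

- name: render the option fragment next to the live config
  template:
    src: nova.conf.j2
    dest: /opt/os_templates/nova.conf.j2

- name: overlay the fragment onto the existing ini file in place
  shell: crudini --merge /etc/nova/nova.conf < /opt/os_templates/nova.conf.j2
  notify: restart nova service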
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml
index b1d86f8d..e2414262 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml
@@ -30,6 +30,5 @@
template:
src: ceilometer.conf.j2
dest: /etc/ceilometer/ceilometer.conf
- backup: yes
+ backup: "yes"
notify: restart ceilometer service
-
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml
index b1d86f8d..e2414262 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml
@@ -30,6 +30,5 @@
template:
src: ceilometer.conf.j2
dest: /etc/ceilometer/ceilometer.conf
- backup: yes
+ backup: "yes"
notify: restart ceilometer service
-
diff --git a/deploy/adapters/ansible/roles/ceph-config/tasks/create_config.yml b/deploy/adapters/ansible/roles/ceph-config/tasks/create_config.yml
index 771fb6ab..b0be0f2e 100755
--- a/deploy/adapters/ansible/roles/ceph-config/tasks/create_config.yml
+++ b/deploy/adapters/ansible/roles/ceph-config/tasks/create_config.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
@@ -43,15 +44,19 @@
when: inventory_hostname in groups['ceph_mon']
- name: create mon.keyring
- shell: "ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'"
+ shell: "ceph-authtool --create-keyring /tmp/ceph.mon.keyring \
+ --gen-key -n mon. --cap mon 'allow *'"
when: inventory_hostname in groups['ceph_adm']
- name: create admin.keyring
- shell: "ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'"
+ shell: "ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring \
+ --gen-key -n client.admin --set-uid=0 \
+ --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'"
when: inventory_hostname in groups['ceph_adm']
- name: Add the client.admin key to the ceph.mon.keyring
- shell: "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring"
+ shell: "ceph-authtool /tmp/ceph.mon.keyring \
+ --import-keyring /etc/ceph/ceph.client.admin.keyring"
when: inventory_hostname in groups['ceph_adm']
- name: fetch mon.keyring to local
@@ -64,11 +69,17 @@
when: compute_expansion
- name: fetch client.admin.keyring to local
- fetch: src="/etc/ceph/ceph.client.admin.keyring" dest="/tmp/ceph.client.admin.keyring" flat=yes
+ fetch:
+ src: "/etc/ceph/ceph.client.admin.keyring"
+ dest: "/tmp/ceph.client.admin.keyring"
+ flat: "yes"
when: inventory_hostname in groups['ceph_adm']
- name: fetch mon.keyring from ceph_adm
- fetch: src="/etc/ceph/ceph.client.admin.keyring" dest="/tmp/ceph.client.admin.keyring" flat=yes
+ fetch:
+ src: "/etc/ceph/ceph.client.admin.keyring"
+ dest: "/tmp/ceph.client.admin.keyring"
+ flat: "yes"
delegate_to: "{{ public_vip.ip }}"
when: compute_expansion
@@ -76,7 +87,8 @@
copy: src="/tmp/ceph.mon.keyring" dest="/tmp/ceph.mon.keyring"
- name: copy admin.keyring to remote nodes
- copy: src="/tmp/ceph.client.admin.keyring" dest="/etc/ceph/ceph.client.admin.keyring"
-
+ copy:
+ src: "/tmp/ceph.client.admin.keyring"
+ dest: "/etc/ceph/ceph.client.admin.keyring"
- meta: flush_handlers
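Note: the keyring distribution above relies on fetch (which copies a file from the remote node back to the machine running Ansible) followed by copy (which pushes it from that machine out to every node). A stripped-down sketch of the round trip, assuming the same keyring path as above:

- name: pull the admin keyring from the admin node to the deploy host
  fetch:
    src: /etc/ceph/ceph.client.admin.keyring
    dest: /tmp/ceph.client.admin.keyring
    flat: "yes"
  when: inventory_hostname in groups['ceph_adm']

- name: push the collected keyring out to every node in the play
  copy:
    src: /tmp/ceph.client.admin.keyring
    dest: /etc/ceph/ceph.client.admin.keyring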
diff --git a/deploy/adapters/ansible/roles/ceph-config/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-config/tasks/main.yml
index dbe9fea5..49feab61 100755
--- a/deploy/adapters/ansible/roles/ceph-config/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-config/tasks/main.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
diff --git a/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
index 1d14c2d2..f5600bec 100644
--- a/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
+++ b/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
@@ -13,7 +14,8 @@
file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}" state="directory"
- name: Populate the monitor daemon
- shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring"
+ shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap \
+ --keyring /tmp/ceph.mon.keyring"
- name: Change ceph/mon dir owner to ceph
shell: "chown -R ceph:ceph /var/lib/ceph/mon"
@@ -27,7 +29,9 @@
when: ansible_os_family == "Debian"
- name: Touch the done and auto start file
- file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch"
+ file:
+ path: "/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}"
+ state: "touch"
with_items:
- "done"
- "{{ ceph_start_type }}"
@@ -39,5 +43,8 @@
wait_for: path=/var/lib/ceph/bootstrap-osd/ceph.keyring
- name: fetch osd keyring
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
- run_once: True
+ fetch:
+ src: "/var/lib/ceph/bootstrap-osd/ceph.keyring"
+ dest: "/tmp/ceph.osd.keyring"
+ flat: "yes"
+ run_once: "True"
diff --git a/deploy/adapters/ansible/roles/ceph-mon/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-mon/tasks/main.yml
index 3defa26d..5aa0694d 100644
--- a/deploy/adapters/ansible/roles/ceph-mon/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-mon/tasks/main.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
diff --git a/deploy/adapters/ansible/roles/ceph-mon/vars/main.yml b/deploy/adapters/ansible/roles/ceph-mon/vars/main.yml
index 466ea6ab..d760b4e6 100644
--- a/deploy/adapters/ansible/roles/ceph-mon/vars/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-mon/vars/main.yml
@@ -7,4 +7,3 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml
index 0496ba97..84515267 100755
--- a/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml
@@ -12,29 +12,113 @@
when: inventory_hostname in groups['controller']
tags:
- ceph_conf_glance
- ignore_errors: True
+ ignore_errors: "True"
- name: modify glance-api.conf for ceph
- shell: sed -i 's/^\(default_store\).*/\1 = rbd/g' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a rbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf
+ shell: |
+ sed -i 's/^\(default_store\).*/\1 = rbd/g' /etc/glance/glance-api.conf;
+ sed -i '/^\[glance_store/a rbd_store_pool = images' \
+ /etc/glance/glance-api.conf;
+ sed -i '/^\[glance_store/a rbd_store_user = glance' \
+ /etc/glance/glance-api.conf;
+ sed -i '/^\[glance_store/a rbd_store_ceph_conf = /etc/ceph/ceph.conf' \
+ /etc/glance/glance-api.conf;
+ sed -i '/^\[glance_store/a rbd_store_chunk_size = 8' \
+ /etc/glance/glance-api.conf;
+ sed -i '/^\[glance_store/a show_image_direct_url=True' \
+ /etc/glance/glance-api.conf;
when: inventory_hostname in groups['controller']
tags:
- ceph_conf_glance
-- name: restart glance
- shell: rm -f /var/log/glance/api.log && chown -R glance:glance /var/log/glance && service {{ glance_service }} restart
+- name: remove glance-api log
+ shell: |
+ rm -f /var/log/glance/api.log;
+ chown -R glance:glance /var/log/glance;
+ when: inventory_hostname in groups['controller']
+ tags:
+ - ceph_conf_glance
+ ignore_errors: "True"
+
+- name: restart glance service
+ shell: service {{ glance_service }} restart
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 3
when: inventory_hostname in groups['controller']
tags:
- ceph_conf_glance
- ignore_errors: True
- name: modify cinder.conf for ceph
- shell: sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/g' /etc/cinder/cinder.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid }}/g' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid }}' /etc/cinder/cinder.conf && service {{ cinder_service }} restart
+ shell: |
+ sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/g' \
+ /etc/cinder/cinder.conf;
+ sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid }}/g' \
+ /etc/cinder/cinder.conf;
+ sed -i '/^\[DEFAULT/a rbd_pool = volumes' \
+ /etc/cinder/cinder.conf;
+ sed -i '/^\[DEFAULT/a rbd_ceph_conf = /etc/ceph/ceph.conf' \
+ /etc/cinder/cinder.conf;
+ sed -i '/^\[DEFAULT/a rbd_flatten_volume_from_snapshot = false' \
+ /etc/cinder/cinder.conf;
+ sed -i '/^\[DEFAULT/a rbd_max_clone_depth = 5' \
+ /etc/cinder/cinder.conf;
+ sed -i '/^\[DEFAULT/a rbd_store_chunk_size = 4' \
+ /etc/cinder/cinder.conf;
+ sed -i '/^\[DEFAULT/a rados_connect_timeout = -1' \
+ /etc/cinder/cinder.conf;
+ sed -i '/^\[DEFAULT/a glance_api_version = 2' \
+ /etc/cinder/cinder.conf;
+ sed -i '/^\[DEFAULT/a rbd_user = cinder' \
+ /etc/cinder/cinder.conf;
+ sed -i '/^\[DEFAULT/a rbd_secret_uuid = {{ ceph_uuid }}' \
+ /etc/cinder/cinder.conf;
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_conf_cinder
+
+- name: restart cinder service
+ shell: service {{ cinder_service }} restart
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 3
when: inventory_hostname in groups['compute']
tags:
- ceph_conf_cinder
- name: modify nova.conf for ceph
- shell: sed -i 's/^\(images_type\).*/\1 = rbd/g' /etc/nova/nova-compute.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid }}/g' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && service {{ nova_service }} restart
+ shell: |
+ sed -i 's/^\(images_type\).*/\1 = rbd/g' \
+ /etc/nova/nova-compute.conf;
+ sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid }}/g' \
+ /etc/nova/nova-compute.conf;
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_conf_nova
+
+- name: modify nova.conf libvirt for ceph
+ blockinfile:
+ dest: /etc/nova/nova-compute.conf
+ insertafter: "libvirt"
+ block: |
+ images_rbd_pool = vms
+ images_rbd_ceph_conf = /etc/ceph/ceph.conf
+ rbd_user = cinder
+ rbd_secret_uuid = {{ ceph_uuid }}
+ disk_cachemodes = "network=writeback"
+ live_migration_flag = "{{ live_migration_flag | join(',') }}"
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_conf_nova
+
+- name: restart nova service
+ shell: service {{ nova_service }} restart
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 3
when: inventory_hostname in groups['compute']
tags:
- ceph_conf_nova
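Note: the restart tasks introduced above use Ansible's generic retry loop: register the result, re-run the task with until until the command exits cleanly, bounded by retries and delay. A minimal sketch with an illustrative service name:

- name: restart a service and keep retrying until it comes back
  shell: service glance-api restart
  register: result
  until: result.rc == 0   # succeed once the restart exits 0
  retries: 10             # give up after 10 attempts
  delay: 3                # wait 3 seconds between attempts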
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml
index 2097ca57..b7ec4a23 100644
--- a/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml
@@ -16,4 +16,3 @@
umount /var/lib/glance/images
sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
when: mount_info.stdout.find('images') != -1
-
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml
index ece4154f..3ff9df48 100755
--- a/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml
@@ -62,15 +62,26 @@
when: inventory_hostname in groups['ceph_adm']
- name: create ceph users for openstack
- shell: ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' && ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
+ shell: |
+ ceph auth get-or-create client.cinder mon 'allow r' osd \
+ 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, \
+ allow rwx pool=vms, allow rx pool=images';
+ ceph auth get-or-create client.glance mon 'allow r' osd \
+ 'allow class-read object_prefix rbd_children, allow rwx pool=images';
when: inventory_hostname in groups['ceph_adm']
- name: send glance key to controller nodes
- shell: ceph auth get-or-create client.glance | tee /etc/ceph/ceph.client.glance.keyring && chown glance:glance /etc/ceph/ceph.client.glance.keyring
+ shell: |
+ ceph auth get-or-create client.glance | \
+ tee /etc/ceph/ceph.client.glance.keyring;
+ chown glance:glance /etc/ceph/ceph.client.glance.keyring;
when: inventory_hostname in groups['controller']
- name: send cinder key to compute nodes
- shell: ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring && chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
+ shell: |
+ ceph auth get-or-create client.cinder | \
+ tee /etc/ceph/ceph.client.cinder.keyring;
+ chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring;
when: inventory_hostname in groups['compute']
tags:
- ceph_send_key
@@ -88,18 +99,21 @@
- ceph_copy_secret
- name: undefine libvirt secret in case of repeatedly execute ceph_deploy
- shell: "virsh secret-list | awk '$1 ~ /[0-9]+/ {print $1}' | xargs virsh secret-undefine"
+ shell: |
+ virsh secret-list | awk '$1 ~ /[0-9]+/ {print $1}' | \
+ xargs virsh secret-undefine
when: inventory_hostname in groups['compute']
tags:
- ceph_copy_secret
- ignore_errors: True
+ ignore_errors: "True"
- name: create key for libvirt on compute nodes
- shell: "virsh secret-define --file ~/secret.xml && virsh secret-set-value --secret {{ ceph_uuid }} --base64 $(cat client.cinder.key)"
+ shell: |
+ virsh secret-define --file ~/secret.xml;
+ virsh secret-set-value --secret {{ ceph_uuid }} \
+ --base64 $(cat client.cinder.key);
when: inventory_hostname in groups['compute']
tags:
- ceph_copy_secret
- ignore_errors: True
-
-
+ ignore_errors: "True"
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
index 06c3acb6..c34d1376 100644
--- a/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/vars/main.yml b/deploy/adapters/ansible/roles/ceph-openstack/vars/main.yml
index 6de7e9f0..32ebfc5f 100755
--- a/deploy/adapters/ansible/roles/ceph-openstack/vars/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/vars/main.yml
@@ -11,3 +11,10 @@ packages_noarch: []
ceph_cluster_dir:
- /root/ceph-cluster
+
+live_migration_flag:
+ - 'VIR_MIGRATE_UNDEFINE_SOURCE'
+ - 'VIR_MIGRATE_PEER2PEER'
+ - 'VIR_MIGRATE_LIVE'
+ - 'VIR_MIGRATE_PERSIST_DEST'
+ - 'VIR_MIGRATE_TUNNELLED'
diff --git a/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
index 363e5e6d..78d62f67 100644
--- a/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
+++ b/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
@@ -7,17 +7,21 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-
- name: create osd lv and mount it on /var/local/osd
script: create_osd.sh
- name: fetch osd keyring from ceph_adm
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
+ fetch:
+ src: "/var/lib/ceph/bootstrap-osd/ceph.keyring"
+ dest: "/tmp/ceph.osd.keyring"
+ flat: "yes"
delegate_to: "{{ public_vip.ip }}"
when: compute_expansion
- name: copy osd keyring
- copy: src="/tmp/ceph.osd.keyring" dest="/var/lib/ceph/bootstrap-osd/ceph.keyring"
+ copy:
+ src: "/tmp/ceph.osd.keyring"
+ dest: "/var/lib/ceph/bootstrap-osd/ceph.keyring"
- name: prepare osd disk
shell: ceph-disk prepare --fs-type xfs /var/local/osd
@@ -33,10 +37,21 @@
service: name=ceph enabled=yes
- name: rebuild osd after reboot
- lineinfile: dest=/etc/init/ceph-osd-all-starter.conf insertafter="^task" line="pre-start script\n set -e\n /opt/setup_storage/losetup.sh\n sleep 3\n mount /dev/storage-volumes/ceph0 /var/local/osd\nend script"
+ blockinfile:
+ dest: /etc/init/ceph-osd-all-starter.conf
+ insertafter: "^task"
+ block: |
+ pre-start script
+ set -e
+ /opt/setup_storage/losetup.sh
+ sleep 3
+ mount /dev/storage-volumes/ceph0 /var/local/osd
+ end script
when: ansible_os_family == "Debian"
- name: rebuild osd after reboot for centos
- lineinfile: dest=/etc/init.d/ceph insertafter="^### END INIT INFO" line="\nsleep 1\nmount /dev/storage-volumes/ceph0 /var/local/osd"
+ lineinfile:
+ dest: /etc/init.d/ceph
+ insertafter: "^### END INIT INFO"
+ line: "\nsleep 1\nmount /dev/storage-volumes/ceph0 /var/local/osd"
when: ansible_os_family == "RedHat"
-
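Note: blockinfile, which replaces the long single-line lineinfile here, manages a marker-delimited block and re-applies it idempotently on every run. A minimal sketch with the block content trimmed and the module's default marker shown explicitly for illustration:

- name: keep the osd mount commands as one managed block
  blockinfile:
    dest: /etc/init/ceph-osd-all-starter.conf
    insertafter: "^task"
    marker: "# {mark} ANSIBLE MANAGED BLOCK"
    block: |
      pre-start script
        mount /dev/storage-volumes/ceph0 /var/local/osd
      end script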
diff --git a/deploy/adapters/ansible/roles/ceph-osd/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-osd/tasks/main.yml
index b2d10b15..bc1b29b6 100644
--- a/deploy/adapters/ansible/roles/ceph-osd/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-osd/tasks/main.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
diff --git a/deploy/adapters/ansible/roles/ceph-purge/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-purge/tasks/main.yml
index a25572c1..5364cc82 100644
--- a/deploy/adapters/ansible/roles/ceph-purge/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-purge/tasks/main.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
@@ -21,7 +22,10 @@
- ceph_deploy
- name: purge ceph
- shell: "ceph-deploy purge {{ inventory_hostname }}; ceph-deploy purgedata {{ inventory_hostname }}; ceph-deploy forgetkeys"
+ shell: |
+ ceph-deploy purge {{ inventory_hostname }};
+ ceph-deploy purgedata {{ inventory_hostname }};
+ ceph-deploy forgetkeys
tags:
- ceph_purge
- ceph_deploy
@@ -31,5 +35,3 @@
tags:
- ceph_purge
- ceph_deploy
-
-
diff --git a/deploy/adapters/ansible/roles/cinder-controller/handlers/main.yml b/deploy/adapters/ansible/roles/cinder-controller/handlers/main.yml
index 541bf8d5..5791cd18 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/cinder-controller/handlers/main.yml
@@ -10,4 +10,3 @@
- name: restart cinder control serveice
service: name={{ item }} state=restarted enabled=yes
with_items: "{{ services | union(services_noarch) }}"
-
diff --git a/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml b/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
index e763a477..12af52f1 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
+++ b/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
@@ -11,7 +11,6 @@
template: src=cinder.conf dest=/etc/cinder/cinder.conf
- name: sync cinder db
- #cinder_manage: action=dbsync
shell: su -s /bin/sh -c 'cinder-manage db sync' cinder
ignore_errors: true
changed_when: true
diff --git a/deploy/adapters/ansible/roles/cinder-controller/vars/main.yml b/deploy/adapters/ansible/roles/cinder-controller/vars/main.yml
index 483300e3..ebde9470 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/vars/main.yml
+++ b/deploy/adapters/ansible/roles/cinder-controller/vars/main.yml
@@ -11,4 +11,3 @@ packages_noarch:
- python-cinderclient
services_noarch: []
-
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
index 29256d9c..5394b8e8 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
@@ -31,9 +31,8 @@
with_items: "{{ services | union(services_noarch) }}"
- name: upload cinder-volume configuration
- template: src=cinder.conf dest=/etc/cinder/cinder.conf
- backup=yes
+ template: src=cinder.conf dest=/etc/cinder/cinder.conf backup=yes
notify:
- - restart cinder-volume services
+ - restart cinder-volume services
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/cinder-volume/vars/main.yml b/deploy/adapters/ansible/roles/cinder-volume/vars/main.yml
index 99494502..e564e6c5 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/vars/main.yml
+++ b/deploy/adapters/ansible/roles/cinder-volume/vars/main.yml
@@ -11,4 +11,3 @@ packages_noarch:
- lvm2
services_noarch: []
-
diff --git a/deploy/adapters/ansible/roles/common/tasks/main.yml b/deploy/adapters/ansible/roles/common/tasks/main.yml
index 0e19c004..135d5356 100644
--- a/deploy/adapters/ansible/roles/common/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/common/tasks/main.yml
@@ -26,18 +26,20 @@
- name: update compass-core name and ip to hosts files
shell: |
- echo "# compass" >> /etc/hosts
- echo {{ COMPASS_SERVER.stdout_lines[0] }} {{ name.stdout_lines[0] }} >> /etc/hosts
+ echo "# compass" >> /etc/hosts;
+ echo {{ COMPASS_SERVER.stdout_lines[0] }} {{ name.stdout_lines[0] }} \
+ >> /etc/hosts;
- name: install python-crypto
yum: name=python-crypto state=present
register: python_crypto_result
- ignore_errors: yes
+ ignore_errors: "yes"
when: ansible_os_family == "RedHat"
-- name: remove python crypt egg file to work-around https://bugs.centos.org/view.php?id=9896&nbn=2
+- name: remove python crypt egg file to work-around
shell: rm -rf /usr/lib64/python2.7/site-packages/pycrypto-2.6.1-py2.7.egg-info
- when: ansible_os_family == "RedHat" and python_crypto_result.msg == "Error unpacking rpm package python2-crypto-2.6.1-9.el7.x86_64\n"
+ when: ansible_os_family == "RedHat"
+ and python_crypto_result.msg | match("Error unpack.*crypto-2.6.1-9.*")
- name: install packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
@@ -69,11 +71,11 @@
- name: use ntpdate once for initial sync time
shell: ntpdate {{ ntp_server }}
- ignore_errors: True
+ ignore_errors: "True"
- name: sync sys clock to hard clock
shell: hwclock --systohc
- ignore_errors: True
+ ignore_errors: "True"
- name: create fireball keys dir
file: path=~/.fireball.keys state=directory mode=0700
diff --git a/deploy/adapters/ansible/roles/common/vars/Debian.yml b/deploy/adapters/ansible/roles/common/vars/Debian.yml
index 46e0374f..ed11bdd6 100644
--- a/deploy/adapters/ansible/roles/common/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/common/vars/Debian.yml
@@ -16,7 +16,6 @@ packages:
- python-iniparse
- python-lxml
- python-crypto
- #- python-d* #TODO, need remove
pip_packages:
- crudini
@@ -27,5 +26,3 @@ pip_conf: pip.conf
services:
- ntp
-
-
diff --git a/deploy/adapters/ansible/roles/compute-recovery/tasks/main.yml b/deploy/adapters/ansible/roles/compute-recovery/tasks/main.yml
index e67d0088..23b4a948 100644
--- a/deploy/adapters/ansible/roles/compute-recovery/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/compute-recovery/tasks/main.yml
@@ -23,4 +23,3 @@
when: RECOVERY_ENV
tags:
- recovery
-
diff --git a/deploy/adapters/ansible/roles/compute-recovery/vars/Debian.yml b/deploy/adapters/ansible/roles/compute-recovery/vars/Debian.yml
index 23200a50..4e843d03 100644
--- a/deploy/adapters/ansible/roles/compute-recovery/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/compute-recovery/vars/Debian.yml
@@ -12,4 +12,3 @@ compute_services:
- neutron-openvswitch-agent
- cinder-volume
- ceilometer-agent-compute
-
diff --git a/deploy/adapters/ansible/roles/compute-recovery/vars/RedHat.yml b/deploy/adapters/ansible/roles/compute-recovery/vars/RedHat.yml
index c96b062a..7b9488f8 100644
--- a/deploy/adapters/ansible/roles/compute-recovery/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/compute-recovery/vars/RedHat.yml
@@ -12,4 +12,3 @@ compute_services:
- neutron-openvswitch-agent
- openstack-cinder-volume
- openstack-ceilometer-compute
-
diff --git a/deploy/adapters/ansible/roles/congress/tasks/congress_config.yml b/deploy/adapters/ansible/roles/congress/tasks/congress_config.yml
index f40d4c22..c85057e8 100644
--- a/deploy/adapters/ansible/roles/congress/tasks/congress_config.yml
+++ b/deploy/adapters/ansible/roles/congress/tasks/congress_config.yml
@@ -8,7 +8,9 @@
##############################################################################
---
- name: congress db sync
- shell: /usr/local/bin/congress-db-manage --config-file /etc/congress/congress.conf upgrade head
+ shell: |
+ /usr/local/bin/congress-db-manage \
+ --config-file /etc/congress/congress.conf upgrade head
when: inventory_hostname == haproxy_hosts.keys()[0]
- name: start congress service
diff --git a/deploy/adapters/ansible/roles/congress/tasks/congress_db.yml b/deploy/adapters/ansible/roles/congress/tasks/congress_db.yml
index 1883509b..16832eaa 100644
--- a/deploy/adapters/ansible/roles/congress/tasks/congress_db.yml
+++ b/deploy/adapters/ansible/roles/congress/tasks/congress_db.yml
@@ -8,7 +8,7 @@
##############################################################################
---
- name: create congress db
- mysql_db:
+ mysql_db:
login_unix_socket: /var/run/mysqld/mysqld.sock
name: "{{ item.db }}"
state: present
@@ -25,4 +25,3 @@
with_nested:
- "{{ credentials }}"
- ['%', 'localhost']
-
diff --git a/deploy/adapters/ansible/roles/congress/tasks/congress_install.yml b/deploy/adapters/ansible/roles/congress/tasks/congress_install.yml
index 1e620783..4269b5e6 100644
--- a/deploy/adapters/ansible/roles/congress/tasks/congress_install.yml
+++ b/deploy/adapters/ansible/roles/congress/tasks/congress_install.yml
@@ -30,8 +30,8 @@
- name: create congress service work dir
file: path=/var/lib/congress state=directory
-- name: link the congress service
- file:
+- name: link the congress service
+ file:
src: /lib/systemd/system/congress.service
dest: /etc/systemd/system/multi-user.target.wants/congress.service
state: link
diff --git a/deploy/adapters/ansible/roles/congress/vars/Debian.yml b/deploy/adapters/ansible/roles/congress/vars/Debian.yml
index 1cc4645e..36aba73e 100644
--- a/deploy/adapters/ansible/roles/congress/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/congress/vars/Debian.yml
@@ -12,7 +12,7 @@ packages:
- python-congressclient
- python-cloudfoundryclient
-service:
+service:
- congress
credentials:
diff --git a/deploy/adapters/ansible/roles/controller-recovery/tasks/main.yml b/deploy/adapters/ansible/roles/controller-recovery/tasks/main.yml
index c5d2e959..eb7c4309 100644
--- a/deploy/adapters/ansible/roles/controller-recovery/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/controller-recovery/tasks/main.yml
@@ -23,4 +23,3 @@
when: RECOVERY_ENV
tags:
- recovery
-
diff --git a/deploy/adapters/ansible/roles/controller-recovery/vars/Debian.yml b/deploy/adapters/ansible/roles/controller-recovery/vars/Debian.yml
index 62753413..730ce830 100644
--- a/deploy/adapters/ansible/roles/controller-recovery/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/controller-recovery/vars/Debian.yml
@@ -38,4 +38,3 @@ controller_services:
- aodh-evaluator
- aodh-listener
- cron
-
diff --git a/deploy/adapters/ansible/roles/controller-recovery/vars/RedHat.yml b/deploy/adapters/ansible/roles/controller-recovery/vars/RedHat.yml
index 145acecd..75a69e7d 100644
--- a/deploy/adapters/ansible/roles/controller-recovery/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/controller-recovery/vars/RedHat.yml
@@ -37,4 +37,3 @@ controller_services:
- openstack-aodh-evaluator
- openstack-aodh-listener
- cron
-
diff --git a/deploy/adapters/ansible/roles/controller-recovery/vars/main.yml b/deploy/adapters/ansible/roles/controller-recovery/vars/main.yml
index 22af29f4..89c1e8f0 100644
--- a/deploy/adapters/ansible/roles/controller-recovery/vars/main.yml
+++ b/deploy/adapters/ansible/roles/controller-recovery/vars/main.yml
@@ -8,4 +8,3 @@
##############################################################################
---
controller_services_noarch: []
-
diff --git a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
index 4f8ccad0..289bfa83 100644
--- a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
@@ -27,7 +27,8 @@
when: ansible_os_family == "Debian"
- name: remove ubuntu theme
- action: "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent"
+ action:
+ "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent"
when: ansible_os_family == 'Debian' and not enable_ubuntu_theme
notify:
- restart dashboard services
@@ -79,8 +80,9 @@
- restart dashboard services
- name: precompile horizon css
- shell: /usr/bin/python /usr/share/openstack-dashboard/manage.py compress --force
- ignore_errors: True
+ shell: |
+ /usr/bin/python /usr/share/openstack-dashboard/manage.py compress --force
+ ignore_errors: "True"
when: ansible_os_family == 'Debian'
notify:
- restart dashboard services
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
index 442cd18b..9752b550 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
@@ -8,7 +8,9 @@
##############################################################################
---
- name: get cluster status
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
+ shell: |
+ mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"' \
+ | awk '{print $2}'
register: cluster_status
when:
- inventory_hostname == haproxy_hosts.keys()[0]
@@ -22,7 +24,9 @@
and not cluster_status.stdout | search("OPERATIONAL")
- name: wait for cluster ready
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
+ shell: |
+ mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"' \
+ | awk '{print $2}'
register: cluster_status
until: cluster_status|success
failed_when: not cluster_status.stdout | search("OPERATIONAL")
@@ -33,7 +37,9 @@
and not cluster_status.stdout | search("OPERATIONAL")
- name: if I in the cluster nodes
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_incoming_addresses"'|awk '{print $2}'
+ shell: |
+ mysql --silent --skip-column-names \
+ -e 'SHOW STATUS LIKE "wsrep_incoming_addresses"' | awk '{print $2}'
register: cluster_nodes
changed_when: false
@@ -42,7 +48,7 @@
when: |
inventory_hostname != haproxy_hosts.keys()[0]
and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
- ignore_errors: True
+ ignore_errors: "True"
- name: delay 60 seconds
shell: sleep 60
@@ -55,15 +61,13 @@
- name: chmod directory
shell: >
- chmod 755 -R /var/lib/mysql/ ;
- chmod 755 -R /var/log/mysql/ ;
- chmod 755 -R /etc/mysql/conf.d/;
+ chmod 755 -R /var/lib/mysql/;
+ chmod 755 -R /var/log/mysql/;
+ chmod 755 -R /etc/mysql/conf.d/;
- name: restart first nodes
shell: service mysql restart
when: |
- (inventory_hostname == haproxy_hosts.keys()[0]
+ (inventory_hostname == haproxy_hosts.keys()[0]
and haproxy_hosts|length > 1
and not cluster_nodes.stdout | search( '{{ internal_ip }}' ))
-
-
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_redhat.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_redhat.yml
index cfd778f1..77b18702 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_redhat.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_redhat.yml
@@ -13,10 +13,12 @@
- name: killall mysqld processes
shell: sudo killall -9 mysqld
when: RECOVERY_ENV
- ignore_errors: True
+ ignore_errors: "True"
- name: get cluster status
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
+ shell: |
+ mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"' \
+ | awk '{print $2}'
register: cluster_status
when:
- inventory_hostname == haproxy_hosts.keys()[0]
@@ -28,7 +30,9 @@
and not cluster_status.stdout | search("OPERATIONAL")
- name: wait for cluster ready
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
+ shell: |
+ mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"' \
+ | awk '{print $2}'
register: cluster_status
until: cluster_status|success
failed_when: not cluster_status.stdout | search("OPERATIONAL")
@@ -39,7 +43,9 @@
and not cluster_status.stdout | search("OPERATIONAL")
- name: if I in the cluster nodes
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_incoming_addresses"'|awk '{print $2}'
+ shell: |
+ mysql --silent --skip-column-names \
+ -e 'SHOW STATUS LIKE "wsrep_incoming_addresses"' | awk '{print $2}'
register: cluster_nodes
changed_when: false
@@ -47,7 +53,7 @@
service:
name: mysql
state: restarted
- enabled: yes
+ enabled: "yes"
when: |
inventory_hostname != haproxy_hosts.keys()[0]
and not cluster_nodes.stdout | search( "{{ internal_ip }}")
@@ -61,7 +67,5 @@
name: mysql
state: restarted
when: |
- inventory_hostname == haproxy_hosts.keys()[0]
+ inventory_hostname == haproxy_hosts.keys()[0]
and haproxy_hosts|length > 1
-
-
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml
index 780fc322..22b5d60c 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml
@@ -8,7 +8,10 @@
##############################################################################
---
- name: create all needed db
- mysql_db: login_unix_socket=/var/run/mysqld/mysqld.sock name={{ item.db }} state=present
+ mysql_db:
+ login_unix_socket=/var/run/mysqld/mysqld.sock
+ name={{ item.db }}
+ state=present
with_items: "{{ credentials }}"
tags:
- test_db
@@ -63,7 +66,8 @@
service:
name: mysql
state: restarted
- when: inventory_hostname == haproxy_hosts.keys()[0] and haproxy_hosts|length > 1
+ when: inventory_hostname == haproxy_hosts.keys()[0]
+ and haproxy_hosts|length > 1
- name: wait for cluster ready
command: mysql -e"show status like 'wsrep%'"
@@ -79,7 +83,6 @@
service:
name: mysql
state: restarted
- enabled: yes
+ enabled: "yes"
when:
- inventory_hostname != haproxy_hosts.keys()[0]
-
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
index d1897173..ba800dca 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
@@ -32,7 +32,7 @@
template:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
- backup: yes
+ backup: "yes"
mode: 0644
with_items: "{{ mysql_config }}"
@@ -52,7 +52,13 @@
when: ansible_os_family == "Debian"
- name: set owner
- file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory mode=0755
+ file:
+ path: /var/lib/mysql
+ owner: mysql
+ group: mysql
+ recurse: "yes"
+ state: directory
+ mode: 0755
- name: get logfile stat
stat: path='{{ mysql_data_dir }}/ib_logfile0'
@@ -67,4 +73,3 @@
when: |
logfile_stat.stat.exists
and logfile_stat.stat.size != 1073741824
-
diff --git a/deploy/adapters/ansible/roles/database/tasks/mongodb_config.yml b/deploy/adapters/ansible/roles/database/tasks/mongodb_config.yml
index 76aa5675..10f0322c 100755
--- a/deploy/adapters/ansible/roles/database/tasks/mongodb_config.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mongodb_config.yml
@@ -19,7 +19,9 @@
shell: mongo compass /opt/replica.js
- name: wait replica servers are ready
- shell: mongo compass --eval 'printjson(rs.status())'|grep -E 'PRIMARY|SECONDARY'|wc -l
+ shell: |
+ mongo compass --eval 'printjson(rs.status())'| \
+ grep -E 'PRIMARY|SECONDARY'| wc -l
register: servers
until: servers.stdout|int == {{ haproxy_hosts|length }}
retries: 60
diff --git a/deploy/adapters/ansible/roles/database/tasks/mongodb_install.yml b/deploy/adapters/ansible/roles/database/tasks/mongodb_install.yml
index bac6c6eb..67bc0499 100755
--- a/deploy/adapters/ansible/roles/database/tasks/mongodb_install.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mongodb_install.yml
@@ -19,7 +19,8 @@
template: src=mongodb.conf dest=/opt/os_templates backup=yes
- name: update mongodb config file
- shell: crudini --merge {{ mongodb_config.dest }} < /opt/os_templates/mongodb.conf
+ shell: |
+ crudini --merge {{ mongodb_config.dest }} < /opt/os_templates/mongodb.conf
- name: rm prealloc files
file:
diff --git a/deploy/adapters/ansible/roles/database/vars/Debian.yml b/deploy/adapters/ansible/roles/database/vars/Debian.yml
index 1021524d..c657f62e 100644
--- a/deploy/adapters/ansible/roles/database/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/database/vars/Debian.yml
@@ -8,27 +8,27 @@
##############################################################################
---
mongodb_packages:
- - mongodb-server
- - mongodb-clients
- - python-pymongo
+ - mongodb-server
+ - mongodb-clients
+ - python-pymongo
mysql_packages:
- python-mysqldb
- mysql-server
maridb_packages:
- - apt-transport-https
- - debconf-utils
- - libaio1
- - libc6
- - libdbd-mysql-perl
- - libgcc1
- - libgcrypt20
- - libstdc++6
- - python-software-properties
- - mariadb-client
- - galera-3
- - rsync
+ - apt-transport-https
+ - debconf-utils
+ - libaio1
+ - libc6
+ - libdbd-mysql-perl
+ - libgcc1
+ - libgcrypt20
+ - libstdc++6
+ - python-software-properties
+ - mariadb-client
+ - galera-3
+ - rsync
- socat
- mariadb-galera-server-10.0
- python-mysqldb
diff --git a/deploy/adapters/ansible/roles/ext-network/handlers/main.yml b/deploy/adapters/ansible/roles/ext-network/handlers/main.yml
index 36e39072..263b2c5f 100644
--- a/deploy/adapters/ansible/roles/ext-network/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/ext-network/handlers/main.yml
@@ -16,7 +16,7 @@
- name: kill dnsmasq
command: killall dnsmasq
- ignore_errors: True
+ ignore_errors: "True"
- name: restart neutron-dhcp-agent
service: name=neutron-dhcp-agent state=restarted enabled=yes
@@ -26,4 +26,4 @@
- name: restart xorp
service: name=xorp state=restarted enabled=yes sleep=10
- ignore_errors: True
+ ignore_errors: "True"
diff --git a/deploy/adapters/ansible/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/roles/ext-network/tasks/main.yml
index 10b33b5f..0fc3ee3a 100644
--- a/deploy/adapters/ansible/roles/ext-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ext-network/tasks/main.yml
@@ -13,7 +13,7 @@
- name: restart api server
service: name={{ item }} state=restarted enabled=yes
with_items: api_services | union(api_services_noarch)
- ignore_errors: True
+ ignore_errors: "True"
- name: restart neutron server
service: name=neutron-server state=restarted enabled=yes
@@ -28,8 +28,9 @@
{{ public_net_info.network }} \
--provider:network_type {{ public_net_info.type }} \
--provider:physical_network {{ public_net_info.provider_network }} \
- --router:external True
- when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
+ --router:external "True"
+ when: public_net_info.enable == True
+ and inventory_hostname == groups['controller'][0]
- name: create external subnet
shell:
@@ -41,5 +42,5 @@
--allocation-pool \
start={{ public_net_info.floating_ip_start }},end={{ public_net_info.floating_ip_end }} \
{{ public_net_info.network }} {{ public_net_info.floating_ip_cidr }}
- when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
-
+ when: public_net_info.enable == True
+ and inventory_hostname == groups['controller'][0]
diff --git a/deploy/adapters/ansible/roles/ext-network/vars/Debian.yml b/deploy/adapters/ansible/roles/ext-network/vars/Debian.yml
index 0b5c78b6..8cebcb48 100644
--- a/deploy/adapters/ansible/roles/ext-network/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ext-network/vars/Debian.yml
@@ -15,4 +15,3 @@ api_services:
- heat-api-cfn
- aodh-api
- cinder-api
-
diff --git a/deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml b/deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml
index 886401fd..f595d7ba 100644
--- a/deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml
@@ -14,4 +14,3 @@ api_services:
- openstack-heat-api
- openstack-heat-api-cfn
- openstack-cinder-api
-
diff --git a/deploy/adapters/ansible/roles/glance/tasks/glance_config.yml b/deploy/adapters/ansible/roles/glance/tasks/glance_config.yml
index 2df75ca4..6ea5112f 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/glance_config.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/glance_config.yml
@@ -8,9 +8,8 @@
##############################################################################
---
- name: sync glance db
- #glance_manage: action=dbsync
shell: su -s /bin/sh -c 'glance-manage db sync' glance
- ignore_errors: True
+ ignore_errors: "True"
notify:
- restart glance services
diff --git a/deploy/adapters/ansible/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
index f0176c89..36d176f2 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
@@ -16,28 +16,28 @@
with_items:
- rpcbind
- nfs-utils
- run_once: True
+ run_once: "True"
- name: create image directory
local_action: file path=/opt/images state=directory mode=0777
- run_once: True
+ run_once: "True"
- name: remove nfs config item if exist
local_action: lineinfile dest=/etc/exports state=absent
regexp="^/opt/images"
- run_once: True
+ run_once: "True"
- name: update nfs config
local_action: lineinfile dest=/etc/exports state=present
line="/opt/images *(rw,insecure,sync,all_squash)"
- run_once: True
+ run_once: "True"
- name: restart compass nfs service
local_action: service name={{ item }} state=restarted enabled=yes
with_items:
- - rpcbind
- - nfs-server
- run_once: True
+ - rpcbind
+ - nfs-server
+ run_once: "True"
- name: get mount info
command: mount
@@ -57,10 +57,12 @@
- name: mount image directory
shell: |
- mkdir -p /var/lib/glance/images
- mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images
- sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
+ mkdir -p /var/lib/glance/images;
+ mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images \
+ /var/lib/glance/images;
+ sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab;
+ echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs \
+ nfsvers=3 >> /etc/fstab;
when: mount_info.stdout.find('images') == -1
retries: 5
delay: 3
diff --git a/deploy/adapters/ansible/roles/heat/handlers/main.yml b/deploy/adapters/ansible/roles/heat/handlers/main.yml
index 77946a51..84bda2e3 100644
--- a/deploy/adapters/ansible/roles/heat/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/heat/handlers/main.yml
@@ -13,4 +13,3 @@
- name: remove heat-sqlite-db
shell: rm /var/lib/heat/heat.sqlite || touch heat.sqlite.db.removed
-
diff --git a/deploy/adapters/ansible/roles/heat/tasks/heat_config.yml b/deploy/adapters/ansible/roles/heat/tasks/heat_config.yml
index 911a19ed..d0835860 100644
--- a/deploy/adapters/ansible/roles/heat/tasks/heat_config.yml
+++ b/deploy/adapters/ansible/roles/heat/tasks/heat_config.yml
@@ -13,4 +13,3 @@
- restart heat service
- meta: flush_handlers
-
diff --git a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
index 15334d40..fd0f6eaa 100644
--- a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
+++ b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
@@ -18,11 +18,13 @@
with_items: "{{ services | union(services_noarch) }}"
- name: create heat user domain
- shell: >
+ shell: |
. /opt/admin-openrc.sh;
openstack domain create --description "Stack projects and users" heat;
- openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
- openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
+ openstack user create --domain heat --password {{ HEAT_PASS }} \
+ heat_domain_admin;
+ openstack role add --domain heat --user-domain heat \
+ --user heat_domain_admin admin;
openstack role create heat_stack_owner;
openstack role add --project demo --user demo heat_stack_owner;
when: inventory_hostname == groups['controller'][0]
@@ -34,4 +36,3 @@
notify:
- restart heat service
- remove heat-sqlite-db
-
diff --git a/deploy/adapters/ansible/roles/heat/vars/Debian.yml b/deploy/adapters/ansible/roles/heat/vars/Debian.yml
index 64608ca1..4c978517 100644
--- a/deploy/adapters/ansible/roles/heat/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/heat/vars/Debian.yml
@@ -17,4 +17,3 @@ services:
- heat-api
- heat-api-cfn
- heat-engine
-
diff --git a/deploy/adapters/ansible/roles/heat/vars/RedHat.yml b/deploy/adapters/ansible/roles/heat/vars/RedHat.yml
index 680b161f..435afd3f 100644
--- a/deploy/adapters/ansible/roles/heat/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/heat/vars/RedHat.yml
@@ -12,7 +12,7 @@ services:
- openstack-heat-api-cfn
- openstack-heat-engine
-packages:
+packages:
- openstack-heat-api
- openstack-heat-api-cfn
- openstack-heat-engine
diff --git a/deploy/adapters/ansible/roles/heat/vars/main.yml b/deploy/adapters/ansible/roles/heat/vars/main.yml
index 7f867d2a..f6fef749 100644
--- a/deploy/adapters/ansible/roles/heat/vars/main.yml
+++ b/deploy/adapters/ansible/roles/heat/vars/main.yml
@@ -10,4 +10,3 @@
packages_noarch: []
services_noarch: []
-
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
index 33892c5f..ac3ff8fe 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
@@ -36,11 +36,12 @@
- restart keystone services
- name: Distribute the fernet key repository
- shell: rsync -e 'ssh -o StrictHostKeyChecking=no' \
- -avz \
- --delete \
- /etc/keystone/fernet-keys \
- root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
+ shell: |
+ rsync -e 'ssh -o StrictHostKeyChecking=no' \
+ -avz \
+ --delete \
+ /etc/keystone/fernet-keys \
+ root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
with_items: "{{ groups['controller'][1:] }}"
notify:
- restart keystone services
@@ -69,11 +70,12 @@
- restart keystone services
- name: Distribute the credential key repository
- shell: rsync -e 'ssh -o StrictHostKeyChecking=no' \
- -avz \
- --delete \
- /etc/keystone/credential-keys \
- root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
+ shell: |
+ rsync -e 'ssh -o StrictHostKeyChecking=no' \
+ -avz \
+ --delete \
+ /etc/keystone/credential-keys \
+ root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
with_items: "{{ groups['controller'][1:] }}"
notify:
- restart keystone services
@@ -93,4 +95,3 @@
- name: wait for keystone ready
wait_for: port=35357 delay=15 timeout=60 host={{ internal_ip }}
-
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml
index ab71fab4..2f5aefeb 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml
@@ -11,17 +11,20 @@
shell:
. /opt/admin-openrc.sh;
openstack endpoint set \
- --interface public \
- --url {{ item.publicurl }} \
- $(openstack endpoint list | grep keystone | grep public | awk '{print $2}');
+ --interface public \
+ --url {{ item.publicurl }} \
+ $(openstack endpoint list | grep keystone | grep public \
+ | awk '{print $2}');
openstack endpoint set \
- --interface internal \
- --url {{ item.internalurl }} \
- $(openstack endpoint list | grep keystone | grep internal | awk '{print $2}');
+ --interface internal \
+ --url {{ item.internalurl }} \
+ $(openstack endpoint list | grep keystone | grep internal \
+ | awk '{print $2}');
openstack endpoint set \
- --interface admin \
- --url {{ item.adminurl }} \
- $(openstack endpoint list | grep keystone | grep admin | awk '{print $2}');
+ --interface admin \
+ --url {{ item.adminurl }} \
+ $(openstack endpoint list | grep keystone | grep admin \
+ | awk '{print $2}');
with_items: "{{ os_services[0:1] }}"
register: result
until: result.rc == 0
@@ -123,4 +126,3 @@
until: result.rc == 0
retries: 10
delay: 5
-
diff --git a/deploy/adapters/ansible/roles/keystone/vars/main.yml b/deploy/adapters/ansible/roles/keystone/vars/main.yml
index 916d6608..5dbc5628 100644
--- a/deploy/adapters/ansible/roles/keystone/vars/main.yml
+++ b/deploy/adapters/ansible/roles/keystone/vars/main.yml
@@ -173,13 +173,6 @@ os_users:
tenant: service
tenant_description: "Service Tenant"
-# - user: congress
-# password: "{{ CONGRESS_PASS }}"
-# email: congress@admin.com
-# role: admin
-# tenant: service
-# tenant_description: "Service Tenant"
-
- user: demo
password: "{{ DEMO_PASS }}"
email: heat@demo.com
@@ -187,10 +180,9 @@ os_users:
tenant: demo
tenant_description: "Demo Tenant"
-# - user: swift
-# password: "{{ CINDER_PASS }}"
-# email: swift@admin.com
-# role: admin
-# tenant: service
-# tenant_description: "Service Tenant"
-
+# - user: congress
+# password: "{{ CONGRESS_PASS }}"
+# email: congress@admin.com
+# role: admin
+# tenant: service
+# tenant_description: "Service Tenant"
diff --git a/deploy/adapters/ansible/roles/kvmfornfv/defaults/Debian.yaml b/deploy/adapters/ansible/roles/kvmfornfv/defaults/Debian.yml
index eef684f4..d273808a 100644
--- a/deploy/adapters/ansible/roles/kvmfornfv/defaults/Debian.yaml
+++ b/deploy/adapters/ansible/roles/kvmfornfv/defaults/Debian.yml
@@ -13,4 +13,3 @@ packages:
- grub-pc
- zlib1g
- libglib2.0
-
diff --git a/deploy/adapters/ansible/roles/kvmfornfv/defaults/RedHat.yaml b/deploy/adapters/ansible/roles/kvmfornfv/defaults/RedHat.yml
index c462bde2..8e6551e8 100644
--- a/deploy/adapters/ansible/roles/kvmfornfv/defaults/RedHat.yaml
+++ b/deploy/adapters/ansible/roles/kvmfornfv/defaults/RedHat.yml
@@ -14,5 +14,3 @@ packages:
- gettext
- qemu-kvm
- grub2
-
-
diff --git a/deploy/adapters/ansible/roles/kvmfornfv/defaults/main.yml b/deploy/adapters/ansible/roles/kvmfornfv/defaults/main.yml
index e5343ccc..d5549581 100644
--- a/deploy/adapters/ansible/roles/kvmfornfv/defaults/main.yml
+++ b/deploy/adapters/ansible/roles/kvmfornfv/defaults/main.yml
@@ -13,4 +13,3 @@ kernel_path: "{{ path }}/kernel"
qemu_path: "{{ path }}/qemu"
qemu_build_dir: "{{ qemu_path }}/build"
package: kvmfornfv.tar.gz
-
diff --git a/deploy/adapters/ansible/roles/kvmfornfv/tasks/main.yml b/deploy/adapters/ansible/roles/kvmfornfv/tasks/main.yml
index 380882bb..a8b24b34 100644
--- a/deploy/adapters/ansible/roles/kvmfornfv/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/kvmfornfv/tasks/main.yml
@@ -12,7 +12,9 @@
register: http_server
- name: download kvmfornfv package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/kvmfornfv/{{ package }}" dest=/tmp/{{ package }}
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/kvmfornfv/{{ package }}"
+ dest: /tmp/{{ package }}
- name: extract kvmfornfv files
command: su -s /bin/sh -c "tar xzf /tmp/{{ package }} -C /"
@@ -24,4 +26,3 @@
- name: Update grub on Ubuntu
shell: grub-mkconfig -o /boot/grub/grub.conf
when: ansible_os_family == 'Debian'
-
diff --git a/deploy/adapters/ansible/roles/memcached/vars/Debian.yml b/deploy/adapters/ansible/roles/memcached/vars/Debian.yml
index 277bf3ba..348a26b3 100644
--- a/deploy/adapters/ansible/roles/memcached/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/memcached/vars/Debian.yml
@@ -11,5 +11,3 @@ packages:
- python-memcache
services: []
-
-
diff --git a/deploy/adapters/ansible/roles/monitor/tasks/main.yml b/deploy/adapters/ansible/roles/monitor/tasks/main.yml
index b31b91e5..fc072585 100644
--- a/deploy/adapters/ansible/roles/monitor/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/monitor/tasks/main.yml
@@ -10,7 +10,10 @@
- include_vars: "{{ ansible_os_family }}.yml"
- name: copy service check file
- copy: src=check_{{ ansible_os_family }}_service.sh dest=/usr/local/bin/check_service.sh mode=0777
+ copy:
+ src: check_{{ ansible_os_family }}_service.sh
+ dest: /usr/local/bin/check_service.sh
+ mode: 0777
- name: copy cron file
copy: src=root dest={{ cron_path }}/root mode=0600
@@ -19,4 +22,3 @@
service: name={{ cron }} state=restarted
- meta: flush_handlers
-
diff --git a/deploy/adapters/ansible/roles/monitor/vars/Debian.yml b/deploy/adapters/ansible/roles/monitor/vars/Debian.yml
index 225a1493..405ab453 100644
--- a/deploy/adapters/ansible/roles/monitor/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/monitor/vars/Debian.yml
@@ -9,4 +9,3 @@
---
cron: cron
cron_path: "/var/spool/cron/crontabs"
-
diff --git a/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml b/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml
index e4142b5f..c2ca2fcf 100644
--- a/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml
+++ b/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml
@@ -14,7 +14,3 @@
service: name={{ item }} state=restarted enabled=yes
with_items:
- nova-compute
-
-#- name: restart swift task
-# shell: swift-init all start
-# ignore_errors: True
diff --git a/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml b/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml
index 95dd2e89..ad030bda 100644
--- a/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml
+++ b/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml
@@ -8,7 +8,7 @@
##############################################################################
---
# install all packages
-- name: install keystone packages
+- name: install pip and unzip packages
shell: apt-get install -y python-pip unzip
# download master.zip
@@ -17,17 +17,22 @@
register: http_server
- name: download keystone-moon packages
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/master.zip" dest=/tmp/master.zip mode=0444
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/moon/master.zip"
+ dest: /tmp/master.zip
+ mode: 0444
- name: extract keystone-moon packages
unarchive: src=/tmp/master.zip dest=/tmp copy=no
# install all dependencies
- name: copy scripts
- copy: src=get_deb_depends.py dest=/tmp/get_deb_depends.py
+ copy: src=get_deb_depends.py dest=/tmp/get_deb_depends.py
- name: install keystone-moon dependencies
- shell: "apt-get install `python /tmp/get_deb_depends.py /tmp/moon-bin-master/*.deb`"
+ shell: |
+ apt-get install \
+ $(python /tmp/get_deb_depends.py /tmp/moon-bin-master/*.deb)
when: ansible_os_family == "Debian"
- name: delete configuration file
@@ -37,18 +42,18 @@
# install keystone moon
- name: copy scripts
- copy: src=deb.conf dest=/tmp/deb.conf
+ copy: src=deb.conf dest=/tmp/deb.conf
- name: install keystone moon
shell: >
export DEBIAN_FRONTEND="noninteractive";
sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
-#- name: install keystone moon
-# shell: >
-# export DEBIAN_FRONTEND="noninteractive";
-# sudo -E debconf-set-selections python-keystone < /tmp/deb.conf;
-# sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
+# - name: install keystone moon
+# shell: >
+# export DEBIAN_FRONTEND="noninteractive";
+# sudo -E debconf-set-selections python-keystone < /tmp/deb.conf;
+# sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
- name: stop keystone task
shell: >
@@ -63,20 +68,20 @@
path: /var/lib/keystone/keystone.db
state: absent
-#- name: update keystone conf
-# template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
+# - name: update keystone conf
+# template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
-#- name: assure listen port exist
-# lineinfile:
-# dest: '{{ apache_config_dir }}/ports.conf'
-# regexp: '{{ item.regexp }}'
-# line: '{{ item.line}}'
-# with_items:
-# - regexp: "^Listen {{ internal_ip }}:5000"
-# line: "Listen {{ internal_ip }}:5000"
-# - regexp: "^Listen {{ internal_ip }}:35357"
-# line: "Listen {{ internal_ip }}:35357"
+# - name: assure listen port exist
+# lineinfile:
+# dest: '{{ apache_config_dir }}/ports.conf'
+# regexp: '{{ item.regexp }}'
+# line: '{{ item.line}}'
+# with_items:
+# - regexp: "^Listen {{ internal_ip }}:5000"
+# line: "Listen {{ internal_ip }}:5000"
+# - regexp: "^Listen {{ internal_ip }}:35357"
+# line: "Listen {{ internal_ip }}:35357"
- name: update apache2 configs
template:
@@ -91,27 +96,32 @@
state: "link"
when: ansible_os_family == 'Debian'
-#- name: keystone source files
-# template: src={{ item }} dest=/opt/{{ item }}
-# with_items:
-# - admin-openrc.sh
-# - demo-openrc.sh
+# - name: keystone source files
+# template: src={{ item }} dest=/opt/{{ item }}
+# with_items:
+# - admin-openrc.sh
+# - demo-openrc.sh
# keystone paste ini
-- name: keystone paste ini 1
- shell: sudo cp /etc/keystone/keystone-paste.ini /etc/keystone/keystone-paste.ini.bak;
-
-- name: keystone paste ini 2
- shell: sudo sed "3i[pipeline:moon_pipeline]\npipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service\n\n[app:moon_service]\nuse = egg:keystone#moon_service\n" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini;
-
-- name: keystone paste ini 3
- shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini;
-
-- name: keystone paste ini 4
- shell: sudo sed "s/use = egg:Paste#urlmap/use = egg:Paste#urlmap\n\/moon = moon_pipeline/" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini;
+- name: backup keystone-paste.ini
+ shell: >
+ cp /etc/keystone/keystone-paste.ini /etc/keystone/keystone-paste.ini.bak;
-- name: keystone paste ini 5
- shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini;
+- name: config keystone-paste.ini
+ shell: >
+ sed -i "3i[pipeline:moon_pipeline]\n" /etc/keystone/keystone-paste.ini;
+ sed -i "5i[app:moon_service]\nuse = egg:keystone#moon_service\n" \
+ /etc/keystone/keystone-paste.ini;
+ sed -i "s/use = egg:Paste#urlmap/use = egg:Paste#urlmap\n\/moon = moon_pipeline/" \
+ /etc/keystone/keystone-paste.ini;
+
+- name: config keystone-paste.ini
+ blockinfile:
+ dest: /etc/keystone/keystone-paste.ini
+ insertafter: "pipeline:moon_pipeline"
+ block: >
+ pipeline = sizelimit url_normalize request_id build_auth_context
+ token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service
# moon log
- name: moon log
@@ -139,21 +149,14 @@
sudo /usr/bin/keystone-manage db_sync --extension moon;
when: inventory_hostname == haproxy_hosts.keys()[0]
-
-#############################################
- name: wait for keystone ready
wait_for: port=35357 delay=3 timeout=10 host={{ internal_ip }}
-#- name: cron job to purge expired tokens hourly
-# cron:
-# name: 'purge expired tokens'
-# special_time: hourly
-# job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1'
-
-#############################################
# moon workaround
- name: copy scripts
- copy: src=controllers.py dest=/usr/lib/python2.7/dist-packages/keystone/contrib/moon/controllers.py
+ copy:
+ src: controllers.py
+ dest: /usr/lib/python2.7/dist-packages/keystone/contrib/moon/controllers.py
# apache2 restart
- name: restart apache2
@@ -164,61 +167,55 @@
- name: install moon client
shell: sudo pip install /tmp/moon-bin-master/python-moonclient-0.1.tar.gz
-###################################################
-
-
-#- name: add tenants
-# keystone_user:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# tenant: "{{ item.tenant }}"
-# tenant_description: "{{ item.tenant_description }}"
-# with_items: "{{ os_users }}"
-# when: inventory_hostname == groups['controller'][0]
-#
-#- name: add users
-# keystone_user:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# user: "{{ item.user }}"
-# tenant: "{{ item.tenant }}"
-# password: "{{ item.password }}"
-# email: "{{ item.email }}"
-# with_items: "{{ os_users }}"
-# when: inventory_hostname == groups['controller'][0]
-#
-#- name: grant roles
-# keystone_user:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# user: "{{ item.user }}"
-# role: "{{ item.role }}"
-# tenant: "{{ item.tenant }}"
-# with_items: "{{ os_users }}"
-# when: inventory_hostname == groups['controller'][0]
-#
-#- name: add endpoints
-# keystone_service:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# name: "{{ item.name }}"
-# type: "{{ item.type }}"
-# region: "{{ item.region}}"
-# description: "{{ item.description }}"
-# publicurl: "{{ item.publicurl }}"
-# internalurl: "{{ item.internalurl }}"
-# adminurl: "{{ item.adminurl }}"
-# with_items: "{{ os_services }}"
-# when: inventory_hostname == groups['controller'][0]
-
-
-###################################################
+# - name: add tenants
+# keystone_user:
+# token: "{{ ADMIN_TOKEN }}"
+# endpoint: "http://{{ internal_ip }}:35357/v2.0"
+# tenant: "{{ item.tenant }}"
+# tenant_description: "{{ item.tenant_description }}"
+# with_items: "{{ os_users }}"
+# when: inventory_hostname == groups['controller'][0]
+
+# - name: add users
+# keystone_user:
+# token: "{{ ADMIN_TOKEN }}"
+# endpoint: "http://{{ internal_ip }}:35357/v2.0"
+# user: "{{ item.user }}"
+# tenant: "{{ item.tenant }}"
+# password: "{{ item.password }}"
+# email: "{{ item.email }}"
+# with_items: "{{ os_users }}"
+# when: inventory_hostname == groups['controller'][0]
+
+# - name: grant roles
+# keystone_user:
+# token: "{{ ADMIN_TOKEN }}"
+# endpoint: "http://{{ internal_ip }}:35357/v2.0"
+# user: "{{ item.user }}"
+# role: "{{ item.role }}"
+# tenant: "{{ item.tenant }}"
+# with_items: "{{ os_users }}"
+# when: inventory_hostname == groups['controller'][0]
+
+# - name: add endpoints
+# keystone_service:
+# token: "{{ ADMIN_TOKEN }}"
+# endpoint: "http://{{ internal_ip }}:35357/v2.0"
+# name: "{{ item.name }}"
+# type: "{{ item.type }}"
+# region: "{{ item.region}}"
+# description: "{{ item.description }}"
+# publicurl: "{{ item.publicurl }}"
+# internalurl: "{{ item.internalurl }}"
+# adminurl: "{{ item.adminurl }}"
+# with_items: "{{ os_services }}"
+# when: inventory_hostname == groups['controller'][0]
- name: update api-paste.ini
template: src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes
-#- name: update proxy-server conf
-# template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
+# - name: update proxy-server conf
+# template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
# restart nova
- name: restart nova
@@ -230,9 +227,9 @@
- nova-consoleauth
- nova-scheduler
-# restart swift
-#- name: restart swift
-# service: name={{ item }} state=restarted enabled=yes
-# with_items:
-# - swift-proxy
-# - memcached
+# restart swift
+# - name: restart swift
+# service: name={{ item }} state=restarted enabled=yes
+# with_items:
+# - swift-proxy
+# - memcached
diff --git a/deploy/adapters/ansible/roles/moon/vars/main.yml b/deploy/adapters/ansible/roles/moon/vars/main.yml
index cff8c7c2..6793c189 100644
--- a/deploy/adapters/ansible/roles/moon/vars/main.yml
+++ b/deploy/adapters/ansible/roles/moon/vars/main.yml
@@ -60,22 +60,6 @@ os_services:
internalurl: "http://{{ internal_vip.ip }}:8042"
adminurl: "http://{{ internal_vip.ip }}:8042"
-# - name: cinder
-# type: volume
-# region: RegionOne
-# description: "OpenStack Block Storage"
-# publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-#
-# - name: cinderv2
-# type: volumev2
-# region: RegionOne
-# description: "OpenStack Block Storage v2"
-# publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-
- name: heat
type: orchestration
region: RegionOne
@@ -92,6 +76,22 @@ os_services:
internalurl: "http://{{ internal_vip.ip }}:8000/v1"
adminurl: "http://{{ internal_vip.ip }}:8000/v1"
+# - name: cinder
+# type: volume
+# region: RegionOne
+# description: "OpenStack Block Storage"
+# publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
+# internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+# adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+
+# - name: cinderv2
+# type: volumev2
+# region: RegionOne
+# description: "OpenStack Block Storage v2"
+# publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
+# internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+# adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+
# - name: swift
# type: object-store
# region: RegionOne
@@ -163,10 +163,3 @@ os_users:
role: heat_stack_user
tenant: demo
tenant_description: "Demo Tenant"
-
-# - user: swift
-# password: "{{ CINDER_PASS }}"
-# email: swift@admin.com
-# role: admin
-# tenant: service
-# tenant_description: "Service Tenant"
diff --git a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml
index 50c062f5..b67df280 100644
--- a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml
+++ b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml
@@ -9,7 +9,8 @@
---
- name: check if i in the node list
shell: |
- rabbitmqctl -q cluster_status | grep '\[{nodes,'|grep {{ inventory_hostname }}
+ rabbitmqctl -q cluster_status | grep '\[{nodes,' \
+ | grep {{ inventory_hostname }}
changed_when: is_member.rc != 0
failed_when: false
register: is_member
diff --git a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_install.yml b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_install.yml
index d6f7759e..01a92efd 100755
--- a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_install.yml
+++ b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_install.yml
@@ -31,7 +31,7 @@
service:
name: rabbitmq-server
state: stopped
- enabled: yes
+ enabled: "yes"
- name: replace cookie
copy:
diff --git a/deploy/adapters/ansible/roles/mq/vars/Debian.yml b/deploy/adapters/ansible/roles/mq/vars/Debian.yml
index a8f73e11..b9f46bdf 100644
--- a/deploy/adapters/ansible/roles/mq/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/mq/vars/Debian.yml
@@ -9,4 +9,3 @@
---
services: []
packages: []
-
diff --git a/deploy/adapters/ansible/roles/mq/vars/RedHat.yml b/deploy/adapters/ansible/roles/mq/vars/RedHat.yml
index a8f73e11..b9f46bdf 100644
--- a/deploy/adapters/ansible/roles/mq/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/mq/vars/RedHat.yml
@@ -9,4 +9,3 @@
---
services: []
packages: []
-
diff --git a/deploy/adapters/ansible/roles/neutron-common/handlers/main.yml b/deploy/adapters/ansible/roles/neutron-common/handlers/main.yml
index a86a1456..fcb7e1cb 100644
--- a/deploy/adapters/ansible/roles/neutron-common/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-common/handlers/main.yml
@@ -8,4 +8,5 @@
##############################################################################
---
- name: restart neutron-plugin-openvswitch-agent
- service: name={{ neutron_plugin_openvswitch_agent_services }} state=restarted enabled=yes
+ service: name={{ neutron_plugin_openvswitch_agent_services }}
+ state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
index 3e58cc38..2f3d6491 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
@@ -40,14 +40,17 @@
- name: fix openstack neutron plugin config file
shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' \
+ /usr/lib/systemd/system/neutron-openvswitch-agent.service
systemctl daemon-reload
when: ansible_os_family == 'RedHat'
- name: fix openstack neutron plugin config file ubuntu
shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' \
+ /etc/init/neutron-openvswitch-agent.conf
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' \
+ /etc/init.d/neutron-openvswitch-agent
when: ansible_os_family == "Debian"
- name: generate neutron compute service list
@@ -60,11 +63,16 @@
backup=yes
- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
+ file:
+ src: /etc/neutron/plugins/ml2/ml2_conf.ini
+ dest: /etc/neutron/plugin.ini
+ state: link
- name: config neutron
- template: src=neutron.conf
- dest=/etc/neutron/neutron.conf backup=yes
+ template:
+ src: neutron.conf
+ dest: /etc/neutron/neutron.conf
+ backup: "yes"
notify:
- restart neutron compute service
- restart nova-compute services
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
index 47345f7a..6953a17b 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
@@ -9,9 +9,11 @@
---
- name: neutron-db-manage upgrade
- shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
+ shell: |
+ neutron-db-manage --config-file=/etc/neutron/neutron.conf \
+ --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
register: result
- run_once: True
+ run_once: "True"
until: result.rc == 0
retries: 10
delay: 5
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
index cc222224..63a80fa4 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
@@ -40,7 +40,13 @@
template: src=templates/neutron.conf dest=/etc/neutron/neutron.conf backup=yes
- name: update ml2 plugin conf
- template: src=templates/ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
+ template:
+ src: templates/ml2_conf.ini
+ dest: /etc/neutron/plugins/ml2/ml2_conf.ini
+ backup: "yes"
- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
+ file:
+ src: /etc/neutron/plugins/ml2/ml2_conf.ini
+ dest: /etc/neutron/plugin.ini
+ state: link
diff --git a/deploy/adapters/ansible/roles/neutron-controller/vars/main.yml b/deploy/adapters/ansible/roles/neutron-controller/vars/main.yml
index 928b0bda..aae4fe3c 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/vars/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/vars/main.yml
@@ -11,4 +11,3 @@ packages_noarch: []
services_noarch:
- neutron-server
-
diff --git a/deploy/adapters/ansible/roles/neutron-network/handlers/main.yml b/deploy/adapters/ansible/roles/neutron-network/handlers/main.yml
index a279b5d5..cd98581a 100644
--- a/deploy/adapters/ansible/roles/neutron-network/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/handlers/main.yml
@@ -24,8 +24,8 @@
- name: kill dnsmasq
command: killall dnsmasq
- ignore_errors: True
+ ignore_errors: "True"
- name: restart xorp
service: name=xorp state=restarted enabled=yes sleep=10
- ignore_errors: True
+ ignore_errors: "True"
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/firewall.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/firewall.yml
index 49b6a7f1..9aa24045 100755
--- a/deploy/adapters/ansible/roles/neutron-network/tasks/firewall.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/firewall.yml
@@ -27,4 +27,5 @@
when: ansible_os_family == "Debian"
- name: update firewall related conf
- shell: crudini --set --list /etc/neutron/neutron.conf DEFAULT service_plugins firewall
+ shell: |
+ crudini --set --list /etc/neutron/neutron.conf DEFAULT service_plugins firewall
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
index 753d281a..1e204405 100644
--- a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
@@ -54,14 +54,17 @@
- name: fix openstack neutron plugin config file
shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' \
+ /usr/lib/systemd/system/neutron-openvswitch-agent.service;
systemctl daemon-reload
when: ansible_os_family == 'RedHat'
- name: fix openstack neutron plugin config file ubuntu
shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' \
+ /etc/init/neutron-openvswitch-agent.conf;
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' \
+ /etc/init.d/neutron-openvswitch-agent;
when: ansible_os_family == "Debian"
- name: config l3 agent
@@ -86,7 +89,10 @@
backup=yes
- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
+ file:
+ src: /etc/neutron/plugins/ml2/ml2_conf.ini
+ dest: /etc/neutron/plugin.ini
+ state: link
- name: config neutron
template: src=templates/neutron.conf
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/odl.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/odl.yml
index dd1e4786..1105b45d 100644
--- a/deploy/adapters/ansible/roles/neutron-network/tasks/odl.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/odl.yml
@@ -15,7 +15,11 @@
register: ovs_uuid
- name: set bridge_mappings
- command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }}
+ command: |
+ ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} \
+ other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }}
- name: set local ip
- command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:local_ip={{ internal_ip }}
+ command: |
+ ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} \
+ other_config:local_ip={{ internal_ip }}
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/vpn.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/vpn.yml
index 925eb709..22f4411a 100755
--- a/deploy/adapters/ansible/roles/neutron-network/tasks/vpn.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/vpn.yml
@@ -27,9 +27,12 @@
when: ansible_os_family == "Debian"
- name: update vpn related conf
- shell: crudini --set /etc/neutron/l3_agent.ini vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver;
- crudini --set --list /etc/neutron/neutron.conf DEFAULT service_plugins vpnaas
- crudini --set /etc/neutron/neutron_vpnaas.conf service_providers service_provider 'VPN:strongswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default'
+ shell: |
+ crudini --set /etc/neutron/l3_agent.ini vpnagent vpn_device_driver \
+ neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver;
+ crudini --set --list /etc/neutron/neutron.conf DEFAULT service_plugins vpnaas;
+ crudini --set /etc/neutron/neutron_vpnaas.conf service_providers service_provider \
+ 'VPN:strongswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default'
- name: make sure rootwrap.d dir exist
file: path=/etc/neutron/rootwrap.d state=directory mode=0755
@@ -44,4 +47,3 @@
- strongswan
notify:
- restart vpn agent service
-
diff --git a/deploy/adapters/ansible/roles/neutron-network/vars/RedHat.yml b/deploy/adapters/ansible/roles/neutron-network/vars/RedHat.yml
index aa35dde6..1d0243a4 100644
--- a/deploy/adapters/ansible/roles/neutron-network/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/vars/RedHat.yml
@@ -26,4 +26,3 @@ openvswitch_agent: neutron-openvswitch-agent
xorp_packages:
- openssl098e
- #- xorp
diff --git a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
index cdbb6472..321edaab 100644
--- a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
@@ -45,7 +45,7 @@
- name: generate neutron control service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: "{{ services | union(services_noarch) }}"
-#'
+
- name: remove nova sqlite db
shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
diff --git a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
index f332c97a..f3c4687d 100644
--- a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
+++ b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
@@ -9,7 +9,7 @@
---
- name: nova api db sync
shell: su -s /bin/sh -c "nova-manage api_db sync" nova
- ignore_errors: True
+ ignore_errors: "True"
notify:
- restart nova service
diff --git a/deploy/adapters/ansible/roles/odl_cluster/handlers/main.yml b/deploy/adapters/ansible/roles/odl_cluster/handlers/main.yml
index 17b8c113..2650d072 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/handlers/main.yml
@@ -9,4 +9,3 @@
---
- name: restart odl service
service: name=opendaylight state=restarted
- #command: su -s /bin/sh -c "{{ odl_home }}/bin/stop;{{ odl_home }}/bin/start;"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml
index efd359db..ebca876a 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml
@@ -13,22 +13,21 @@
register: http_server
- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-#"
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}"
+ dest: /opt/{{ jdk8_pkg_name }}
- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-
-#"
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}"
+ dest: /opt/
- name: download odl package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/odl/{{ odl_pkg_url }}" dest=/opt/{{ odl_pkg_name }}
-
-# "
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/odl/{{ odl_pkg_url }}"
+ dest: /opt/{{ odl_pkg_name }}
- name: download odl pip package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/pip/{{ networking_odl_pkg_name }}" dest=/opt/{{ networking_odl_pkg_name }}
-
-#"
-
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/pip/{{ networking_odl_pkg_name }}"
+ dest: /opt/{{ networking_odl_pkg_name }}
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_02_unarchive_odl_and_jdk.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_02_unarchive_odl_and_jdk.yml
index ff82eba1..816e435a 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_02_unarchive_odl_and_jdk.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_02_unarchive_odl_and_jdk.yml
@@ -15,4 +15,6 @@
command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
- name: extract odl package
- command: su -s /bin/sh -c "tar xzf /opt/{{ odl_pkg_name }} -C {{ odl_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" odl
+ command: |
+ su -s /bin/sh -c "tar xzf /opt/{{ odl_pkg_name }} -C {{ odl_home }} \
+ --strip-components 1 --no-overwrite-dir -k --skip-old-files" odl
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
index 8d71606f..f4e87955 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
@@ -49,5 +49,3 @@
template:
src: jetty.xml
dest: "{{ odl_home }}/etc/jetty.xml"
-
-
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
index f44b373b..7fc10e7d 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
@@ -8,7 +8,7 @@
##############################################################################
---
-- name: turn off neutron-server neutron-plugins-openvswitch-agent Daemon on control node
+- name: turn off neutron server and agent Daemon on control node
shell: >
sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ;
sed -i '/neutron-server/d' /opt/service;
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_07_start_check_odl.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_07_start_check_odl.yml
index 32fb885a..ee8c3585 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_07_start_check_odl.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_07_start_check_odl.yml
@@ -26,4 +26,6 @@
when: ansible_os_family == "RedHat"
- name: check if opendaylight running
- shell: netstat -lpen --tcp | grep java | grep 6653; while [ $? -ne 0 ]; do sleep 10; netstat -lpen --tcp | grep java | grep 6653; done
+ shell: |
+ netstat -lpen --tcp | grep java | grep 6653;
+ while [ $? -ne 0 ]; do sleep 10; netstat -lpen --tcp | grep java | grep 6653; done
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml
index abb91c35..12c1fd25 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml
@@ -9,26 +9,39 @@
---
- name: configure l2 configuration
- shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-prv;
+ shell: |
+ crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge \
+ br-prv;
when: odl_l3_agent == "Disable"
- name: configure l3 configuration
- shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-ex;
+ shell: |
+ crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge \
+ br-ex;
when: odl_l3_agent == "Enable"
- name: configure odl l3 driver
- shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
+ shell: |
+ crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins \
+ networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
when: odl_l3_agent == "Enable"
- name: configure metadata for l3 configuration
- shell: crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True;
+ shell: |
+ crudini --set /etc/neutron/dhcp_agent.ini DEFAULT \
+ enable_isolated_metadata "True";
when: odl_l3_agent == "Enable"
- name: drop and recreate neutron database
- shell: mysql -e "drop database if exists neutron;";
- mysql -e "create database neutron character set utf8;";
- mysql -e "grant all on neutron.* to 'neutron'@'%' identified by '{{ NEUTRON_DBPASS }}';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
+ shell: |
+ mysql -e "drop database if exists neutron;";
+ mysql -e "create database neutron character set utf8;";
+ mysql -e "grant all on neutron.* to 'neutron'@'%' identified by \
+ '{{ NEUTRON_DBPASS }}';";
+ su -s /bin/sh -c "neutron-db-manage \
+ --config-file /etc/neutron/neutron.conf \
+ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini \
+ upgrade head" neutron;
when: inventory_hostname == haproxy_hosts.keys()[0]
tags:
- test_odl
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_00_switch_off_neutron_openvswitch_agent.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_00_switch_off_neutron_openvswitch_agent.yml
index 50359df4..08f8a07c 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_00_switch_off_neutron_openvswitch_agent.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_00_switch_off_neutron_openvswitch_agent.yml
@@ -16,6 +16,8 @@
- name: remove Neutron's openvswitch agent services
shell: >
update-rc.d -f {{ service_ovs_agent_name }} remove;
- mv /etc/init.d/{{ service_ovs_agent_name }} /home/{{ service_ovs_agent_name }};
- mv /etc/init/{{ service_ovs_agent_name }}.conf /home/{{ service_ovs_agent_name }}.conf;
+ mv /etc/init.d/{{ service_ovs_agent_name }} \
+ /home/{{ service_ovs_agent_name }};
+ mv /etc/init/{{ service_ovs_agent_name }}.conf \
+ /home/{{ service_ovs_agent_name }}.conf;
when: ansible_os_family == "Debian"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
index 04f0ec61..f060e7bb 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
@@ -11,20 +11,23 @@
- name: restart keepalived to recover external IP before check br-int
shell: service keepalived restart
when: inventory_hostname in groups['odl']
- ignore_errors: True
+ ignore_errors: "True"
-- name: restart opendaylight (for newton, opendaylight doesn't listen 6640 port, need restart)
+- name: restart opendaylight
shell: service opendaylight restart; sleep 60
when: inventory_hostname in groups['odl']
- ignore_errors: True
+ ignore_errors: "True"
- name: set opendaylight as the manager
- command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
+ command: |
+ su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
- name: check br-int
- shell: ovs-vsctl list-br | grep br-int; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-int; done
+ shell: |
+ ovs-vsctl list-br | grep br-int; while [ $? -ne 0 ]; do sleep 10; \
+ ovs-vsctl list-br | grep br-int; done
- name: set local ip in openvswitch
- shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '};
-
-#'
+ shell: |
+ ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) \
+ other_config={'local_ip'=' {{ internal_ip }} '};
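The check br-int task above polls in a shell while-loop until the bridge appears once OpenDaylight has become the OVS manager. An equivalent bounded wait can be written with Ansible's until/retries/delay task keywords (the 10-second interval matches the loop above); it fails the play after the retries run out instead of looping forever — a sketch:

    - name: check br-int (bounded retry sketch)
      shell: ovs-vsctl list-br | grep br-int
      register: brint_check
      until: brint_check.rc == 0
      retries: 30
      delay: 10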
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_00_recover_external_network_l3.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_00_recover_external_network_l3.yml
index 7cf590a9..f315ebbf 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_00_recover_external_network_l3.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_00_recover_external_network_l3.yml
@@ -8,7 +8,9 @@
##############################################################################
---
- name: check br-ex
- shell: ovs-vsctl list-br | grep br-ex; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-ex; done
+ shell: |
+ ovs-vsctl list-br | grep br-ex;
+ while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-ex; done
- name: add ovs uplink
openvswitch_port: bridge=br-ex port={{ item["interface"] }} state=present
@@ -19,12 +21,14 @@
shell: sleep 10
- name: set external nic in openvswitch
- shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config:provider_mappings=br-ex:{{ item["interface"] }}
+ shell: |
+ ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) \
+ other_config:provider_mappings=br-ex:{{ item["interface"] }}
with_items: "{{ network_cfg['provider_net_mappings'] }}"
when: item["type"] == "ovs"
- name: copy recovery script
- copy: src={{ item }} dest=/opt/setup_networks
+ copy: src={{ item }} dest=/opt/setup_networks
with_items:
- recover_network_odl_l3.py
- setup_networks_odl_l3.py
@@ -37,4 +41,5 @@
when: inventory_hostname in groups['odl']
- name: modify net-init
- shell: sed -i 's/setup_networks.py/setup_networks_odl_l3.py/g' /etc/init.d/net_init
+ shell: sed -i 's/setup_networks.py/setup_networks_odl_l3.py/g' \
+ /etc/init.d/net_init
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_01_recover_external_network_l2.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_01_recover_external_network_l2.yml
index ad204cf9..54630898 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_01_recover_external_network_l2.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_01_recover_external_network_l2.yml
@@ -13,12 +13,13 @@
when: item["type"] == "ovs"
- name: add ovs uplink
- openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
+ openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }}
+ state=present
with_items: "{{ network_cfg['provider_net_mappings'] }}"
when: item["type"] == "ovs"
- name: copy recovery script
- copy: src={{ item }} dest=/opt/setup_networks
+ copy: src={{ item }} dest=/opt/setup_networks
with_items:
- recover_network.py
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_recover_external_network.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_recover_external_network.yml
index 40963260..bffaf79f 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_recover_external_network.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_03_recover_external_network.yml
@@ -19,4 +19,4 @@
- name: restart keepalived to recover external IP
shell: service keepalived restart
when: inventory_hostname in groups['odl']
- ignore_errors: True
+ ignore_errors: "True"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_04_setup_ml2.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_04_setup_ml2.yml
index 24149735..ae0dd6cc 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_04_setup_ml2.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_04_setup_ml2.yml
@@ -10,9 +10,12 @@
- name: configure opendaylight -> ml2
shell: >
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs enable_tunneling True;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 \
+ mechanism_drivers opendaylight;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 \
+ tenant_network_types vxlan;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs \
+ enable_tunneling "True";
- name: copy ml2 configuration script
template:
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/04_odl_l3_nova.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/04_odl_l3_nova.yml
index 4e53f4aa..8a6435b5 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/04_odl_l3_nova.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/04_odl_l3_nova.yml
@@ -12,4 +12,4 @@
shell: crudini --set /etc/nova/nova.conf DEFAULT force_config_drive true
- name: restart all nova service
- shell: for i in `cat /opt/service | grep nova` ; do service $i restart; done
+ shell: for i in `cat /opt/service | grep nova` ; do service $i restart; done
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
index 7eddf7fa..c9d93709 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
@@ -12,23 +12,23 @@
shell: rm -f /opt/cluster; touch /opt/cluster;
- name: combine odl controller
- shell: echo "{{ ip_settings[item.1]['mgmt']['ip'] }} \c" >> /opt/cluster; >> /opt/cluster;
+ shell: |
+ echo "{{ ip_settings[item.1]['mgmt']['ip'] }} \c" >> /opt/cluster; \
+ >> /opt/cluster;
with_indexed_items: groups['odl']
- name: combine odl controller
shell: cat /opt/cluster
register: cluster
-#- debug: msg="{{ cluster.stdout_lines[0] }}"
-
- name: combine odl controller
shell: uname -n | cut -b 5,5
register: number
-#- debug: msg="{{ number.stdout_lines[0] }}"
-
-- debug: msg="{{ odl_home }}/bin/configure_cluster.sh {{ number.stdout_lines[0] }} {{ cluster.stdout_lines[0] }}"
+- debug: msg="{{ odl_home }}/bin/configure_cluster.sh
+ {{ number.stdout_lines[0] }} {{ cluster.stdout_lines[0] }}"
- name: configure odl controller in cluster
- shell: "{{ odl_home }}/bin/configure_cluster.sh {{ number.stdout_lines[0] }} {{ cluster.stdout_lines[0] }}"
-
+ shell: |
+ "{{ odl_home }}/bin/configure_cluster.sh {{ number.stdout_lines[0] }} \
+ {{ cluster.stdout_lines[0] }}"
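The tasks above collect the mgmt IPs of every host in the odl group into /opt/cluster, read this node's index out of its hostname, and hand both to configure_cluster.sh, OpenDaylight's bundled clustering script (node index first, then the ordered member IPs). A sketch of the same invocation built straight from inventory data — the extract and join filters are standard Jinja2/Ansible, while deriving the index from group order rather than from the hostname digit is an assumption of this sketch:

    - name: configure odl controller in cluster (inventory-driven sketch)
      shell: >
        {{ odl_home }}/bin/configure_cluster.sh {{ node_index }}
        {{ groups['odl'] | map('extract', ip_settings, ['mgmt', 'ip']) | join(' ') }}
      vars:
        node_index: "{{ groups['odl'].index(inventory_hostname) + 1 }}"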
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml
index b89b2823..ec05f6ce 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml
@@ -19,13 +19,18 @@
shell: rm -rf {{ odl_home }}/system/org/opendaylight/aaa/
- name: download apache maven package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/apache-maven-3.3.9-bin.tar.gz" dest=/opt/apache-maven-3.3.9-bin.tar.gz
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/moon/apache-maven-3.3.9-bin.tar.gz"
+ dest: /opt/apache-maven-3.3.9-bin.tar.gz
- name: create maven folder
shell: mkdir -p /opt/apache-maven-3.3.9/
- name: extract maven
- command: su -s /bin/sh -c "tar zxf /opt/apache-maven-3.3.9-bin.tar.gz -C /opt/apache-maven-3.3.9/ --strip-components 1 --no-overwrite-dir -k --skip-old-files" root
+ command: |
+ su -s /bin/sh -c "tar zxf /opt/apache-maven-3.3.9-bin.tar.gz -C \
+ /opt/apache-maven-3.3.9/ --strip-components 1 --no-overwrite-dir -k \
+ --skip-old-files" root
- name: install maven
shell: ln -s /opt/apache-maven-3.3.9/bin/mvn /usr/local/bin/mvn;
@@ -36,18 +41,17 @@
- name: copy settings.xml
template: src=settings.xml dest=/root/.m2/settings.xml
-#- name: upload swift lib
-# unarchive: src=odl-aaa-moon.tar.gz dest=/home/
-
- name: download odl-aaa-moon package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/{{ odl_aaa_moon }}" dest=/home/
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/moon/{{ odl_aaa_moon }}"
+ dest: /home/
- name: unarchive odl-aaa-moon package
command: su -s /bin/sh -c "tar xvf /home/{{ odl_aaa_moon }} -C /home/"
- name: install aaa
shell: >
- export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/usr/lib/jvm/java-8-oracle/bin:/opt/apache-maven-3.3.3/bin";
+ export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/opt/apache-maven-3.3.3/bin;
export JAVA_HOME="/usr/lib/jvm/java-8-oracle";
export _JAVA_OPTIONS="-Djava.net.preferIPv4Stack=true";
export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=512m";
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
index 640a264a..46a0d6fb 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
@@ -15,7 +15,7 @@ service_ovs_name: openvswitch-switch
service_ovs_agent_name: neutron-openvswitch-agent
service_file:
- src: opendaylight.service
- dst: /lib/systemd/system/opendaylight.service
+ src: opendaylight.service
+ dst: /lib/systemd/system/opendaylight.service
networking_odl_pkg_name: networking-odl-2.0.0.tar.gz
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml
index 1adadd5d..ef92dff6 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml
@@ -15,5 +15,5 @@ service_ovs_name: openvswitch
service_ovs_agent_name: neutron-openvswitch-agent
service_file:
- src: opendaylight
- dst: /etc/init.d/opendaylight
+ src: opendaylight
+ dst: /etc/init.d/opendaylight
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
index e5f52b42..88d4d07d 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
@@ -11,12 +11,38 @@ odl_username: admin
odl_password: admin
odl_api_port: 8181
-#odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.3.0-Lithium/distribution-karaf-0.3.0-Lithium.tar.gz
+# odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/
+# opendaylight/integration/distribution-karaf/0.3.0-Lithium/
+# distribution-karaf-0.3.0-Lithium.tar.gz
odl_pkg_url: karaf.tar.gz
odl_pkg_name: karaf.tar.gz
odl_home: "/opt/opendaylight-0.3.0/"
-odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'odl-restconf','odl-l2switch-switch','odl-openflowplugin-all','odl-mdsal-apidocs','odl-dlux-all','odl-adsal-northbound','odl-nsf-all','odl-ovsdb-openstack','odl-ovsdb-northbound','odl-dlux-core']
-odl_extra_features: ['odl-restconf-all','odl-mdsal-clustering','odl-openflowplugin-flow-services','http','jolokia-osgi']
+odl_base_features:
+ - config
+ - standard
+ - region
+ - package
+ - kar
+ - ssh
+ - management
+ - odl-restconf
+ - odl-l2switch-switch
+ - odl-openflowplugin-all
+ - odl-mdsal-apidocs
+ - odl-dlux-all
+ - odl-adsal-northbound
+ - odl-nsf-all
+ - odl-ovsdb-openstack
+ - odl-ovsdb-northbound
+ - odl-dlux-core
+
+odl_extra_features:
+ - odl-restconf-all
+ - odl-mdsal-clustering
+ - odl-openflowplugin-flow-services
+ - http
+ - jolokia-osgi
+
odl_features: "{{ odl_base_features + odl_extra_features }}"
odl_aaa_moon: odl-aaa-moon.tar.gz
@@ -28,4 +54,3 @@ common_packages_noarch: []
odl_pip:
- networking_odl
-
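With the feature strings broken out into real YAML lists, odl_features concatenates them back at render time: in Jinja2, list + list yields a combined list. Wherever Karaf expects its comma-separated featuresBoot value, that list presumably has to be joined again — a sketch of the line a features template could render (join is a standard Jinja2 filter; the key name comes from Karaf's stock org.apache.karaf.features.cfg):

    featuresBoot = {{ odl_features | join(',') }}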
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
index c8ce1155..37f689f6 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
@@ -14,24 +14,24 @@
update-rc.d neutron-plugin-openvswitch-agent remove;
sed -i /neutron-plugin-openvswitch-agent/d /opt/service
when: groups['onos']|length !=0
- ignore_errors: True
+ ignore_errors: "True"
- name: shut down and disable Neutron's agent services
service: name=neutron-plugin-openvswitch-agent state=stopped
when: groups['onos']|length !=0
- ignore_errors: True
+ ignore_errors: "True"
- name: remove neutron-l3-agent auto start
shell: >
update-rc.d neutron-l3-agent remove;
sed -i /neutron-l3-agent/d /opt/service
when: inventory_hostname in groups['onos']
- ignore_errors: True
+ ignore_errors: "True"
- name: shut down and disable Neutron's l3 agent services
service: name=neutron-l3-agent state=stopped
when: inventory_hostname in groups['onos']
- ignore_errors: True
+ ignore_errors: "True"
- name: Stop the Open vSwitch service and clear existing OVSDB
shell: >
@@ -40,7 +40,7 @@
ovs-vsctl del-manager ;
ip link delete onos_port1 type veth peer name onos_port2;
when: groups['onos']|length !=0
- ignore_errors: True
+ ignore_errors: "True"
- name: Install ONOS Cluster on Controller
include: onos_controller.yml
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
index d51151a9..1f7a0e76 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
@@ -13,10 +13,14 @@
register: http_server
- name: download onos driver packages
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}"
+ dest: /opt/
- name: upload onos sfc driver package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}" dest=/opt/
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}"
+ dest: /opt/
- name: unarchive onos driver package
command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
@@ -33,24 +37,30 @@
- name: install onos required packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items: packages
-
+
- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}"
+ dest: /opt/{{ jdk8_pkg_name }}
- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}"
+ dest: /opt/
- name: unarchive onos driver package
command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
-
+
- name: install install_jdk8 package
- command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
-
+ command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
+
- name: create JAVA_HOME environment variable
shell: >
export J2SDKDIR=/usr/lib/jvm/java-8-oracle;
export J2REDIR=/usr/lib/jvm/java-8-oracle/jre;
- export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin;
+ export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin;
+ export PATH=$PATH:/usr/lib/jvm/java-8-oracle/db/bin;
+ export PATH=$PATH:/usr/lib/jvm/java-8-oracle/jre/bin;
export JAVA_HOME=/usr/lib/jvm/java-8-oracle;
export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db;
@@ -67,20 +77,26 @@
shell: "/bin/false"
- name: download onos package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }}
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}"
+ dest: /opt/{{ onos_pkg_name }}
- name: create new jar repository
command: su -s /bin/sh -c "mkdir ~/.m2"
- ignore_errors: True
+ ignore_errors: "True"
- name: download jar repository
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}"
+ dest: ~/.m2/
- name: extract jar repository
command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
- name: extract onos package
- command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos
+ command: |
+ su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} \
+ --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos
- name: configure onos service
shell: >
@@ -88,13 +104,15 @@
echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
mkdir {{ onos_home }}/var;
mkdir {{ onos_home }}/config;
- sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
+ sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' \
+ {{ onos_home }}/init/onos.conf;
cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-
+
- name: configure onos boot feature
shell: >
- sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
+ sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' \
+ {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
- name: wait for config time
shell: "sleep 10"
@@ -109,14 +127,15 @@
shell: >
echo "onos">>/opt/service
-##########################################################################################################
-################################ ONOS connect with OpenStack ################################
-##########################################################################################################
- name: Configure Neutron1
shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_sfc.services.sfc.plugin.SfcPlugin, networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin, onos_router;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
+ crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins \
+ networking_sfc.services.sfc.plugin.SfcPlugin, \
+ networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin, onos_router;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 \
+ mechanism_drivers onos_ml2;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 \
+ tenant_network_types vxlan;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
- name: Create ML2 Configuration File
@@ -133,8 +152,11 @@
mysql -e "drop database if exists neutron_ml2;";
mysql -e "create database neutron_ml2 character set utf8;";
mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
- su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
+ su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
+ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" \
+ neutron;
+ su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc \
+ upgrade head" neutron;
- name: Restart neutron-server
service: name=neutron-server state=restarted
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
index aac787ea..4dfa6d45 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
@@ -9,49 +9,62 @@
---
- name: set veth port
- shell: >
+ shell: >
ip link add onos_port1 type veth peer name onos_port2;
ifconfig onos_port1 up;
ifconfig onos_port2 up;
- ignore_errors: True
+ ignore_errors: "True"
- name: add openflow-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
+ command: |
+ su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
when: inventory_hostname in groups['onos']
- name: add openflow feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
+ command: |
+ su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
when: inventory_hostname in groups['onos']
- name: add ovsdatabase feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
+ command: |
+ su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
when: inventory_hostname in groups['onos']
- name: add ovsdb-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'";
+ command: |
+ su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'";
when: inventory_hostname in groups['onos']
- name: add onos driver ovsdb feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'";
+ command: |
+ su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'";
when: inventory_hostname in groups['onos']
- name: add ovsdb provider host feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'";
+ command: |
+ su -s /bin/sh -c \
+ "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'";
when: inventory_hostname in groups['onos']
- name: add vtn feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'";
+ command: |
+ su -s /bin/sh -c \
+ "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'";
when: inventory_hostname in groups['onos']
- name: set public eth card start
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n onos_port2'"
+ command: |
+ su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n onos_port2'"
when: inventory_hostname in groups['onos']
-- name: Set ONOS as the manager
- command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
+- name: set ONOS as the manager
+ command: |
+ su -s /bin/sh -c \
+ "ovs-vsctl set-manager \
+ tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
- name: delete default gateway
shell: >
route delete default;
when: inventory_hostname not in groups['onos']
- ignore_errors: True
+ ignore_errors: "True"
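The file above installs ONOS features one task per feature through the onos CLI. Since every task has the same shape, they could be collapsed into a single looped task — a sketch reusing the feature names and CLI path from the hunk (with_items is the loop syntax already used elsewhere in this role):

    - name: add onos features (looped sketch)
      command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install {{ item }}'"
      with_items:
        - onos-openflow-base
        - onos-openflow
        - onos-ovsdatabase
        - onos-ovsdb-base
        - onos-drivers-ovsdb
        - onos-ovsdb-provider-host
        - onos-app-vtn-onosfw
      when: inventory_hostname in groups['onos']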
diff --git a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
index f11f1102..9389ed6b 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
@@ -14,6 +15,25 @@ jdk8_script_name: install_jdk8.tar
onos_driver: networking-onos.tar
onos_sfc_driver: networking-sfc.tar
repository: repository.tar
-onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base, onos-openflow, onos-ovsdatabase, onos-ovsdb-base, onos-drivers-ovsdb, onos-ovsdb-provider-host, onos-app-vtn-onosfw
-
-
+onos_boot_features:
+ - config
+ - standard
+ - region
+ - package
+ - kar
+ - ssh
+ - management
+ - webconsole
+ - onos-api
+ - onos-core
+ - onos-incubator
+ - onos-cli
+ - onos-rest
+ - onos-gui
+ - onos-openflow-base
+ - onos-openflow
+ - onos-ovsdatabase
+ - onos-ovsdb-base
+ - onos-drivers-ovsdb
+ - onos-ovsdb-provider-host
+ - onos-app-vtn-onosfw
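onos_boot_features changes here from one comma-separated string to a YAML list. The configure onos boot feature task shown earlier writes featuresBoot={{ onos_boot_features }} into org.apache.karaf.features.cfg; interpolating a list there would render it Python-style (['config', 'standard', ...]), so the consumer presumably needs to join it back into the comma string Karaf expects — a sketch that keeps the sed approach from that task (join is a standard Jinja2 filter):

    - name: configure onos boot feature (joined-list sketch)
      shell: >
        sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features | join(",") }}'
        {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg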
diff --git a/deploy/adapters/ansible/roles/open-contrail/files/recover_network_opencontrail.py b/deploy/adapters/ansible/roles/open-contrail/files/recover_network_opencontrail.py
deleted file mode 100755
index 554093b5..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/files/recover_network_opencontrail.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import yaml
-import netaddr
-import os
-import log as logging
-
-LOG = logging.getLogger("net-recover-opencontrail")
-config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
-
-
-def setup_bondings(bond_mappings):
- print bond_mappings
-
-
-def setup_ips_new(config):
- LOG.info("setup_ips_new enter")
- network = netaddr.IPNetwork(config["ip_settings"]["br-prv"]["cidr"])
- intf_name = config["provider_net_mappings"][0]["interface"]
- cmd = "ip addr add %s/%s brd %s dev %s;" \
- % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), intf_name) # noqa
- # cmd = "ip link set br-ex up;"
- # cmd += "ip addr add %s/%s brd %s dev %s;" \
- # % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), 'br-ex') # noqa
- cmd += "route del default;"
- cmd += "ip route add default via %s dev %s" % (
- config["ip_settings"]["br-prv"]["gw"], intf_name)
- # cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], 'br-ex') # noqa
- LOG.info("setup_ips_new: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def main(config):
- setup_ips_new(config)
-
-if __name__ == "__main__":
- config = yaml.load(open(config_path))
- main(config)
diff --git a/deploy/adapters/ansible/roles/open-contrail/files/setup_networks_opencontrail.py b/deploy/adapters/ansible/roles/open-contrail/files/setup_networks_opencontrail.py
deleted file mode 100755
index a427ee3c..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/files/setup_networks_opencontrail.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import yaml
-import netaddr
-import os
-import log as logging
-
-LOG = logging.getLogger("net-init-opencontrail")
-config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
-
-
-def setup_bondings(bond_mappings):
- print bond_mappings
-
-
-def add_vlan_link(interface, ifname, vlan_id):
- LOG.info("add_vlan_link enter")
- cmd = "ip link add link %s name %s type vlan id %s; " % (
- ifname, interface, vlan_id)
- cmd += "ip link set %s up; ip link set %s up" % (interface, ifname)
- LOG.info("add_vlan_link: cmd=%s" % cmd)
- os.system(cmd)
-
-# def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None):
-# LOG.info("add_ovs_port enter")
-# cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname)
-# if vlan_id:
-# cmd += " tag=%s" % vlan_id
-# cmd += " -- set Interface %s type=internal;" % ifname
-# cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" \ # noqa
-# % (ifname, uplink)
-# cmd += "ip link set %s up;" % ifname
-# LOG.info("add_ovs_port: cmd=%s" % cmd)
-# os.system(cmd)
-
-
-def setup_intfs(sys_intf_mappings, uplink_map):
- LOG.info("setup_intfs enter")
- for intf_name, intf_info in sys_intf_mappings.items():
- if intf_info["type"] == "vlan":
- add_vlan_link(
- intf_name,
- intf_info["interface"],
- intf_info["vlan_tag"])
-# elif intf_info["type"] == "ovs":
-# add_ovs_port(
-# intf_info["interface"],
-# intf_name,
-# uplink_map[intf_info["interface"]],
-# vlan_id=intf_info.get("vlan_tag"))
- else:
- pass
-
-
-def setup_ips(ip_settings, sys_intf_mappings):
- LOG.info("setup_ips enter")
- for intf_info in ip_settings.values():
- network = netaddr.IPNetwork(intf_info["cidr"])
- if sys_intf_mappings[intf_info["name"]]["type"] == "ovs":
- intf_name = intf_info["name"]
- else:
- intf_name = intf_info["alias"]
- if "gw" in intf_info:
- continue
- cmd = "ip addr add %s/%s brd %s dev %s;" \
- % (intf_info["ip"], intf_info["netmask"], str(network.broadcast), intf_name) # noqa
-# if "gw" in intf_info:
-# cmd += "route del default;"
-# cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name) # noqa
- LOG.info("setup_ips: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def setup_ips_new(config):
- LOG.info("setup_ips_new enter")
- network = netaddr.IPNetwork(config["ip_settings"]["br-prv"]["cidr"])
- intf_name = config["provider_net_mappings"][0]["interface"]
- cmd = "ip addr add %s/%s brd %s dev %s;" \
- % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), intf_name) # noqa
-# cmd = "ip link set br-ex up;"
-# cmd += "ip addr add %s/%s brd %s dev %s;" \
-# % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), 'br-ex') # noqa
- cmd += "route del default;"
- cmd += "ip route add default via %s dev %s" % (
- config["ip_settings"]["br-prv"]["gw"], intf_name)
-# cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], 'br-ex') # noqa
- LOG.info("setup_ips_new: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def setup_default_router(config):
- LOG.info("setup_ips_new enter")
-# network = netaddr.IPNetwork(config["ip_settings"]["br-prv"]["cidr"])
-# intf_name = config["provider_net_mappings"][0]["interface"]
- cmd = "route del default;"
- cmd += "ip route add default via %s dev %s" % (
- config["ip_settings"]["br-prv"]["gw"], "vhost0")
- LOG.info("setup_default_router: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def remove_ovs_kernel_mod(config):
- LOG.info("remove_ovs_kernel_mod enter")
- cmd = "rmmod vport_vxlan; rmmod openvswitch;"
- LOG.info("remove_ovs_kernel_mod: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def main(config):
- uplink_map = {}
- setup_bondings(config["bond_mappings"])
- remove_ovs_kernel_mod(config)
- for provider_net in config["provider_net_mappings"]:
- uplink_map[provider_net['name']] = provider_net['interface']
-
- setup_intfs(config["sys_intf_mappings"], uplink_map)
- setup_ips(config["ip_settings"], config["sys_intf_mappings"])
-# setup_ips_new(config)
- setup_default_router(config)
-
-if __name__ == "__main__":
- config = yaml.load(open(config_path))
- main(config)
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/ext-net.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/ext-net.yml
deleted file mode 100644
index 3ef327ee..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/ext-net.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: add ext-network router of vgw on controller for open-contrail
- shell: >
- ip route add {{ public_net_info.floating_ip_cidr }} via {{ ip_settings[groups['compute'][0]]['br-prv']['ip'] }} dev {{ network_cfg.public_vip.interface }} ;
- echo "ip route add {{ public_net_info.floating_ip_cidr }} via {{ ip_settings[groups['compute'][0]]['br-prv']['ip'] }} dev {{ network_cfg.public_vip.interface }}" >> /opt/contrail/bin/if-vhost0 ;
- when: inventory_hostname in groups['opencontrail']
-
-
-- name: create vgw for open-contrail
- shell: >
- echo "lsof -ni :9090 ; while [ $? -ne 0 ]; do sleep 10; lsof -ni :9090; done" >> /etc/init.d/net_init;
- echo "sleep 10" >> /etc/init.d/net_init;
- echo "python /opt/contrail/utils/provision_vgw_interface.py --oper create --interface vgw1 --subnets {{ public_net_info.floating_ip_cidr }} --routes 0.0.0.0/0 --vrf default-domain:admin:{{ public_net_info.network }}:{{ public_net_info.network }}" >> /etc/init.d/net_init;
- when: groups['opencontrail']|length !=0 and inventory_hostname == groups['compute'][0]
-
-
-- name: add vgw router on compute(without vgw) for open-contrail
- shell: echo "ip route add {{ public_net_info.floating_ip_cidr }} via {{ ip_settings[groups['compute'][0]]['br-prv']['ip'] }} dev vhost0" >> /etc/init.d/net_init
- when: groups['opencontrail']|length !=0 and inventory_hostname not in groups['opencontrail'] and inventory_hostname != groups['compute'][0]
-
-
-
-
-
-# create a file with vgw ip on CompassCore, so that Jumper Host could access this to get vgw ip
-- name: add vgw file on compass
- local_action: file path=/home/opencontrail1.rc state=touch mode=0777
- run_once: True
- when: groups['opencontrail']|length !=0
-
-- name: update vgw file
- local_action: lineinfile dest=/home/opencontrail1.rc line={{ ip_settings[groups['compute'][0]]['br-prv']['ip'] }}
- run_once: True
- when: groups['opencontrail']|length !=0
-
-- name: add vgw file on compass
- local_action: file path=/home/opencontrail2.rc state=touch mode=0777
- run_once: True
- when: groups['opencontrail']|length !=0
-
-- name: update vgw file
- local_action: lineinfile dest=/home/opencontrail2.rc line={{ public_net_info.floating_ip_cidr }}
- run_once: True
- when: groups['opencontrail']|length !=0
-
-
-
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-collector.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-collector.yml
deleted file mode 100755
index d3022893..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-collector.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: collector
-# sudo: yes
-# tasks:
-
-- name: "temporary disable supervisor analytics"
- template:
-# src: "templates/override.j2"
- src: "../../templates/install/override.j2"
- dest: "/etc/init/supervisor-analytics.override"
-
-- name: "install contrail openstack analytics package"
-# apt:
-# name: "contrail-openstack-analytics"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
- with_items: collector_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-common.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-common.yml
deleted file mode 100755
index e94621bc..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-common.yml
+++ /dev/null
@@ -1,104 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: all
-# sudo: yes
-# tasks:
-#- name: "copy contrail install package temporary"
-# sudo: True
-# copy:
-# src: "{{ package }}"
-# dest: "/tmp/{{ package }}"
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download OpenContrail package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/open-contrail/{{ package }}" dest=/tmp/{{ package }}
-#"
-
-- name: "install contrail install package"
-# sudo: True
- apt:
- deb: "/tmp/{{ package }}"
- force: yes
-
-- name: "delete temporary contrail install package"
-# sudo: True
- file:
- dest: "/tmp/{{ package }}"
- state: "absent"
-
-- name: "make directory for contrail binary files"
-# sudo: True
- file:
- path: "/opt/contrail/bin"
- state: "directory"
-
-- name: "make directory for contrail repository"
-# sudo: True
- file:
- path: "/opt/contrail/contrail_install_repo"
- state: "directory"
-
-- name: "unarchive contrail packages"
-# sudo: True
- unarchive:
- src: "/opt/contrail/contrail_packages/contrail_debs.tgz"
- dest: "/opt/contrail/contrail_install_repo"
- copy: no
-
-- name: "find required packages in advance"
-# sudo: True
- shell: "find /opt/contrail/contrail_install_repo -name binutils_*.deb -or -name make_*.deb -or -name libdpkg-perl_*.deb -or -name dpkg-dev_*.deb -or -name patch_*.deb -type f"
- register: required_packages
- changed_when: no
-
-- name: "install required packages"
-# sudo: True
- apt:
- deb: "{{ item }}"
- force: yes
- with_items: required_packages.stdout_lines
- ignore_errors: True
-
-- name: modify source list
-# sudo: True
- lineinfile:
- dest: "/etc/apt/sources.list"
- line: "deb file:/opt/contrail/contrail_install_repo ./"
- insertbefore: "BOF"
-
-- name: "modify apt configuration"
-# sudo: True
- lineinfile:
- dest: "/etc/apt/apt.conf"
- line: "APT::Get::AllowUnauthenticated \"true\";"
- create: "yes"
-
-- name: "copy apt preferences file"
-# sudo: True
- shell: "cp /opt/contrail/contrail_packages/preferences /etc/apt/preferences"
- args:
- creates: "/etc/apt/preferences"
-
-- name: create contrail packages list
-# sudo: True
- shell: "dpkg-scanpackages . | gzip -9c > Packages.gz"
- args:
- chdir: "/opt/contrail/contrail_install_repo"
- creates: "Packages.gz"
-
-- name: install contrail setup package
- sudo: True
- apt:
- name: "contrail-setup"
- update_cache: yes
- force: yes
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-compute.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-compute.yml
deleted file mode 100755
index 4e4a5ad5..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-compute.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: [compute, tsn]
-# sudo: yes
-# tasks:
-- name: "temporary disable supervisor vrouter"
-# sudo: True
- template:
- src: "../../templates/install/override.j2"
- dest: "/etc/init/supervisor-vrouter.override"
-
-# - name: "install nova-compute for contrail package"
-# apt:
-# name: "nova-compute"
-# when: install_nova
-
-- name: "install contrail vrouter 3.13.0-40 package"
-# apt:
-# name: "contrail-vrouter-3.13.0-40-generic"
-# when: ansible_kernel == "3.13.0-40-generic"
-# sudo: True
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
- with_items: vrouter_package
- when: ansible_kernel == kernel_required
-
-- name: "install contrail vrouter dkms package"
-# apt:
-# name: "contrail-vrouter-dkms"
-# when: ansible_kernel != "3.13.0-40-generic"
-# sudo: True
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
- with_items: dkms_package
- when: ansible_kernel != kernel_required
-
-# - name: "install contrail vrouter common package"
-# apt:
-# name: "contrail-vrouter-common"
-
-# - name: "install contrail nova vif package"
-# apt:
-# name: "contrail-nova-vif"
-
-- name: "install contrail vrouter common & nova vif package"
-# sudo: True
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: compute_package | union(compute_package_noarch)
-
-
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-config.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-config.yml
deleted file mode 100755
index b66e3e45..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-config.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: config
-# sudo: yes
-# tasks:
-- name: "temporary disable supervisor config"
-# sudo: True
- template:
-# src: "templates/override.j2"
- src: "../../templates/install/override.j2"
- dest: "/etc/init/supervisor-config.override"
-
-- name: "temporary disable neutron server"
-# sudo: True
- template:
-# src: "templates/override.j2"
- src: "../../templates/install/override.j2"
- dest: "/etc/init/neutron-server.override"
-
-###############################################
-################ workaround #################
-###############################################
-- name: "backup keepalived conf"
- shell: mv /etc/keepalived/keepalived.conf /home/keepalived.conf
-
-- name: "uninstall keepalived"
- action: "{{ ansible_pkg_mgr }} name=keepalived state=absent"
-
-- name: "install iproute"
- action: "{{ ansible_pkg_mgr }} name=iproute state=present"
-
-- name: "install iproute"
- action: "{{ ansible_pkg_mgr }} name=keepalived state=present"
-
-- name: "restore keepalived conf"
- shell: mv /home/keepalived.conf /etc/keepalived/keepalived.conf
-###############################################
-
-- name: "install contrail openstack config package"
-# sudo: True
-# apt:
-# name: "contrail-openstack-config"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
- with_items: config_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-control.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-control.yml
deleted file mode 100755
index ab7d4ad3..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-control.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: control
-# sudo: yes
-# tasks:
-- name: "temporary disable supervisor control"
-# sudo: True
- template:
-# src: "templates/override.j2"
- src: "../../templates/install/override.j2"
- dest: "/etc/init/supervisor-control.override"
-
-- name: "temporary disable supervisor dns"
-# sudo: True
- template:
-# src: "templates/override.j2"
- src: "../../templates/install/override.j2"
- dest: "/etc/init/supervisor-dns.override"
-
-- name: "install contrail openstack control package"
-# sudo: True
-# apt:
-# name: "contrail-openstack-control"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
- with_items: control_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-database.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-database.yml
deleted file mode 100755
index 5c89ede9..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-database.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: database
-# sudo: yes
-# tasks:
-- name: "temporary disable supervisor database"
-# sudo: True
- template:
-# src: "templates/override.j2"
- src: "../../templates/install/override.j2"
- dest: "/etc/init/supervisor-database.override"
-
-- name: "install contrail openstack database package"
-# sudo: True
-# apt:
-# name: "contrail-openstack-database"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
- with_items: database_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-interface.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-interface.yml
deleted file mode 100755
index 3f7b43c1..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-interface.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: all
-# sudo: yes
-# tasks:
-#- name: get last ip address
-# shell: expr substr `cat /etc/hostname` 5 1
-# register: lastip
-
-#- name: "configure interface"
-## sudo: True
-# lineinfile:
-# dest: "/etc/network/interfaces"
-# line: "{{ item }}"
-# with_items:
-# - "auto {{ contrail_vhost_device }}"
-# - "iface {{ contrail_vhost_device }} inet static"
-# - "\taddress {{ contrail_vhost_address }}"
-# - "\tnetmask {{ contrail_vhost_netmask }}"
-
-- name: "set interface address"
-# sudo: True
- shell: "ifconfig {{ contrail_vhost_device }} {{ contrail_vhost_address }} netmask {{ contrail_vhost_netmask }}"
-
-- name: "up interface"
-# sudo: True
- shell: "ifconfig {{ contrail_vhost_device }} up"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-kernel.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-kernel.yml
deleted file mode 100755
index be9a8ac9..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-kernel.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: all
-# sudo: yes
-# tasks:
-
-- name: "install Ubuntu kernel"
-# sudo: True
-# apt:
-# name: "linux-headers-3.13.0-40"
-# name: "linux-headers-3.13.0-40-generic"
-# name: "linux-image-3.13.0-40-generic"
-# name: "linux-image-extra-3.13.0-40-generic"
-# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic")
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
- with_items: kernel_package | union(kernel_package_noarch)
- when: (kernel_install) and (ansible_kernel != kernel_required)
-
-- name: "setup grub"
-# sudo: True
- lineinfile:
- dest: "/etc/default/grub"
- regexp: "GRUB_DEFAULT=.*"
- line: "GRUB_DEFAULT='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-40-generic'"
-# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic")
- when: (kernel_install) and (ansible_kernel != kernel_required)
-
-- name: "reflect grub"
-# sudo: True
- shell: "update-grub2"
-# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic")
- when: (kernel_install) and (ansible_kernel != kernel_required)
-
-- name: "reboot Server"
-# sudo: True
- shell: "shutdown -r now"
- async: 0
- poll: 0
- ignore_errors: true
- notify: Wait for server to come back
-# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic")
- when: (kernel_install) and (ansible_kernel != kernel_required)
-
-# handlers:
-- name: "Wait for server to come back"
- local_action:
- module: wait_for
- host={{ inventory_hostname }}
- port=22
- delay=30
- timeout=600
-# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic")
- when: (kernel_install) and (ansible_kernel != kernel_required)
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-webui.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-webui.yml
deleted file mode 100755
index 6dbe1e74..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-webui.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: webui
-# sudo: yes
-# tasks:
-
-- name: "temporary disable supervisor webui"
-# sudo: True
- template:
-# src: "templates/override.j2"
- src: "../../templates/install/override.j2"
- dest: "/etc/init/supervisor-webui.override"
-
-- name: "install contrail openstack webui package"
-# sudo: True
-# apt:
-# name: "contrail-openstack-webui"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
- with_items: webui_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml
deleted file mode 100755
index 7d0f1a9e..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml
+++ /dev/null
@@ -1,151 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: backup rabbitmq-server
- shell: cp /etc/init.d/rabbitmq-server /home/rabbitmq-server
- when: inventory_hostname in groups['opencontrail']
-
-- name: Disable Service Daemon
- shell: if [ -f “\/opt\/service” ] ; then mv /opt/service /opt/service.bak ; fi
- when: groups['opencontrail']|length !=0
-
-- name: Install common on all hosts for Open Contrail
- include: install/install-common.yml
- when: groups['opencontrail']|length !=0
- # Compass install OpenStack with not only OpenContrail but also ODL or ONOS, and sometimes user just installs OpenStack, so item 'opencontrail_control' is kind of a mark that whether Compass install OpenContrail or not.
-
-#- name: Install kernal on all hosts for Open Contrail
-# include: install/install-kernel.yml
-# when: groups['opencontrail_control']|length !=0
-
-- name: Install database for Open Contrail
- include: install/install-database.yml
- when: inventory_hostname in groups['opencontrail']
-
-- name: Install config for Open Contrail
- include: install/install-config.yml
- when: inventory_hostname in groups['opencontrail']
-
-- name: Install config for Open Contrail
- include: install/install-control.yml
- when: inventory_hostname in groups['opencontrail']
-
-- name: Install collector for Open Contrail
- include: install/install-collector.yml
- when: inventory_hostname in groups['opencontrail']
-
-- name: Install webui for Open Contrail
- include: install/install-webui.yml
- when: inventory_hostname in groups['opencontrail']
-
-- name: Install compute for Open Contrail
- include: install/install-compute.yml
- when: groups['opencontrail']|length !=0 and inventory_hostname not in groups['opencontrail']
-# or inventory_hostname in groups['opencontrail_tsn']
-
-
-# change vhost0 on eth1
-#- name: Install interface on all hosts for Open Contrail
-# include: install/install-interface.yml
-# when: groups['opencontrail']|length !=0
-
-#- include: install/install-common.yml
-#- include: install/install-kernel.yml
-#- include: install/install-database.yml
-#- include: install/install-config.yml
-#- include: install/install-control.yml
-#- include: install/install-collector.yml
-#- include: install/install-webui.yml
-#- include: install/install-compute.yml
-#- include: install/install-interface.yml
-
-
-#- name: Provision route on all hosts for Open Contrail
-# include: provision/provision-route.yml
-# when: groups['opencontrail_control']|length !=0
-
-
-- name: Provision RabbitMQ on OpenContrail config nodes
- include: provision/provision-rabbitmq.yml
- when: inventory_hostname in groups['opencontrail']
-
-- name: Provision increase limits for Open Contrail
- include: provision/provision-increase-limits.yml
- when: inventory_hostname in groups['opencontrail']
-#or inventory_hostname in groups['opencontrail_config'] or inventory_hostname in groups['opencontrail_collector'] or inventory_hostname in groups['opencontrail_database']
-
-
-- name: Provision database for Open Contrail
- include: provision/provision-database.yml
- when: inventory_hostname in groups['opencontrail']
-
-
-- name: Provision config for Open Contrail
- include: provision/provision-config.yml
- when: inventory_hostname in groups['opencontrail']
-
-- name: Provision control for Open Contrail
- include: provision/provision-control.yml
- when: inventory_hostname in groups['opencontrail']
-
-
-- name: Provision collector for Open Contrail
- include: provision/provision-collector.yml
- when: inventory_hostname in groups['opencontrail']
-
-
-- name: Provision add nodes for Open Contrail
- include: provision/provision-add-nodes.yml
- when: inventory_hostname in groups['opencontrail']
-
-
-- name: Provision webui for Open Contrail
- include: provision/provision-webui.yml
- when: inventory_hostname in groups['opencontrail']
-
-
-- name: Provision compute for Open Contrail
- include: provision/provision-compute.yml
- when: groups['opencontrail']|length !=0 and inventory_hostname not in groups['opencontrail']
-
-- name: Remove openvswitch on compute
- include: uninstall-openvswitch.yml
- when: groups['opencontrail']|length !=0 and inventory_hostname not in groups['opencontrail']
-
-- name: Config ext-net network
- include: ext-net.yml
-
-- name: Enable Service Daemon
- shell: if [ -f “\/opt\/service.bak” ] ; then mv /opt/service.bak /opt/service ; fi
- when: groups['opencontrail']|length !=0
-
-#- name: Provision tsn for Open Contrail
-# include: provision/provision-tsn.yml
-# when: inventory_hostname in groups['opencontrail_tsn']
-
-
-#- name: Provision toragent for Open Contrail
-# include: provision/provision-toragent.yml
-# when: inventory_hostname in groups['opencontrail_tsn']
-
-#- include: provision/provision-route.yml
-#- include: provision/provision-rabbitmq.yml
-#- include: provision/provision-increase-limits.yml
-#- include: provision/provision-database.yml
-#- include: provision/provision-config.yml
-#- include: provision/provision-control.yml
-#- include: provision/provision-collector.yml
-#- include: provision/provision-add-nodes.yml
-#- include: provision/provision-webui.yml
-#- include: provision/provision-compute.yml
-#- include: provision/provision-tsn.yml
-#- include: provision/provision-toragent.yml
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-node-common.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-node-common.yml
deleted file mode 100755
index 759f940f..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-node-common.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: "disable ufw"
- ufw:
- state: "disabled"
-
-- name: "change value of kernel.core_pattern"
- sysctl:
- name: "kernel.core_pattern"
- value: "/var/crashes/core.%e.%p.%h.%t"
-
-- name: "change value of net.ipv4.ip_forward"
- sysctl:
- name: "net.ipv4.ip_forward"
- value: "1"
-
-- name: "make crashes directory"
- file:
- path: "/var/crashes"
- state: "directory"
- mode: 0777
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-rabbitmq-stop.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-rabbitmq-stop.yml
deleted file mode 100644
index ec6b2fe0..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-rabbitmq-stop.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: 'stop rabbitmq server'
- service:
- name: 'rabbitmq-server'
- state: 'stopped'
-
-- name: 'check beam process'
- shell: 'ps ax | grep -v grep | grep beam'
- register: beam_process
- changed_when: no
- ignore_errors: yes
-
-- name: 'kill beam processes'
- shell: 'pkill -9 beam'
- when: beam_process.stdout
-
-- name: 'check epmd process'
- shell: 'ps ax | grep -v grep | grep epmd'
- register: epmd_process
- changed_when: no
- ignore_errors: yes
-
-- name: 'kill epmd processes'
- shell: 'pkill -9 epmd'
- when: epmd_process.stdout
-
-- name: 'remove mnesia directory'
- file:
- name: '/var/lib/rabbitmq/mnesia'
- state: 'absent'
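The check-then-kill pairs above can be collapsed into one loop; pkill exits non-zero when nothing matches, so the trailing "|| true" keeps the task green on an already-clean node. A minimal sketch of the same behaviour:

    - name: "kill leftover rabbitmq erlang processes (sketch)"
      shell: pkill -9 {{ item }} || true
      with_items:
        - beam
        - epmd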
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-redis-setup.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-redis-setup.yml
deleted file mode 100755
index c4a66240..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-redis-setup.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: "stop redis server"
- service:
- name: "redis-server"
- state: "stopped"
-
-- name: "modify redis server configuration"
- replace:
- dest: "/etc/redis/redis.conf"
- regexp: "{{ item.regexp }}"
- replace: "{{ item.replace }}"
- with_items:
- - { regexp: "^\\s*bind", replace: "#bind" }
- - { regexp: "^\\s*save", replace: "#save" }
- - { regexp: "^\\s*dbfilename", replace: "#dbfilename" }
- - { regexp: "^\\s*lua-time-limit\\s*\\d*", replace: "lua-time-limit 15000" }
-
-- name: "delete redis dump"
- file:
- dest: "/var/lib/redis/dump.rdb"
- state: "absent"
-
-- name: "start redis server"
- service:
- name: "redis-server"
- state: "started"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml
deleted file mode 100755
index be1879af..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml
+++ /dev/null
@@ -1,115 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: "change owner nova log directory"
- file:
- dest: "/var/log/nova"
- state: "directory"
- owner: "nova"
- group: "nova"
- recurse: yes
-
-- name: "delete values from nova config"
- ini_file:
- dest: "/etc/nova/nova.conf"
- section: "{{ item.section }}"
- option: "{{ item.option }}"
- with_items:
- - { section: "DEFAULT", option: "sql_connection" }
- - { section: "DEFAULT", option: "quantum_admin_tenant_name" }
- - { section: "DEFAULT", option: "quantum_admin_username" }
- - { section: "DEFAULT", option: "quantum_admin_password" }
- - { section: "DEFAULT", option: "quantum_admin_auth_url" }
- - { section: "DEFAULT", option: "quantum_auth_strategy" }
- - { section: "DEFAULT", option: "quantum_url" }
-
-- name: "set values to nova config"
- ini_file:
- dest: "/etc/nova/nova.conf"
- section: "{{ item.section }}"
- option: "{{ item.option }}"
- value: "{{ item.value }}"
- with_items:
- - { section: "DEFAULT", option: "auth_strategy", value: "keystone" }
- - { section: "DEFAULT", option: "libvirt_nonblocking", value: "True" }
- - { section: "DEFAULT", option: "libvirt_inject_partition", value: "-1" }
- - { section: "DEFAULT", option: "rabbit_host", value: "{{ contrail_haproxy_address }}" }
- - { section: "DEFAULT", option: "rabbit_port", value: "5672" }
- - { section: "DEFAULT", option: "glance_host", value: "{{ contrail_haproxy_address }}" }
- - { section: "DEFAULT", option: "glance_port", value: "9292" }
- - { section: "DEFAULT", option: "neutron_admin_tenant_name", value: "service" }
- - { section: "DEFAULT", option: "neutron_admin_username", value: "neutron" }
- - { section: "DEFAULT", option: "neutron_admin_password", value: "{{ contrail_admin_password }}" }
- - { section: "DEFAULT", option: "neutron_admin_auth_url", value: "http://{{ contrail_haproxy_address }}:35357/v2.0/" }
- - { section: "DEFAULT", option: "neutron_url", value: "http://{{ contrail_haproxy_address }}:9696/" }
- - { section: "DEFAULT", option: "neutron_url_timeout", value: "300" }
- - { section: "DEFAULT", option: "network_api_class", value: "nova.network.neutronv2.api.API" }
- - { section: "DEFAULT", option: "compute_driver", value: "libvirt.LibvirtDriver" }
- - { section: "DEFAULT", option: "network_api_class", value: " nova_contrail_vif.contrailvif.ContrailNetworkAPI" }
- - { section: "DEFAULT", option: "ec2_private_dns_show_ip", value: "False" }
- - { section: "DEFAULT", option: "novncproxy_base_url", value: "http://{{ contrail_haproxy_address }}:5999/vnc_auto.html" }
- - { section: "DEFAULT", option: "vncserver_enabled", value: "True" }
- - { section: "DEFAULT", option: "vncserver_listen", value: "{{ contrail_address }}" }
- - { section: "DEFAULT", option: "vncserver_proxyclient_address", value: "{{ contrail_address }}" }
- - { section: "DEFAULT", option: "security_group_api", value: "neutron" }
- - { section: "DEFAULT", option: "heal_instance_info_cache_interval", value: "0" }
- - { section: "DEFAULT", option: "image_cache_manager_interval", value: "0" }
- - { section: "DEFAULT", option: "libvirt_cpu_mode", value: "none" }
- - { section: "DEFAULT", option: "libvirt_vif_driver", value: "nova_contrail_vif.contrailvif.VRouterVIFDriver" }
- - { section: "database", option: "connection", value: "mysql://nova:nova@{{ contrail_haproxy_address }}/nova?charset=utf8" }
- - { section: "database", option: "idle_timeout", value: "180" }
- - { section: "database", option: "max_retries", value: "-1" }
- - { section: "keystone_authtoken", option: "admin_tenant_name", value: "service" }
- - { section: "keystone_authtoken", option: "admin_user", value: "nova" }
- - { section: "keystone_authtoken", option: "admin_password", value: "{{ contrail_admin_password }}" }
- - { section: "keystone_authtoken", option: "auth_protocol", value: "http" }
- - { section: "keystone_authtoken", option: "auth_host", value: "{{ contrail_haproxy_address }}" }
- - { section: "keystone_authtoken", option: "signing_dir", value: "/tmp/keystone-signing-nova" }
-
-
-
-#- { section: "DEFAULT", option: "rabbit_host", value: "{{ hostvars[groups['config'][0]]['contrail_address'] }}" }
-#- { section: "DEFAULT", option: "glance_host", value: "{{ hostvars[groups['openstack'][0]]['contrail_address'] }}" }
-#- { section: "DEFAULT", option: "neutron_admin_auth_url", value: "http://{{ hostvars[groups['openstack'][0]]['contrail_address'] }}:35357/v2.0/" }
-#- { section: "DEFAULT", option: "neutron_url", value: "http://{{ hostvars[groups['config'][0]]['contrail_address'] }}:9696/" }
-#- { section: "DEFAULT", option: "novncproxy_base_url", value: "http://{{ hostvars[groups['openstack'][0]]['contrail_mgmt_address'] }}:5999/vnc_auto.html" }
-#- { section: "database", option: "connection", value: "mysql://nova:nova@{{ hostvars[groups['openstack'][0]]['contrail_address'] }}/nova?charset=utf8" }
-#- { section: "keystone_authtoken", option: "auth_host", value: "{{ hostvars[groups['openstack'][0]]['contrail_address'] }}" }
-
-
-
-- name: "change database address if same node as first openstack node"
- ini_file:
- dest: "/etc/nova/nova.conf"
- section: "database"
- option: "connection"
- value: "mysql://nova:nova@127.0.0.1/nova?charset=utf8"
- when: groups['openstack'][0] == inventory_hostname
-
-- name: "add respawn to nova compute config"
- lineinfile:
- dest: "/etc/init/nova-compute.conf"
- line: "respawn"
- insertbefore: "pre-start script"
-
-- name: "add respawn limit to nova compute config"
- lineinfile:
- dest: "/etc/init/nova-compute.conf"
- line: "respawn limit 10 90"
- insertafter: "respawn"
-
-- name: "restart nova compute"
- service:
- name: "nova-compute"
- state: "restarted"
-
-- name: "delete nova sqlite database"
- file:
- dest: "/var/lib/nova/nova.sqlite"
- state: "absent"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml
deleted file mode 100755
index 58a6bb07..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: config
-# sudo: yes
-# tasks:
-
-#- name: "recover rabbitmq-server service script"
-# shell: cp /home/rabbitmq-server /etc/init.d/rabbitmq-server
-
-#- name: "restart rabbitmq-server"
-# service:
-# name: "rabbitmq-server"
-# state: "restarted"
-
-#- name: "wait rabbitmq-server start"
-# shell: sleep 5
-
-- name: "restart contrail-discovery"
- service:
- name: "contrail-discovery"
- state: "restarted"
-
-- name: "wait contrail-discovery"
- shell: sleep 5
-
-- name: "restart contrail-api"
- service:
- name: "contrail-api"
- state: "restarted"
-
-- name: "restart apache2"
- service:
- name: "apache2"
- state: "restarted"
-
-- name: "check contrail-api"
- shell: lsof -ni :8082 ; while [ $? -ne 0 ]; do sleep 10; lsof -ni :8082; done; sleep 20;
-
-- name: "wait contrail-api"
- shell: sleep 20
-
-- name: "provision config node"
- shell: "python /opt/contrail/utils/provision_config_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
-# when: inventory_hostname in groups['opencontrail_config']
-
-#- hosts: database
-# sudo: yes
-# tasks:
-- name: "provision database node"
- shell: "python /opt/contrail/utils/provision_database_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
-# when: inventory_hostname in groups['opencontrail_database']
-
-
-#- hosts: collector
-# sudo: yes
-# tasks:
-- name: "provision collector node"
- shell: "python /opt/contrail/utils/provision_analytics_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
-# when: inventory_hostname in groups['opencontrail_collector']
-
-#- hosts: control
-# sudo: yes
-# tasks:
-- name: "provision control node"
- shell: "python /opt/contrail/utils/provision_control.py --api_server_ip {{ contrail_haproxy_address }} --api_server_port 8082 --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }} --router_asn {{ contrail_router_asn }}"
-# when: inventory_hostname in groups['opencontrail_control']
-
-#- hosts: config
-# sudo: yes
-# tasks:
-- name: "provision metadata services"
- shell: "python /opt/contrail/utils/provision_linklocal.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --ipfabric_service_ip 10.84.50.1 --ipfabric_service_port 8775 --linklocal_service_name metadata --linklocal_service_ip 169.254.169.254 --linklocal_service_port 80"
- run_once: yes
-# when: inventory_hostname in groups['opencontrail_config']
-
-
-#- hosts: config
-# sudo: yes
-# tasks:
-- name: "provision encap"
- shell: "python /opt/contrail/utils/provision_encap.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --oper add --encap_priority MPLSoUDP,MPLSoGRE,VXLAN"
- run_once: yes
-# when: inventory_hostname in groups['opencontrail_config']
-
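The lsof polling loop above is a shell re-implementation of what the wait_for module already provides. A minimal sketch of the equivalent check, assuming contrail-api listens locally on 8082:

    - name: "wait for contrail-api to listen on 8082 (sketch)"
      wait_for:
        port: 8082
        delay: 10
        timeout: 600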
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-collector.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-collector.yml
deleted file mode 100755
index b09f83a8..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-collector.yml
+++ /dev/null
@@ -1,106 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: collector
-# sudo: yes
-# tasks:
-
-- name: "enable supervisor analytics"
- file:
- path: "/etc/init/supervisor-analytics.override"
- state: "absent"
-
-
-- name: "redis-setup"
- include: -redis-setup.yml
-
-
-- name: "node-common"
- include: -node-common.yml
-
-
-- name: "fix up contrail collector config"
- template:
- src: "../../templates/provision/contrail-collector-conf.j2"
- dest: "/etc/contrail/contrail-collector.conf"
-
-
-- name: "fix up contrail query engine config"
- template:
- src: "../../templates/provision/contrail-query-engine-conf.j2"
- dest: "/etc/contrail/contrail-query-engine.conf"
-
-
-- name: "fix up contrail analytics api config"
- template:
- src: "../../templates/provision/contrail-analytics-api-conf.j2"
- dest: "/etc/contrail/contrail-analytics-api.conf"
-
-
-- name: "modify contrail analytics nodemgr config"
- ini_file:
- dest: "/etc/contrail/contrail-analytics-nodemgr.conf"
- section: "DISCOVERY"
- option: "server"
- value: "{{ contrail_haproxy_address }}"
-
-
-- name: "fix up contrail keystone auth config"
- template:
- src: "../../templates/provision/contrail-keystone-auth-conf.j2"
- dest: "/etc/contrail/contrail-keystone-auth.conf"
- force: no
-
-
-- name: "delete contrail alarm gen supervisord config file"
- file:
- dest: "/etc/contrail/supervisord_analytics_files/contrail-alarm-gen.ini"
- state: "absent"
-
-
-- name: "modify contrail snmp collector config file"
- ini_file:
- dest: "/etc/contrail/contrail-snmp-collector.conf"
- section: "{{ item.section }}"
- option: "{{ item.option }}"
- value: "{{ item.value }}"
- with_items:
- - { section: "DEFAULTS", option: "zookeeper", value: "{{ contrail_address }}:2181" }
- - { section: "DISCOVERY", option: "disc_server_ip", value: "{{ contrail_haproxy_address }}" }
- - { section: "DISCOVERY", option: "disc_server_port", value: "5998" }
-
-
-- name: "modify contrail snmp collector ini file"
- ini_file:
- dest: "/etc/contrail/supervisord_analytics_files/contrail-snmp-collector.ini"
- section: "program:contrail-snmp-collector"
- option: "command"
- value: "/usr/bin/contrail-snmp-collector --conf_file /etc/contrail/contrail-snmp-collector.conf --conf_file /etc/contrail/contrail-keystone-auth.conf"
-
-
-- name: "modify contrail topology config file"
- ini_file:
- dest: "/etc/contrail/contrail-topology.conf"
- section: "DEFAULTS"
- option: "zookeeper"
- value: "{{ contrail_address }}"
-
-
-- name: "modify contrail topology ini file"
- ini_file:
- dest: "/etc/contrail/supervisord_analytics_files/contrail-topology.ini"
- section: "program:contrail-topology"
- option: "command"
- value: "/usr/bin/contrail-topology --conf_file /etc/contrail/contrail-topology.conf"
-
-
-- name: "restart supervisor analytics"
- service:
- name: "supervisor-analytics"
- state: "restarted"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml
deleted file mode 100755
index 374c4e13..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml
+++ /dev/null
@@ -1,269 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: compute
-# sudo: yes
-# tasks:
-- name: "enable supervisor vrouter"
- file:
- path: "/etc/init/supervisor-vrouter.override"
- state: "absent"
-
-- include: -node-common.yml
-
-- name: "check cgroup device acl in qemu conf"
- shell: "grep -q '^\\s*cgroup_device_acl' /etc/libvirt/qemu.conf"
- register: deviceacl
- ignore_errors: yes
- changed_when: no
-
-- name: "create cgroup device acl for qemu conf"
- template:
- src: "../../templates/provision/qemu-device-acl-conf.j2"
- dest: "/tmp/qemu-device-acl.conf"
- when: deviceacl | failed
-
-- name: "combination of the qemu configuration"
- shell: "cat /tmp/qemu-device-acl.conf >> /etc/libvirt/qemu.conf"
- when: deviceacl | failed
-
-- name: "delete temporary configuration file"
- file:
- dest: "/tmp/qemu-device-acl.conf"
- state: "absent"
- when: deviceacl | failed
-
-- name: "fix up vrouter nodemgr param"
- template:
- src: "../../templates/provision/vrouter-nodemgr-param.j2"
- dest: "/etc/contrail/vrouter_nodemgr_param"
-
-- name: "set contrail device name for ansible"
- set_fact:
- contrail_ansible_device: "ansible_{{ contrail_vhost_device }}"
-
-- name: "fix up default pmac"
- template:
- src: "../../templates/provision/default-pmac.j2"
- dest: "/etc/contrail/default_pmac"
-
-- name: "copy agent param config from template"
- shell: "cp /etc/contrail/agent_param.tmpl /etc/contrail/agent_param"
-
-- name: "modify agent param config"
- lineinfile:
- dest: "/etc/contrail/agent_param"
- regexp: "dev=__DEVICE__"
- line: "dev={{ contrail_vhost_device }}"
-
-#- name: "get last ip address"
-# shell: expr substr `cat /etc/hostname` 5 1
-# register: lastip
-
-- name: "fix up contrail vrouter agent config"
- template:
- src: "../../templates/provision/contrail-vrouter-agent-conf.j2"
- dest: "/etc/contrail/contrail-vrouter-agent.conf"
-
-- name: "delete lines for contrail interface"
- shell: "{{ item }}"
- with_items:
- - "sed -e '/auto {{ contrail_vhost_device }}/,$d' /etc/network/interfaces > /tmp/contrail-interfaces-top"
- - "sed -n -e '/auto {{ contrail_vhost_device }}/,$p' /etc/network/interfaces > /tmp/contrail-interfaces-bottom"
- - "sed -i -e '/auto {{ contrail_vhost_device }}/d' /tmp/contrail-interfaces-bottom"
- - "sed -i -n -e '/auto .*/,$p' /tmp/contrail-interfaces-bottom"
- - "cat /tmp/contrail-interfaces-top /tmp/contrail-interfaces-bottom > /etc/network/interfaces"
-
-- name: "delete lines for vrouter interface"
- shell: "{{ item }}"
- with_items:
- - "sed -e '/auto vhost0/,$d' /etc/network/interfaces > /tmp/contrail-interfaces-top"
- - "sed -n -e '/auto vhost0/,$p' /etc/network/interfaces > /tmp/contrail-interfaces-bottom"
- - "sed -i -e '/auto vhost0/d' /tmp/contrail-interfaces-bottom"
- - "sed -i -n -e '/auto .*/,$p' /tmp/contrail-interfaces-bottom"
- - "cat /tmp/contrail-interfaces-top /tmp/contrail-interfaces-bottom > /etc/network/interfaces"
-
-#- name: get last ip address
-# shell: expr substr `cat /etc/hostname` 5 1
-# register: lastip
-
-- name: "configure interface"
- lineinfile:
- dest: "/etc/network/interfaces"
- line: "{{ item }}"
- state: "present"
- with_items:
- - "auto {{ contrail_vhost_device }}"
- - "iface {{ contrail_vhost_device }} inet manual"
- - "\tpre-up ifconfig {{ contrail_vhost_device }} up"
- - "\tpost-down ifconfig {{ contrail_vhost_device }} down"
- - "auto vhost0"
- - "iface vhost0 inet static"
- - "\tpre-up /opt/contrail/bin/if-vhost0"
- - "\tnetwork_name application"
- - "\taddress {{ contrail_vhost_address }}"
- - "\tnetmask {{ contrail_vhost_netmask }}"
-
-##################################################################################
-
-- name: "copy vrouter script to compute"
- template:
- src: "../../templates/vrouter-functions.sh"
- dest: "/opt/contrail/bin/vrouter-functions.sh"
-
-- name: "load vrouter driver"
- command: su -s /bin/sh -c "insmod /var/lib/dkms/vrouter/2.21/build/vrouter.ko"
- ignore_errors: true
-
-- name: "run vhost0 script"
- command: su -s /bin/sh -c "/opt/contrail/bin/if-vhost0"
- ignore_errors: true
-
-##################################################################################
-
-- name: "delete temporary files"
- file:
- dest: "{{ item }}"
- state: "absent"
- with_items:
- - "/tmp/contrail-interfaces-top"
- - "/tmp/contrail-interfaces-bottom"
-
-##################################################################################
-
-- name: "fix up contrail vrouter nodemgr config"
- ini_file:
- dest: "/etc/contrail/contrail-vrouter-nodemgr.conf"
- section: "DISCOVERY"
- option: "server"
- value: "{{ contrail_haproxy_address }}"
-
-
-##################################################################################
-########################### restart vrouter services ###########################
-
-- name: "restart supervisor service"
- service:
- name: "supervisor"
- state: "restarted"
-
-- name: "restart vrouter nodemgr"
- shell: ps aux | grep contrail-nodemgr | grep -v grep | awk '{print $2}' | xargs kill -9;
-
-- name: "restart vrouter agent"
- service:
- name: "contrail-vrouter-agent"
- state: "restarted"
-
-
-##################################################################################
-
-
-- name: "restart libvirt bin"
- service:
- name: "libvirt-bin"
- state: "restarted"
-
-#- name: "set value of nova to nova config"
-# template:
-# src: "provision/nova.j2"
-# dest: "/etc/nova/nova.conf"
-# when: install_nova
-
-#- name: "delete values from nova config"
-# ini_file:
-# dest: "/etc/nova/nova.conf"
-# section: "{{ item.section }}"
-# option: "{{ item.option }}"
-# with_items:
-# - { section: "DEFAULT", option: "quantum_auth_strategy" }
-# - { section: "DEFAULT", option: "quantum_admin_auth_url" }
-# - { section: "DEFAULT", option: "quantum_admin_tenant_name" }
-# - { section: "DEFAULT", option: "quantum_admin_username" }
-# - { section: "DEFAULT", option: "quantum_admin_password" }
-# - { section: "DEFAULT", option: "quantum_url" }
-
-#- name: "set values of neutron to nova config"
-# ini_file:
-# dest: "/etc/nova/nova.conf"
-# section: "{{ item.section }}"
-# option: "{{ item.option }}"
-# value: "{{ item.value }}"
-# state: "present"
-# with_items:
-# - { section: "DEFAULT", option: "neutron_admin_auth_url", value: "http://{{ contrail_keystone_address }}:5000/v2.0" }
-# - { section: "DEFAULT", option: "neutron_admin_username", value: "neutron" }
-# - { section: "DEFAULT", option: "neutron_admin_password", value: "{{ contrail_admin_password }}" }
-# - { section: "DEFAULT", option: "neutron_admin_tenant_name", value: "service" }
-# - { section: "DEFAULT", option: "neutron_url", value: "http://{{ contrail_haproxy_address }}:9696/" }
-# - { section: "DEFAULT", option: "neutron_url_timeout", value: "300" }
-# - { section: "DEFAULT", option: "network_api_class", value: "nova.network.neutronv2.api.API" }
-# - { section: "DEFAULT", option: "libvirt_vif_driver", value: "nova_contrail_vif.contrailvif.VRouterVIFDriver" }
-
-- name: "set values to nova config"
- ini_file:
- dest: "/etc/nova/nova.conf"
- section: "{{ item.section }}"
- option: "{{ item.option }}"
- value: "{{ item.value }}"
- with_items:
- - { section: "DEFAULT", option: "network_api_class", value: "nova_contrail_vif.contrailvif.ContrailNetworkAPI" }
-
-
-
-#######################################################################
-###################### nova plugin workaround #######################
-#######################################################################
-
-#- name: "copy nova plugs on compute"
-# copy:
-# src: "../../templates/nova_contrail_vif.tar.gz"
-# dest: "/opt/nova_contrail_vif.tar.gz"
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download nova plugin package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/open-contrail/{{ nova_plugin }}" dest=/opt/
-
-- name: "unzip nova plugs"
- command: su -s /bin/sh -c "tar xzf /opt/nova_contrail_vif.tar.gz -C /opt/"
-
-- name: "remove original nova plugs"
- shell: rm -rf /usr/lib/python2.7/dist-packages/nova_contrail_vif/
-
-- name: "use new nova plugs"
- shell: mv /opt/nova_contrail_vif/ /usr/lib/python2.7/dist-packages/nova_contrail_vif/
-
-#################################################
-
-- name: "restart nova compute"
- service:
- name: "nova-compute"
- state: "restarted"
-
-- name: "add vrouter to contrail"
- shell: "python /opt/contrail/utils/provision_vrouter.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
-
-#- name: "reboot Server"
-# shell: "shutdown -r now"
-# async: 0
-# poll: 0
-# ignore_errors: true
-# notify: Wait for server to come back
-#
-#handlers:
-#- name: "Wait for server to come back"
-# local_action:
-# module: wait_for
-# host={{ inventory_hostname }}
-# port=22
-# delay=30
-# timeout=600
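The download/untar/move sequence for the nova vif plugin relies on raw shell. A minimal sketch with the get_url and unarchive modules instead, assuming nova_plugin is the tarball file name (as the later tar command implies) and that the archive unpacks to a nova_contrail_vif directory:

    - name: "download nova vif plugin (sketch)"
      get_url:
        url: "http://{{ http_server.stdout_lines[0] }}/packages/open-contrail/{{ nova_plugin }}"
        dest: "/opt/"

    - name: "unpack it on the compute node (sketch)"
      unarchive:
        src: "/opt/{{ nova_plugin }}"
        dest: "/opt/"
        copy: no   # archive is already on the target host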
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml
deleted file mode 100755
index 615ac281..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml
+++ /dev/null
@@ -1,350 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: config
-# sudo: yes
-# tasks:
-- name: "enable supervisor config"
- file:
- path: "/etc/init/supervisor-config.override"
- state: "absent"
-
-- name: "enable neutron server"
- file:
- path: "/etc/init/neutron-server.override"
- state: "absent"
-
-# Compass is using this
-#- name: "enable haproxy"
-# replace:
-# dest: "/etc/default/haproxy"
-# regexp: "^ENABLED\\s*=.*$"
-# replace: "ENABLED=1"
-
-# Compass is using this
-#- name: "modify haproxy global configuration"
-# lineinfile:
-# dest: "/etc/haproxy/haproxy.cfg"
-# regexp: "{{ item.regexp }}"
-# line: "{{ item.line }}"
-# insertafter: "^global"
-# with_items:
-# - { regexp: "^\\s*tune.bufsize", line: "\ttune.bufsize 16384" }
-# - { regexp: "^\\s*tune.maxrewrite", line: "\ttune.maxrewrite 1024" }
-
-#chenshuai, add later
-#- name: "delete haproxy configuration for contrail"
-# shell: "sed -i -e '/^#contrail-marker-start/,/^#contrail-marker-end/d' /etc/haproxy/haproxy.cfg"
-
-#chenshuai, add later
-#- name: "create haproxy configuration for contrail"
-# template:
-# src: "provision/haproxy-contrail-cfg.j2"
-# src: "../../templates/provision/haproxy-contrail-cfg.j2"
-# dest: "/tmp/haproxy-contrail.cfg"
-
-#chenshuai, add later
-#- name: "combination of the haproxy configuration"
-# shell: "cat /tmp/haproxy-contrail.cfg >> /etc/haproxy/haproxy.cfg"
-
-#chenshuai, add later
-#- name: "delete temporary configuration file"
-# file:
-# dest: "/tmp/haproxy-contrail.cfg"
-# state: "absent"
-
-#chenshuai, add later
-#- name: "restart haproxy"
-# service:
-# name: "haproxy"
-# state: "restarted"
-
-# Compass is using this
-#- name: "create keepalived configuration"
-# template:
-# src: "../../templates/provision/keepalived-conf.j2"
-# dest: "/etc/keepalived/keepalived.conf"
-# with_indexed_items: groups['opencontrail_config']
-# when: contrail_keepalived and item.1 == inventory_hostname
-
-#- name: "restart keepalived"
-# service:
-# name: "keepalived"
-# state: "restarted"
-# when: contrail_keepalived
-
-- name: "node-common"
- include: -node-common.yml
-
-- name: "fix up contrail keystone auth config"
- template:
- src: "../../templates/provision/contrail-keystone-auth-conf.j2"
- dest: "/etc/contrail/contrail-keystone-auth.conf"
-
-- name: "fix up ifmap server log4j properties"
- template:
- src: "../../templates/provision/ifmap-log4j-properties.j2"
- dest: "/etc/ifmap-server/log4j.properties"
-
-- name: "fix up ifmap server authorization properties"
- template:
- src: "../../templates/provision/ifmap-authorization-properties.j2"
- dest: "/etc/ifmap-server/authorization.properties"
-
-- name: "fix up ifmap server basicauthusers properties"
- template:
- src: "../../templates/provision/ifmap-basicauthusers-properties.j2"
- dest: "/etc/ifmap-server/basicauthusers.properties"
-
-- name: "fix up ifmap server publisher properties"
- template:
- src: "../../templates/provision/ifmap-publisher-properties.j2"
- dest: "/etc/ifmap-server/publisher.properties"
-
-- name: "fix up contrail api config"
- template:
- src: "../../templates/provision/contrail-api-conf.j2"
- dest: "/etc/contrail/contrail-api.conf"
-
-- name: "fix up contrail api supervisord config"
- template:
- src: "../../templates/provision/contrail-api-supervisord-conf.j2"
- dest: "/etc/contrail/supervisord_config_files/contrail-api.ini"
-
-- name: "modify contrail api init script"
- lineinfile:
- dest: "/etc/init.d/contrail-api"
- regexp: "supervisorctl -s unix:///tmp/supervisord_config.sock"
- line: "supervisorctl -s unix:///tmp/supervisord_config.sock ${1} `basename ${0}:0`"
-
-- name: "fix up contrail schema config"
- template:
- src: "../../templates/provision/contrail-schema-conf.j2"
- dest: "/etc/contrail/contrail-schema.conf"
-
-- name: "fix up contrail device manager config"
- template:
- src: "../../templates/provision/contrail-device-manager-conf.j2"
- dest: "/etc/contrail/contrail-device-manager.conf"
-
-- name: "fix up contrail svc monitor config"
- template:
- src: "../../templates/provision/contrail-svc-monitor-conf.j2"
- dest: "/etc/contrail/contrail-svc-monitor.conf"
-
-- name: "fix up contrail discovery supervisord config"
- template:
- src: "../../templates/provision/contrail-discovery-supervisord-conf.j2"
- dest: "/etc/contrail/supervisord_config_files/contrail-discovery.ini"
-
-- name: "fix up contrail discovery config"
- template:
- src: "../../templates/provision/contrail-discovery-conf.j2"
- dest: "/etc/contrail/contrail-discovery.conf"
-
-- name: "modify contrail discovery init script"
- lineinfile:
- dest: "/etc/init.d/contrail-discovery"
- regexp: "supervisorctl -s unix:///tmp/supervisord_config.sock"
- line: "supervisorctl -s unix:///tmp/supervisord_config.sock ${1} `basename ${0}:0`"
-
-- name: "fix up contrail vnc api library config"
- template:
- src: "../../templates/provision/contrail-vnc-api-lib-ini.j2"
- dest: "/etc/contrail/vnc_api_lib.ini"
-
-- name: "fix up contrail config nodemgr config"
- ini_file:
- dest: "/etc/contrail/contrail-config-nodemgr.conf"
- section: "DISCOVERY"
- option: "server"
- value: "{{ contrail_haproxy_address }}"
-
-- name: "fix up contrail sudoers"
- template:
- src: "../../templates/provision/contrail-sudoers.j2"
- dest: "/etc/sudoers.d/contrail_sudoers"
- mode: 0440
-
-- name: "create directory for neutron plugins"
- file:
- dest: "/etc/neutron/plugins/opencontrail"
- state: "directory"
-
-- name: "fix up contrail plugin for nuetron"
- template:
- src: "../../templates/provision/neutron-contrail-plugin-ini.j2"
- dest: "/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
-
-- name: "modify neutron server configuration"
- lineinfile:
- dest: "/etc/default/neutron-server"
- regexp: "NEUTRON_PLUGIN_CONFIG="
- line: "NEUTRON_PLUGIN_CONFIG=\"/etc/neutron/plugins/opencontrail/ContrailPlugin.ini\""
-
-#- name: "change owner neutron log directory"
-# file:
-# dest: "/var/log/neutron"
-# state: "directory"
-# owner: "neutron"
-# group: "neutron"
-# recurse: yes
-
-- name: "set values to neutron config"
- ini_file:
- dest: "/etc/neutron/neutron.conf"
- section: "{{ item.section }}"
- option: "{{ item.option }}"
- value: "{{ item.value }}"
- with_items:
-# - { section: "DEFAULT", option: "bind_port", value: "9697" }
-# - { section: "DEFAULT", option: "auth_strategy", value: "keystone" }
-# - { section: "DEFAULT", option: "allow_overlapping_ips", value: "True" }
- - { section: "DEFAULT", option: "core_plugin", value: "neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2" }
- - { section: "DEFAULT", option: "api_extensions_path", value: "/usr/lib/python2.7/dist-packages/neutron_plugin_contrail/extensions" }
-# - { section: "DEFAULT", option: "rabbit_host", value: "{{ contrail_haproxy_address }}" }
-# - { section: "DEFAULT", option: "rabbit_port", value: "5673" }
-# - { section: "DEFAULT", option: "service_plugins", value: "neutron_plugin_contrail.plugins.opencontrail.loadbalancer.plugin.LoadBalancerPlugin" }
- - { section: "DEFAULT", option: "service_plugins", value: " " }
- - { section: "DEFAULT", option: "notify_nova_on_port_data_changes", value: "False" }
- - { section: "service_providers", option: "service_provider", value: "LOADBALANCER:Opencontrail:neutron_plugin_contrail.plugins.opencontrail.loadbalancer.driver.OpencontrailLoadbalancerDriver:default" }
- - { section: "quotas", option: "quota_driver", value: "neutron_plugin_contrail.plugins.opencontrail.quota.driver.QuotaDriver" }
-# - { section: "quotas", option: "quota_network", value: "-1" }
-# - { section: "quotas", option: "quota_subnet", value: "-1" }
-# - { section: "quotas", option: "quota_port", value: "-1" }
-# - { section: "keystone_authtoken", option: "admin_tenant_name", value: "admin" }
-# - { section: "keystone_authtoken", option: "admin_user", value: "{{ contrail_admin_user }}" }
-# - { section: "keystone_authtoken", option: "admin_password", value: "{{ contrail_admin_password }}" }
-# - { section: "keystone_authtoken", option: "auth_host", value: "{{ contrail_keystone_address }}" }
-# - { section: "keystone_authtoken", option: "auth_protocol", value: "http" }
-
-#- name: "add respawn to neutron server config"
-# lineinfile:
-# dest: "/etc/init/neutron-server.conf"
-# line: "respawn"
-# insertbefore: "pre-start script"
-
-#- name: "add respawn limit to neutron server config"
-# lineinfile:
-# dest: "/etc/init/neutron-server.conf"
-# line: "respawn limit 10 90"
-# insertafter: "respawn"
-
-- name: "restart supervisor config"
- service:
- name: "supervisor-config"
- state: "restarted"
-
-
-
-###########################################################
-############# neutron plugins workaround ##################
-###########################################################
-
-#- name: "copy neutron plugs on controller"
-# copy:
-# src: "../../templates/neutron_plugin_contrail.tar.gz"
-# dest: "/opt/neutron_plugin_contrail.tar.gz"
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download neutron_plugin_contrail package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/open-contrail/{{ neutron_plugin }}" dest=/opt/
-
-- name: "unzip neutron plugs"
- command: su -s /bin/sh -c "tar xzf /opt/neutron_plugin_contrail.tar.gz -C /opt/"
-
-- name: "remove original neutron plugs"
- shell: rm -rf /usr/lib/python2.7/dist-packages/neutron_plugin_contrail/
-
-- name: "use new neutron plugs"
- shell: mv /opt/neutron_plugin_contrail/ /usr/lib/python2.7/dist-packages/neutron_plugin_contrail/
-
-###########################################################
-
-
-
-- name: "restart neutron-server"
- service:
- name: "neutron-server"
- state: "restarted"
-
-# Compass configured
-#- name: "add neutron service"
-# shell: "keystone service-get 'neutron' || keystone service-create --name 'neutron' --type 'network' --description 'Neutron Network Service'"
-# environment:
-# OS_AUTH_URL: "http://{{ contrail_keystone_address }}:35357/v2.0"
-# OS_USERNAME: "{{ contrail_admin_user }}"
-# OS_PASSWORD: "{{ contrail_admin_password }}"
-# OS_TENANT_NAME: "admin"
-# run_once: yes
-# when: keystone_provision
-#
-#
-# Compass configured
-#- name: "add neutron endpoint"
-# shell: "keystone endpoint-list | grep -q $(keystone service-get 'neutron' | grep '| *id *|' | awk '{print $4}') || keystone endpoint-create --region 'RegionOne' --service 'neutron' --publicurl 'http://{{ contrail_haproxy_address }}:9696' --internal 'http://{{ contrail_haproxy_address }}:9696' --adminurl 'http://{{ contrail_haproxy_address }}:9696'"
-# environment:
-# OS_AUTH_URL: "http://{{ contrail_keystone_address }}:35357/v2.0"
-# OS_USERNAME: "{{ contrail_admin_user }}"
-# OS_PASSWORD: "{{ contrail_admin_password }}"
-# OS_TENANT_NAME: "admin"
-# run_once: yes
-# when: keystone_provision
-#
-#- name: "add neutron user"
-# keystone_user:
-# user: "neutron"
-# password: "{{ contrail_admin_password }}"
-# email: "neutron@example.com"
-# tenant: "service"
-# endpoint: "http://{{ contrail_keystone_address }}:35357/v2.0"
-# login_user: "{{ contrail_admin_user }}"
-# login_password: "{{ contrail_admin_password }}"
-# login_tenant_name: "admin"
-# run_once: yes
-# when: keystone_provision
-#
-#- name: "apply role to user"
-# keystone_user:
-# tenant: "service"
-# user: "neutron"
-# role: "admin"
-# endpoint: "http://{{ contrail_keystone_address }}:35357/v2.0"
-# login_user: "{{ contrail_admin_user }}"
-# login_password: "{{ contrail_admin_password }}"
-# login_tenant_name: "admin"
-# run_once: yes
-# when: keystone_provision
-
-
-
-#- name: "set values to nova config"
-# ini_file:
-# dest: "/etc/nova/nova.conf"
-# section: "{{ item.section }}"
-# option: "{{ item.option }}"
-# value: "{{ item.value }}"
-# with_items:
-# - { section: "DEFAULT", option: "network_api_class", value: "nova_contrail_vif.contrailvif.ContrailNetworkAPI" }
-
-
-#- name: "restart nova-server"
-# service:
-# name: "{{ item }}"
-# state: "restarted"
-# with_items:
-# - nova-api
-# - nova-cert
-# - nova-conductor
-# - nova-consoleauth
-# - nova-novncproxy
-# - nova-scheduler
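neutron-server is restarted at the end, but nothing confirms it is reachable again before the play moves on. A minimal sketch of an optional readiness check, assuming the default 9696 bind port:

    - name: "wait for neutron-server to come back (sketch)"
      wait_for:
        port: 9696
        delay: 5
        timeout: 300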
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-control.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-control.yml
deleted file mode 100755
index e719a461..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-control.yml
+++ /dev/null
@@ -1,69 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: control
-# sudo: yes
-# tasks:
-- name: "enable supervisor control"
- file:
- path: "/etc/init/supervisor-control.override"
- state: "absent"
-
-- name: "enable supervisor dns"
- file:
- path: "/etc/init/supervisor-dns.override"
- state: "absent"
-
-- name: "modify ifmap server basicauthusers properties for control"
- lineinfile:
- dest: "/etc/ifmap-server/basicauthusers.properties"
-# line: "{{ hostvars[item]['contrail_address' ] }}:{{ hostvars[item]['contrail_address' ] }}"
- line: "{{ ip_settings[item]['br-prv']['ip'] }}:{{ ip_settings[item]['br-prv']['ip'] }}"
- with_items: groups['opencontrail']
-
-- name: "modify ifmap server basicauthusers properties for dns"
- lineinfile:
- dest: "/etc/ifmap-server/basicauthusers.properties"
-# line: "{{ hostvars[item]['contrail_address' ] }}.dns:{{ hostvars[item]['contrail_address' ] }}.dns"
- line: "{{ ip_settings[item]['br-prv']['ip'] }}.dns:{{ ip_settings[item]['br-prv']['ip'] }}.dns"
- with_items: groups['opencontrail']
-
-- name: "node-common"
- include: -node-common.yml
-
-- name: "fix up contrail control config"
- template:
- src: "../../templates/provision/contrail-control-conf.j2"
- dest: "/etc/contrail/contrail-control.conf"
-
-- name: "fix up contrail dns config"
- template:
- src: "../../templates/provision/contrail-dns-conf.j2"
- dest: "/etc/contrail/contrail-dns.conf"
-
-- name: "fix up contrail control nodemgr config"
- ini_file:
- dest: "/etc/contrail/contrail-control-nodemgr.conf"
- section: "DISCOVERY"
- option: "server"
- value: "{{ contrail_haproxy_address }}"
-
-- name: "modify dns configuration"
- replace:
- dest: "/etc/contrail/dns/{{ item }}"
- regexp: "secret \"secret123\""
- replace: "secret \"xvysmOR8lnUQRBcunkC6vg==\""
- with_items:
- - "contrail-rndc.conf"
- - "contrail-named.conf"
-
-- name: "restart supervisor control"
- service:
- name: "supervisor-control"
- state: "restarted"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-database.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-database.yml
deleted file mode 100755
index 9c99270d..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-database.yml
+++ /dev/null
@@ -1,209 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: database
-# sudo: yes
-# tasks:
-
-
-- name: "enable supervisor database"
-# sudo: True
- file:
- path: "/etc/init/supervisor-database.override"
- state: "absent"
-
-
-- name: "-node-common"
-# sudo: True
- include: -node-common.yml
-
-
-- name: "update hosts"
-# sudo: True
- lineinfile:
- dest: "/etc/hosts"
-# regexp: "^{{ contrail_address }}\t{{ ansible_hostname }}( .*)?$"
-# line: "{{ contrail_address }}\t{{ ansible_hostname }}\\1"
- regexp: "^{{ contrail_address }}\t{{ inventory_hostname }}( .*)?$"
- line: "{{ contrail_address }}\t{{ inventory_hostname }}\\1"
- backrefs: yes
-
-
-- name: "make directory for contrail analytics"
-# sudo: True
- file:
- path: "/var/lib/cassandra/data/ContrailAnalytics"
- state: "directory"
-
-
-- name: "modify cassandra conf"
-# sudo: True
- lineinfile:
- dest: "/etc/cassandra/cassandra.yaml"
- regexp: "{{ item.regexp }}"
- line: "{{ item.line }}"
- with_items:
- - { regexp: "^(#(\\s*)?)?listen_address:", line: "listen_address: {{ contrail_address }}"}
- - { regexp: "^(#(\\s*)?)?cluster_name:", line: "cluster_name: \"Contrail\"" }
- - { regexp: "^(#(\\s*)?)?rpc_address:", line: "rpc_address: {{ contrail_address }}" }
- - { regexp: "^(#(\\s*)?)?num_tokens:", line: "num_tokens: 256" }
- - { regexp: "^(#(\\s*)?)?initial_token:", line: "# initial_token:" }
-
-
-
-- name: "set first database host seed"
-# sudo: True
- set_fact:
-# dbseeds: "{{ hostvars[item.1][ contrail_address ] }}"
- dbseeds: "{{ ip_settings[item.1]['br-prv']['ip'] }}"
- with_indexed_items: groups['opencontrail']
- when: item.0 == 0
-
-
-
-
-
-- name: "set second database host seed"
-# sudo: True
- set_fact:
-# dbseeds: "{{ dbseeds }},{{ hostvars[item.1]['contrail_address'] }}"
- dbseeds: "{{ dbseeds }},{{ ip_settings[item.1]['br-prv']['ip'] }}"
- with_indexed_items: groups['opencontrail']
- when: item.0 == 1
-
-
-- name: "modify seeds list in cassandra conf"
-# sudo: True
- replace:
- dest: "/etc/cassandra/cassandra.yaml"
- regexp: "- seeds:.*$"
- replace: "- seeds: {{ dbseeds }}"
-
-
-- name: "modify cassandra env"
-# sudo: True
- replace:
- dest: "/etc/cassandra/cassandra-env.sh"
- regexp: "{{ item.regexp }}"
- replace: "{{ item.replace }}"
- with_items:
- - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintGCDetails\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintGCDetails\"" }
- - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -Xss\\d+k\"", replace: "JVM_OPTS=\"$JVM_OPTS -Xss512k\"" }
- - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintGCDateStamps\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintGCDateStamps\"" }
- - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintHeapAtGC\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintHeapAtGC\"" }
- - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintTenuringDistribution\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintTenuringDistribution\"" }
- - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintGCApplicationStoppedTime\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintGCApplicationStoppedTime\"" }
- - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:\\+PrintPromotionFailure\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:+PrintPromotionFailure\"" }
- - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -XX:PrintFLSStatistics=1\"", replace: "JVM_OPTS=\"$JVM_OPTS -XX:PrintFLSStatistics=1\"" }
- - { regexp: "(#(\\s*)?)?JVM_OPTS=\"\\$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date \\+%s`\\.log\"", replace: "JVM_OPTS=\"$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log\"" }
-
-
-- name: "modify zookeeper conf"
-# sudo: True
- lineinfile:
- dest: "/etc/zookeeper/conf/zoo.cfg"
- line: "{{ item }}"
- with_items:
- - "maxSessionTimeout=120000"
- - "autopurge.purgeInterval=3"
-
-
-- name: "modify zookeeper log4j properties"
-# sudo: True
- lineinfile:
- dest: "/etc/zookeeper/conf/log4j.properties"
- regexp: "(log4j.appender.ROLLINGFILE.MaxBackupIndex=.*)$"
- line: "\\1"
- backrefs: yes
-
-
-- name: "add server addresses to zookeeper config"
-# sudo: True
- lineinfile:
- dest: "/etc/zookeeper/conf/zoo.cfg"
- regexp: "server.{{ item.0 + 1 }}="
-# line: "server.{{ item.0 + 1 }}={{ hostvars[item.1]['contrail_address'] }}:2888:3888"
- line: "server.{{ item.0 + 1 }}={{ ip_settings[item.1]['br-prv']['ip'] }}:2888:3888"
- with_indexed_items: groups['opencontrail']
-
-
-- name: "set zookeeper unique id"
-# sudo: True
- template:
- src: "../../templates/provision/zookeeper-unique-id.j2"
- dest: "/var/lib/zookeeper/myid"
- with_indexed_items: groups['opencontrail']
- when: item.1 == inventory_hostname
-
-
-- name: "remove kafka ini file"
-# sudo: True
- file:
- path: "/etc/contrail/supervisord_database_files/kafka.ini"
- state: "absent"
-
-
-- name: "set first zookeeper host address"
-# sudo: True
- set_fact:
-# zkaddrs: "{{ hostvars[item.1]['contrail_address'] }}:2181"
- zkaddrs: "{{ ip_settings[item.1]['br-prv']['ip'] }}:2181"
- with_indexed_items: groups['opencontrail']
- when: item.0 == 0
-
-
-- name: "set second or more zookeeper host addresses"
-# sudo: True
- set_fact:
-# zkaddrs: "{{ zkaddrs }},{{ hostvars[item.1]['contrail_address'] }}:2181"
- zkaddrs: "{{ zkaddrs }},{{ ip_settings[item.1]['br-prv']['ip'] }}:2181"
- with_indexed_items: groups['opencontrail']
- when: item.0 > 0
-
-
-- name: "modify zookeeper host addresses in kafka properties"
-# sudo: True
- lineinfile:
- dest: "/usr/share/kafka/config/server.properties"
- regexp: "zookeeper.connect="
- line: "zookeeper.connect={{ zkaddrs }}"
-
-
-- name: "modify kafka properties"
-# sudo: True
- lineinfile:
- dest: "/usr/share/kafka/config/server.properties"
- regexp: "default.replication.factor="
- line: "default.replication.factor=2"
-
-
-- name: "fix up contrail database nodemgr config"
-# sudo: True
- ini_file:
- dest: "/etc/contrail/contrail-database-nodemgr.conf"
- section: "{{ item.section }}"
- option: "{{ item.option }}"
- value: "{{ item.value }}"
- with_items:
- - { section: "DEFAULT", option: "hostip", value: "{{ contrail_address }}" }
- - { section: "DISCOVERY", option: "server", value: "{{ contrail_haproxy_address }}" }
-
-
-- name: "restart zookeeper"
-# sudo: True
- service:
- name: "zookeeper"
- state: "restarted"
-
-
-- name: "restart supervisor database"
-# sudo: True
- service:
- name: "supervisor-database"
- state: "restarted"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-increase-limits.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-increase-limits.yml
deleted file mode 100755
index 89a4966f..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-increase-limits.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: [database, config, control, collector]
-# sudo: yes
-# tasks:
-- name: "delete line"
-# sudo: True
- lineinfile:
- dest: "/etc/limits.conf"
- regexp: "^root\\s*soft\\s*nproc\\s*.*"
- state: "absent"
-
-- name: "check EOF"
-# sudo: True
- lineinfile:
- dest: "/etc/security/limits.conf"
- regexp: "^# End of file"
- line: "# End of file"
-
-- name: "add lines"
-# sudo: True
- lineinfile:
- dest: "/etc/security/limits.conf"
- regexp: "{{ item.regexp }}"
- line: "{{ item.line }}"
- insertbefore: "^# End of file"
- with_items:
- - { regexp: "^root\\s*hard\\s*nofile\\s*.*", line: "root hard nofile 65535" }
- - { regexp: "^root\\s*soft\\s*nofile\\s*.*", line: "root soft nofile 65535" }
- - { regexp: "^\\*\\s*hard\\s*nofile\\s*.*", line: "* hard nofile 65535" }
- - { regexp: "^\\*\\s*soft\\s*nofile\\s*.*", line: "* soft nofile 65535" }
- - { regexp: "^\\*\\s*hard\\s*nproc\\s*.*", line: "* hard nproc 65535" }
- - { regexp: "^\\*\\s*soft\\s*nproc\\s*.*", line: "* soft nproc 65535" }
-
-- name: change value of sysctl fs.file-max
-# sudo: True
- sysctl:
- name: "fs.file-max"
- value: "65535"
-
-- name: "find supervisord conf files"
-# sudo: True
- shell: "find /etc/contrail -name supervisor*.conf -type f"
- register: supervisordconfs
- changed_when: no
-
-- name: "modify supervisord conf"
-# sudo: True
- replace:
- dest: "{{ item }}"
- regexp: "^minfds=\\d*"
- replace: "minfds=10240"
- with_items: supervisordconfs.stdout_lines
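The limits.conf edits above can also be expressed with the pam_limits module, assuming an Ansible release that ships it; this is only a sketch of the nofile/nproc part:

    - name: "raise file and process limits (sketch)"
      pam_limits:
        domain: "{{ item.domain }}"
        limit_type: "{{ item.type }}"
        limit_item: "{{ item.item }}"
        value: "65535"
      with_items:
        - { domain: "root", type: "hard", item: "nofile" }
        - { domain: "root", type: "soft", item: "nofile" }
        - { domain: "*", type: "hard", item: "nofile" }
        - { domain: "*", type: "soft", item: "nofile" }
        - { domain: "*", type: "hard", item: "nproc" }
        - { domain: "*", type: "soft", item: "nproc" }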
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml
deleted file mode 100644
index d3426590..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: config
-# sudo: yes
-# tasks:
-- name: turn off rabbitmq server on control node
- shell: sed -i '/rabbitmq-server/d' /opt/service ;
-
-- name: "start supervisor support service"
- service:
- name: "supervisor-support-service"
- state: "started"
-
-- name: "stop rabbitmq server via supervisor"
- supervisorctl:
- name: "rabbitmq-server"
- state: "stopped"
- server_url: "unix:///tmp/supervisord_support_service.sock"
-
-- include: -rabbitmq-stop.yml
-
-- name: "update hosts"
- lineinfile:
- dest: "/etc/hosts"
- line: "{{ ip_settings[item]['br-prv']['ip'] }}\t{{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_hostname'] }}-ctrl"
- with_items: groups['opencontrail']
-
-- name: "fix up rabbitmq env"
- template:
- src: "../../templates/provision/rabbitmq-env-conf.j2"
- dest: "/etc/rabbitmq/rabbitmq-env.conf"
-
-- name: "fix up rabbitmq config for single node"
- template:
- src: "../../templates/provision/rabbitmq-conf-single.j2"
- dest: "/etc/rabbitmq/rabbitmq.config"
- when: groups['opencontrail'][1] is not defined
-
-- name: fix up rabbitmq config for multi nodes
- template:
- src: "../../templates/provision/rabbitmq-conf.j2"
- dest: "/etc/rabbitmq/rabbitmq.config"
- when: groups['opencontrail'][1] is defined
-
-- include: -rabbitmq-stop.yml
-
-#- name: "create cookie uuid temporary"
-# local_action:
-# module: "template"
-# src: "templates/rabbitmq-cookie.j2"
-# dest: "/tmp/tmp-rabbitmq-cookie"
-# run_once: yes
-#
-#- name: "update cookie uuid"
-# copy:
-# src: "/tmp/tmp-rabbitmq-cookie"
-# dest: "/var/lib/rabbitmq/.erlang.cookie"
-# owner: "rabbitmq"
-# group: "rabbitmq"
-# mode: 0400
-#
-#- name: "delete temporary cookie uuid"
-# local_action:
-# module: "file"
-# dest: "/tmp/tmp-rabbitmq-cookie"
-# state: "absent"
-# run_once: yes
-
-- name: "start rabbitmq server"
- service:
- name: "rabbitmq-server"
- state: "started"
-
-- name: add rabbitmq user
- shell: >
- rabbitmqctl add_user {{ RABBIT_USER }} {{ RABBIT_PASS }} ;
- rabbitmqctl set_permissions {{ RABBIT_USER }} ".*" ".*" ".*" ;
-
-- name: "check rabbitmq server"
- shell: netstat -lpen --tcp | grep beam | grep 5672; while [ $? -ne 0 ]; do sleep 10; netstat -lpen --tcp | grep beam | grep 5672; done
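The rabbitmqctl shell step is not idempotent: add_user fails if the account already exists. A minimal sketch with the rabbitmq_user module, which creates or updates the user and its permissions in one pass:

    - name: "create rabbitmq service user (sketch)"
      rabbitmq_user:
        user: "{{ RABBIT_USER }}"
        password: "{{ RABBIT_PASS }}"
        vhost: "/"
        configure_priv: ".*"
        read_priv: ".*"
        write_priv: ".*"
        state: present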
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-route.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-route.yml
deleted file mode 100755
index 01687281..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-route.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: all
-# sudo: yes
-# tasks:
-- name: "delete existing route file"
-# sudo: True
- file:
- path: "/etc/network/if-up.d/routes"
- state: absent
- when: contrail_route
-
-- name: "create route file"
-# sudo: True
- file:
- path: "/etc/network/if-up.d/routes"
- owner: "root"
- mode: 0755
- state: touch
- when: contrail_route
-
-
-- name: "add template"
-# sudo: True
- lineinfile:
- dest: "/etc/network/if-up.d/routes"
- line: "{{ item }}"
- with_items:
- - "#!/bin/bash"
- - "[ \"$IFACE\" != {{ contrail_route[0].device }} ] && exit 0"
- when: contrail_route
-
-
-- name: "add static route"
-# sudo: True
- lineinfile:
- dest: "/etc/network/if-up.d/routes"
- line: "ip route add {{ item.ip }} via {{ item.gw }} dev {{ item.device }}"
- state: "present"
- with_items:
- - "{{ contrail_route }}"
- when: contrail_route
-
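The delete/touch/append sequence above can be shortened: lineinfile can create the hook file with the right mode on its own. A minimal sketch; unlike the original it does not wipe a pre-existing file first:

    - name: "write the if-up route hook (sketch)"
      lineinfile:
        dest: "/etc/network/if-up.d/routes"
        create: yes
        mode: 0755
        line: "{{ item }}"
      with_items:
        - "#!/bin/bash"
        - "[ \"$IFACE\" != {{ contrail_route[0].device }} ] && exit 0"
      when: contrail_route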
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml
deleted file mode 100755
index 3ae0bec9..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: tsn
-# sudo: yes
-# tasks:
-- name: "create temporary directory for ssl files"
- local_action:
- module: "file"
- dest: "/tmp/tmp-toragent-{{ item }}"
- state: "directory"
- with_items:
- - "certs"
- - "private"
- run_once: yes
-
-- name: "create ssl files"
- local_action: "shell openssl req -new -x509 -days 3650 -text -sha256 -newkey rsa:4096 -nodes -subj \"/C=US/ST=Global/O={{ item.1.vendor_name }}/CN={{ ansible_fqdn }}\" -keyout /tmp/tmp-toragent-private/tor.{{ item.0 }}.privkey.pem -out /tmp/tmp-toragent-certs/tor.{{ item.0 }}.cert.pem"
- with_indexed_items: contrail_tor_agents
- run_once: yes
-
-- name: "set tor agent list"
- set_fact:
- toragent_index: "{{ item.0 }}"
- toragent_params: "{{ item.1 }}"
- register: contrail_toragent_list
- with_indexed_items: contrail_tor_agents
- when: inventory_hostname in item.1.tsn_names
-
-- name: "fix up tor agent conf"
- template:
- src: "templates/contrail-tor-agent-conf.j2"
- dest: "/etc/contrail/contrail-tor-agent-{{ item.ansible_facts.toragent_index }}.conf"
- with_items: contrail_toragent_list.results
-
-- name: "fix up tor agent ini"
- template:
- src: "provision/contrail-tor-agent-ini.j2"
- dest: "/etc/contrail/supervisord_vrouter_files/contrail-tor-agent-{{ item.ansible_facts.toragent_index }}.ini"
- with_items: contrail_toragent_list.results
-
-- name: "copy init script"
- shell: "cp /etc/init.d/contrail-vrouter-agent /etc/init.d/contrail-tor-agent-{{ item.ansible_facts.toragent_index }}"
- with_items: contrail_toragent_list.results
-
-- name: "copy ssl certs"
- copy:
- src: "/tmp/tmp-toragent-certs/tor.{{ item.ansible_facts.toragent_index }}.cert.pem"
- dest: "/etc/contrail/ssl/certs/tor.{{ item.ansible_facts.toragent_index }}.cert.pem"
- with_items: contrail_toragent_list.results
-
-- name: "copy ssl private"
- copy:
- src: "/tmp/tmp-toragent-private/tor.{{ item.ansible_facts.toragent_index }}.privkey.pem"
- dest: "/etc/contrail/ssl/private/tor.{{ item.ansible_facts.toragent_index }}.privkey.pem"
- with_items: contrail_toragent_list.results
-
-- name: "copy ca cert"
- copy:
- src: "files/cacert.pem"
- dest: "/etc/contrail/ssl/certs/cacert.pem"
-
-- name: "delete temporary directory"
- local_action:
- module: "file"
- dest: "/tmp/tmp-toragent-{{ item }}"
- state: "absent"
- with_items:
- - "certs"
- - "private"
- run_once: yes
-
-- name: "add tor agent to contrail"
- shell: "python /opt/contrail/utils/provision_vrouter.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --host_name {{ inventory_hostname }}-{{ item.ansible_facts.toragent_index }} --host_ip {{ contrail_address }} --router_type tor-agent"
- with_items: contrail_toragent_list.results
-
-- name: "add device to contrail"
- shell: "python /opt/contrail/utils/provision_physical_device.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --device_name {{ item.ansible_facts.toragent_params.name }} --vendor_name {{ item.ansible_facts.toragent_params.vendor_name }} --product_name {{ item.ansible_facts.toragent_params.product_name }} --device_mgmt_ip {{ item.ansible_facts.toragent_params.address }} --device_tunnel_ip {{ item.ansible_facts.toragent_params.tunnel_address }} --device_tor_agent {{ inventory_hostname }}-{{ item.ansible_facts.toragent_index }} --device_tsn {{ inventory_hostname }}"
- with_items: contrail_toragent_list.results
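The "create ssl files" task above shells out to openssl once per TOR agent. A rough Python sketch of that same call, where the agent list, FQDN and /tmp paths are illustrative stand-ins for contrail_tor_agents, ansible_fqdn and the temporary directories created earlier:

import subprocess

tor_agents = [{"vendor_name": "ovs-vendor"}]  # stand-in for contrail_tor_agents
fqdn = "tsn01.example.com"                    # stand-in for ansible_fqdn

for index, agent in enumerate(tor_agents):
    # One self-signed key/cert pair per TOR agent, same openssl flags as above.
    subprocess.run(
        ["openssl", "req", "-new", "-x509", "-days", "3650", "-text",
         "-sha256", "-newkey", "rsa:4096", "-nodes",
         "-subj", f"/C=US/ST=Global/O={agent['vendor_name']}/CN={fqdn}",
         "-keyout", f"/tmp/tmp-toragent-private/tor.{index}.privkey.pem",
         "-out", f"/tmp/tmp-toragent-certs/tor.{index}.cert.pem"],
        check=True,
    )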
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml
deleted file mode 100755
index 8bd6dc06..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml
+++ /dev/null
@@ -1,104 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: tsn
-# sudo: yes
-# tasks:
-
-- name: "enable supervisor vrouter"
- file:
- path: "/etc/init/supervisor-vrouter.override"
- state: "absent"
-
-- include: -node-common.yml
-
-- name: "fix up vrouter nodemgr param"
- template:
- src: "provision/vrouter-nodemgr-param.j2"
- dest: "/etc/contrail/vrouter_nodemgr_param"
-
-- name: "set contrail device name for ansible"
- set_fact:
- contrail_ansible_device: "ansible_{{ contrail_vhost_device }}"
-
-- name: "fix up default pmac"
- template:
- src: "provision/default-pmac.j2"
- dest: "/etc/contrail/default_pmac"
-
-- name: "copy agent param config from template"
- shell: "cp /etc/contrail/agent_param.tmpl /etc/contrail/agent_param"
-
-- name: "modify agent param config"
- lineinfile:
- dest: "/etc/contrail/agent_param"
- regexp: "dev=__DEVICE__"
- line: "dev={{ contrail_vhost_device }}"
-
-- name: "set vrouter agent mode"
- set_fact:
- contrail_vrouter_mode: "tsn"
-
-- name: "fix up contrail vrouter agent config"
- template:
- src: "../../templates/provision/contrail-vrouter-agent-conf.j2"
- dest: "/etc/contrail/contrail-vrouter-agent.conf"
-
-- name: "delete lines for contrail interface"
- shell: "{{ item }}"
- with_items:
- - "sed -e '/auto {{ contrail_vhost_device }}/,$d' /etc/network/interfaces > /tmp/contrail-interfaces-top"
- - "sed -n -e '/auto {{ contrail_vhost_device }}/,$p' /etc/network/interfaces > /tmp/contrail-interfaces-bottom"
- - "sed -i -e '/auto {{ contrail_vhost_device }}/d' /tmp/contrail-interfaces-bottom"
- - "sed -i -n -e '/auto .*/,$p' /tmp/contrail-interfaces-bottom"
- - "cat /tmp/contrail-interfaces-top /tmp/contrail-interfaces-bottom > /etc/network/interfaces"
-
-- name: "delete lines for vrouter interface"
- shell: "{{ item }}"
- with_items:
- - "sed -e '/auto vhost0/,$d' /etc/network/interfaces > /tmp/contrail-interfaces-top"
- - "sed -n -e '/auto vhost0/,$p' /etc/network/interfaces > /tmp/contrail-interfaces-bottom"
- - "sed -i -e '/auto vhost0/d' /tmp/contrail-interfaces-bottom"
- - "sed -i -n -e '/auto .*/,$p' /tmp/contrail-interfaces-bottom"
- - "cat /tmp/contrail-interfaces-top /tmp/contrail-interfaces-bottom > /etc/network/interfaces"
-
-- name: "configure interface"
- lineinfile:
- dest: "/etc/network/interfaces"
- line: "{{ item }}"
- state: "present"
- with_items:
- - "auto {{ contrail_vhost_device }}"
- - "iface {{ contrail_vhost_device }} inet manual"
- - "\tpre-up ifconfig {{ contrail_vhost_device }} up"
- - "\tpost-down ifconfig {{ contrail_vhost_device }} down"
- - "auto vhost0"
- - "iface vhost0 inet static"
- - "\tpre-up /opt/contrail/bin/if-vhost0"
- - "\tnetwork_name application"
- - "\taddress {{ contrail_vhost_address }}"
- - "\tnetmask {{ contrail_vhost_netmask }}"
-
-- name: "delete temporary files"
- file:
- dest: "{{ item }}"
- state: "absent"
- with_items:
- - "/tmp/contrail-interfaces-top"
- - "/tmp/contrail-interfaces-bottom"
-
-- name: "fix up contrail vrouter nodemgr config"
- ini_file:
- dest: "/etc/contrail/contrail-vrouter-nodemgr.conf"
- section: "DISCOVERY"
- option: "server"
- value: "{{ contrail_haproxy_address }}"
-
-- name: "add tsn to contrail"
- shell: "python /opt/contrail/utils/provision_vrouter.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }} --router_type tor-service-node"
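The two sed pipelines above splice a device's stanza (everything from its "auto <device>" line up to the next "auto" line) out of /etc/network/interfaces before the new vhost0 stanza is appended. A hedged Python sketch of that splice, with the device name and the appended lines shortened for illustration:

# Drop the stanza that starts at "auto <device>" and runs until the next
# "auto ..." line, then append the replacement stanza.
def drop_stanza(lines, device):
    out, skipping = [], False
    for line in lines:
        if line.startswith("auto " + device):
            skipping = True           # start of the stanza being removed
            continue
        if skipping and line.startswith("auto "):
            skipping = False          # next stanza begins, keep from here on
        if not skipping:
            out.append(line)
    return out

with open("/etc/network/interfaces") as f:
    kept = drop_stanza(f.read().splitlines(), "eth1")

kept += ["auto vhost0", "iface vhost0 inet static",
         "\tpre-up /opt/contrail/bin/if-vhost0"]

with open("/etc/network/interfaces", "w") as f:
    f.write("\n".join(kept) + "\n")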
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-webui.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-webui.yml
deleted file mode 100755
index 525f4334..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-webui.yml
+++ /dev/null
@@ -1,75 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#- hosts: webui
-# sudo: yes
-# tasks:
-
-- name: "enable supervisor webui"
- file:
- path: "/etc/init/supervisor-webui.override"
- state: "absent"
-
-- name: "redis-setup"
- include: -redis-setup.yml
-
-- name: "node-common"
- include: -node-common.yml
-
-- name: "set first cassandra host address"
- set_fact:
-# cassandra_addrs: "'{{ hostvars[item.1]['contrail_address'] }}'"
- cassandra_addrs: "'{{ ip_settings[item.1]['br-prv']['ip'] }}'"
- with_indexed_items: groups['opencontrail']
- when: item.0 == 0
-
-- name: "set second or more cassandra host addresses"
- set_fact:
-# cassandra_addrs: "{{ cassandra_addrs }}, '{{ hostvars[item.1]['contrail_address'] }}'"
- cassandra_addrs: "{{ cassandra_addrs }}, '{{ ip_settings[item.1]['br-prv']['ip'] }}'"
- with_indexed_items: groups['opencontrail']
- when: item.0 > 0
-
-- name: "modify webui global js"
- lineinfile:
- dest: "/etc/contrail/config.global.js"
- regexp: "{{ item.regexp }}"
- line: "{{ item.line }}"
- with_items:
- - { regexp: "^\\s*config.networkManager.ip", line: "config.networkManager.ip = '{{ contrail_haproxy_address }}';" }
- - { regexp: "^\\s*config.imageManager.ip", line: "config.imageManager.ip = '{{ contrail_keystone_address }}';" }
- - { regexp: "^\\s*config.computeManager.ip", line: "config.computeManager.ip = '{{ contrail_keystone_address }}';" }
- - { regexp: "^\\s*config.identityManager.ip", line: "config.identityManager.ip = '{{ contrail_keystone_address }}';" }
- - { regexp: "^\\s*config.storageManager.ip", line: "config.storageManager.ip = '{{ contrail_keystone_address }}';" }
- - { regexp: "^\\s*config.cnfg.server_ip", line: "config.cnfg.server_ip = '{{ contrail_haproxy_address }}';" }
- - { regexp: "^\\s*config.analytics.server_ip", line: "config.analytics.server_ip = '{{ contrail_haproxy_address }}';" }
-# TODO: after updating ansible to version 2.2 this playbook fails the test with ERROR: "'cassandra_addrs' is undefined".
-# - { regexp: "^\\s*config.cassandra.server_ips", line: "config.cassandra.server_ips = [{{ cassandra_addrs }}];" }
-
-- name: "modify webui userauth js"
- lineinfile:
- dest: "/etc/contrail/contrail-webui-userauth.js"
- regexp: "{{ item.regexp }}"
- line: "{{ item.line }}"
- with_items:
- - { regexp: "^\\s*auth.admin_user", line: "auth.admin_user = '{{ contrail_admin_user }}';" }
- - { regexp: "^\\s*auth.admin_password", line: "auth.admin_password = '{{ contrail_admin_password }}';" }
- - { regexp: "^\\s*auth.admin_tenant_name", line: "auth.admin_tenant_name = 'admin';" }
-
-- name: "create symbolic link from nodejs to node"
- file:
- src: "/usr/bin/node"
- dest: "/usr/bin/nodejs"
- state: "link"
-
-- name: "restart supervisor webui"
- service:
- name: "supervisor-webui"
- state: "restarted"
-
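The two set_fact tasks above grow cassandra_addrs one host at a time (the first address, then comma-appended ones). The same quoted, comma-separated value can be produced in a single step, sketched here in Python with an illustrative ip_settings stand-in:

ip_settings = {
    "host1": {"br-prv": {"ip": "10.1.0.11"}},
    "host2": {"br-prv": {"ip": "10.1.0.12"}},
}
opencontrail_hosts = ["host1", "host2"]

# Same result as the incremental set_fact accumulation above.
cassandra_addrs = ", ".join(
    "'{}'".format(ip_settings[h]["br-prv"]["ip"]) for h in opencontrail_hosts
)
print(cassandra_addrs)  # "'10.1.0.11', '10.1.0.12'"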
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml
deleted file mode 100755
index 836cb78b..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: del ovs bridge
- shell: ovs-vsctl del-br br-int; ovs-vsctl del-br br-tun; ovs-vsctl del-br br-prv;
-
-- name: remove ovs and ovs-plugin daemon
- shell: >
- sed -i '/neutron-openvswitch-agent/d' /opt/service ;
- sed -i '/openvswitch-switch/d' /opt/service ;
-
-- name: stop ovs and ovs-plugin
- shell: service openvswitch-switch stop; service neutron-openvswitch-agent stop;
-
-- name: remove ovs and ovs-plugin files
- shell: >
- update-rc.d -f neutron-openvswitch-agent remove;
- mv /etc/init.d/neutron-openvswitch-agent /home/neutron-openvswitch-agent;
- mv /etc/init/neutron-openvswitch-agent.conf /home/neutron-openvswitch-agent.conf;
- update-rc.d -f openvswitch-switch remove ;
- mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ;
- mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ;
- update-rc.d -f neutron-ovs-cleanup remove ;
- mv /etc/init.d/neutron-ovs-cleanup /home/neutron-ovs-cleanup ;
- mv /etc/init/neutron-ovs-cleanup.conf /home/neutron-ovs-cleanup.conf ;
-
-- name: remove ovs kernel module
- shell: rmmod vport_vxlan; rmmod openvswitch;
- ignore_errors: True
-
-- name: copy recovery script
- copy: src={{ item }} dest=/opt/setup_networks
- with_items:
-# - recover_network_opencontrail.py
- - setup_networks_opencontrail.py
-
-#- name: recover external script
-# shell: python /opt/setup_networks/recover_network_opencontrail.py
-
-- name: modify net-init
- shell: sed -i 's/setup_networks.py/setup_networks_opencontrail.py/g' /etc/init.d/net_init
-
-- name: resolve dual NIC problem
- shell: >
- echo "net.ipv4.conf.all.arp_ignore=1" >> /etc/sysctl.conf ;
- /sbin/sysctl -p ;
- echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore ;
-
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/install/override.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/install/override.j2
deleted file mode 100755
index 2905494b..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/install/override.j2
+++ /dev/null
@@ -1 +0,0 @@
-manual
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2
deleted file mode 100755
index 21fb733b..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2
+++ /dev/null
@@ -1,31 +0,0 @@
-[DEFAULTS]
-host_ip = {{ contrail_haproxy_address }}
-rest_api_ip = {{ contrail_haproxy_address }}
-rest_api_port = 8081
-#cassandra_server_list = {% for cur_host in groups['controller'] %}{{ ip_settings[cur_host]['mgmt']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-# chenshuai: this form is also correct, but the following one is better; kept here for the record
-cassandra_server_list = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-
-collectors = {{ contrail_haproxy_address }}:8086
-http_server_port = 8090
-log_file = /var/log/contrail/contrail-analytics-api.log
-log_level = SYS_NOTICE
-log_local = 1
-
-# Time-to-live in hours of the various data stored by collector into
-# cassandra
-# analytics_config_audit_ttl, if not set (or set to -1), defaults to analytics_data_ttl
-# analytics_statistics_ttl, if not set (or set to -1), defaults to analytics_data_ttl
-# analytics_flow_ttl, if not set (or set to -1), defaults to analytics_statsdata_ttl
-analytics_data_ttl = 48
-analytics_config_audit_ttl = -1
-analytics_statistics_ttl = -1
-analytics_flow_ttl = -1
-
-[DISCOVERY]
-disc_server_ip = {{ contrail_haproxy_address }}
-disc_server_port = 5998
-
-[REDIS]
-redis_server_port = 6379
-redis_query_port = 6379
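The cassandra_server_list line above builds a space-separated host:port list with an inline Jinja2 for-loop (the commented alternative iterates the controller group instead). Rendering it outside Ansible shows what the expression produces; the host data below is an illustrative stand-in and the sketch assumes the jinja2 package is available:

from jinja2 import Template

ctx = {
    "groups": {"opencontrail": ["host1", "host2"]},
    "ip_settings": {
        "host1": {"br-prv": {"ip": "10.1.0.11"}},
        "host2": {"br-prv": {"ip": "10.1.0.12"}},
    },
}

# The same loop expression as in the template above.
loop_form = Template(
    "{% for cur_host in groups['opencontrail'] %}"
    "{{ ip_settings[cur_host]['br-prv']['ip'] }}:9160"
    "{% if not loop.last %} {% endif %}{% endfor %}"
)
print(loop_form.render(**ctx))  # 10.1.0.11:9160 10.1.0.12:9160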
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2
deleted file mode 100755
index 19004ce3..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-[DEFAULTS]
-listen_ip_addr = {{ contrail_haproxy_address }}
-listen_port = 8082
-ifmap_server_ip = {{ contrail_haproxy_address }}
-ifmap_server_port = 8443
-ifmap_username = api-server
-ifmap_password = api-server
-zk_server_ip = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
-
-cassandra_server_list = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-
-disc_server_ip = {{ contrail_haproxy_address }}
-disc_server_port = 5998
-rabbit_server = {{ contrail_haproxy_address }}
-rabbit_port = 5672
-rabbit_user = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-multi_tenancy = True
-list_optimization_enabled = True
-log_file = /var/log/contrail/contrail-api.log
-log_level = SYS_NOTICE
-log_local = 1
-auth = keystone
-
-[SECURITY]
-use_certs = False
-keyfile = /etc/contrail/ssl/private_keys/apiserver_key.pem
-certfile = /etc/contrail/ssl/certs/apiserver.pem
-ca_certs = /etc/contrail/ssl/certs/ca.pem
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-supervisord-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-supervisord-conf.j2
deleted file mode 100755
index ad3e0387..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-supervisord-conf.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-[program:contrail-api]
-command=/usr/bin/contrail-api --conf_file /etc/contrail/contrail-api.conf --conf_file /etc/contrail/contrail-keystone-auth.conf --worker_id %(process_num)s
-numprocs=1
-process_name=%(process_num)s
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-api-%(process_num)s-stdout.log
-stderr_logfile=/dev/null
-priority=440
-autostart=true
-killasgroup=true
-stopsignal=KILL
-exitcodes=0
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2
deleted file mode 100755
index 11509603..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2
+++ /dev/null
@@ -1,86 +0,0 @@
-[DEFAULT]
-# Everything in this section is optional
-
-# Time-to-live in hours of the various data stored by collector into
-# cassandra
-# analytics_config_audit_ttl, if not set (or set to -1), defaults to analytics_data_ttl
-# analytics_statistics_ttl, if not set (or set to -1), defaults to analytics_data_ttl
-# analytics_flow_ttl, if not set (or set to -1), defaults to analytics_statsdata_ttl
-analytics_data_ttl = 48
-analytics_config_audit_ttl = -1
-analytics_statistics_ttl = -1
-analytics_flow_ttl = -1
-
-# IP address and port to be used to connect to cassandra.
-# Multiple IP:port strings separated by space can be provided
-cassandra_server_list = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-
-
-# IP address and port to be used to connect to kafka.
-# Multiple IP:port strings separated by space can be provided
-kafka_broker_list =
-
-# IP address of analytics node. Resolved IP of 'hostname'
-hostip = {{ contrail_address }}
-
-# Hostname of analytics node. If this is not configured value from `hostname`
-# will be taken
-# hostname =
-
-# Http server port for inspecting collector state (useful for debugging)
-http_server_port = 8089
-
-# Category for logging. Default value is '*'
-# log_category =
-
-# Local log file name
-log_file = /var/log/contrail/contrail-collector.log
-
-# Maximum log file rollover index
-# log_files_count = 10
-
-# Maximum log file size
-# log_file_size = 1048576 # 1MB
-
-# Log severity levels. Possible values are SYS_EMERG, SYS_ALERT, SYS_CRIT,
-# SYS_ERR, SYS_WARN, SYS_NOTICE, SYS_INFO and SYS_DEBUG. Default is SYS_DEBUG
-log_level = SYS_NOTICE
-
-# Enable/Disable local file logging. Possible values are 0 (disable) and
-# 1 (enable)
-log_local = 1
-
-# TCP and UDP ports to listen on for receiving syslog messages. -1 to disable.
-syslog_port = -1
-
-# UDP port to listen on for receiving sFlow messages. -1 to disable.
-# sflow_port = 6343
-
-# UDP port to listen on for receiving ipfix messages. -1 to disable.
-# ipfix_port = 4739
-
-[COLLECTOR]
-# Everything in this section is optional
-
-# Port to listen on for receiving Sandesh messages
-port = 8086
-
-# IP address to bind to for listening
-# server = 0.0.0.0
-
-# UDP port to listen on for receiving Google Protocol Buffer messages
-# protobuf_port = 3333
-
-[DISCOVERY]
-# Port to connect to for communicating with discovery server
-# port = 5998
-
-# IP address of discovery server
-server = {{ contrail_haproxy_address }}
-
-[REDIS]
-# Port to connect to for communicating with redis-server
-port = 6379
-
-# IP address of redis-server
-server = 127.0.0.1
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-control-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-control-conf.j2
deleted file mode 100755
index 83792b2c..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-control-conf.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-[DEFAULT]
-hostip = {{ contrail_address }}
-hostname = {{ ansible_hostname }}
-log_file = /var/log/contrail/contrail-control.log
-log_level = SYS_NOTICE
-log_local = 1
-
-[DISCOVERY]
-server = {{ contrail_haproxy_address }}
-port = 5998
-
-[IFMAP]
-certs_store =
-user = {{ contrail_address }}
-password = {{ contrail_address }}
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2
deleted file mode 100755
index a13a00b4..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-[DEFAULTS]
-api_server_ip = {{ contrail_haproxy_address }}
-api_server_port = 8082
-zk_server_ip = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
-
-cassandra_server_list = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-
-disc_server_ip = {{ contrail_haproxy_address }}
-disc_server_port = 5998
-rabbit_server = {{ contrail_haproxy_address }}
-rabbit_port = 5672
-rabbit_user = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-log_file = /var/log/contrail/contrail-device-manager.log
-log_level = SYS_NOTICE
-log_local = 1
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2
deleted file mode 100755
index f54fdc94..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-[DEFAULTS]
-listen_ip_addr = {{ contrail_haproxy_address }}
-listen_port = 5998
-zk_server_ip = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}{% if not loop.last %}, {% endif %}{% endfor %}
-
-zk_server_port = 2181
-cassandra_server_list = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-
-log_file = /var/log/contrail/contrail-discovery.log
-log_level = SYS_NOTICE
-log_local = 1
-
-# minimum time to allow client to cache service information (seconds)
-ttl_min = 300
-
-# maximum time to allow client to cache service information (seconds)
-ttl_max = 1800
-
-# health check ping interval, <= 0 for disabling
-hc_interval = 5
-
-# maximum heartbeats to miss before server will declare publisher out of
-# service.
-hc_max_miss = 3
-
-# use short TTL for aggressive rescheduling if all services are not up
-ttl_short = 1
-
-# for DNS service, we use fixed policy
-# even when the cluster has more than two control nodes, only two of these
-# should provide the DNS service
-[DNS-SERVER]
-policy = fixed
-
-######################################################################
-# Other service specific knobs ...
-
-# use short TTL for aggressive rescheduling if all services are not up
-# ttl_short = 1
-
-# specify policy to use when assigning services
-# policy = [load-balance | round-robin | fixed]
-######################################################################
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-supervisord-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-supervisord-conf.j2
deleted file mode 100755
index 541568de..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-supervisord-conf.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-[program:contrail-discovery]
-command=/usr/bin/contrail-discovery --conf_file /etc/contrail/contrail-discovery.conf --worker_id %(process_num)s
-numprocs=1
-process_name=%(process_num)s
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-discovery-%(process_num)s-stdout.log
-stderr_logfile=/dev/null
-priority=430
-autostart=true
-killasgroup=true
-stopsignal=KILL
-exitcodes=0
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2
deleted file mode 100755
index 9d415563..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-[DEFAULT]
-hostip = {{ contrail_address }}
-hostname = {{ inventory_hostname }}
-log_file = /var/log/contrail/contrail-dns.log
-log_level = SYS_NOTICE
-log_local = 1
-
-[DISCOVERY]
-server = {{ contrail_haproxy_address }}
-port = 5998
-
-[IFMAP]
-certs_store =
-user = {{ contrail_address }}.dns
-password = {{ contrail_address }}.dns
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-keystone-auth-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-keystone-auth-conf.j2
deleted file mode 100755
index f362ef45..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-keystone-auth-conf.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-[KEYSTONE]
-auth_protocol = http
-auth_host = {{ contrail_keystone_address }}
-auth_port = 35357
-admin_tenant_name = admin
-admin_user = {{ contrail_admin_user }}
-admin_password = {{ contrail_admin_password }}
-insecure = False
-
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2
deleted file mode 100755
index d947addb..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-[DEFAULT]
-hostip = {{ contrail_address }}
-
-
-#cassandra_server_list = {% for cur_host in groups['controller'] %}{{ ip_settings[cur_host]['mgmt']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-
-
-cassandra_server_list = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-
-collectors = {{ contrail_haproxy_address }}:8086
-http_server_port = 8091
-log_file = /var/log/contrail/contrail-query-engine.log
-log_level = SYS_NOTICE
-log_local = 1
-
-[REDIS]
-server = 127.0.0.1
-port = 6379
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2
deleted file mode 100755
index d112dbee..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-[DEFAULTS]
-ifmap_server_ip = {{ contrail_haproxy_address }}
-ifmap_server_port = 8443
-ifmap_username = schema-transformer
-ifmap_password = schema-transformer
-api_server_ip = {{ contrail_haproxy_address }}
-api_server_port = 8082
-zk_server_ip = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
-
-cassandra_server_list = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-
-disc_server_ip = {{ contrail_haproxy_address }}
-disc_server_port = 5998
-log_file = /var/log/contrail/contrail-schema.log
-log_level = SYS_NOTICE
-log_local = 1
-
-[SECURITY]
-use_certs = False
-keyfile = /etc/contrail/ssl/private_keys/apiserver_key.pem
-certfile = /etc/contrail/ssl/certs/apiserver.pem
-ca_certs = /etc/contrail/ssl/certs/ca.pem
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-sudoers.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-sudoers.j2
deleted file mode 100755
index 1ff43563..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-sudoers.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-Defaults:contrail !requiretty
-
-Cmnd_Alias CONFIGRESTART = /usr/sbin/service supervisor-config restart
-
-contrail ALL = (root) NOPASSWD:CONFIGRESTART
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2
deleted file mode 100755
index 0c6bfc07..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2
+++ /dev/null
@@ -1,31 +0,0 @@
-[DEFAULTS]
-ifmap_server_ip = {{ contrail_haproxy_address }}
-ifmap_server_port = 8443
-ifmap_username = svc-monitor
-ifmap_password = svc-monitor
-api_server_ip = {{ contrail_haproxy_address }}
-api_server_port = 8082
-zk_server_ip = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
-
-cassandra_server_list = {% for cur_host in groups['opencontrail'] %}{{ ip_settings[cur_host]['br-prv']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-
-disc_server_ip = {{ contrail_haproxy_address }}
-disc_server_port = 5998
-rabbit_server = {{ contrail_haproxy_address }}
-rabbit_port = 5672
-rabbit_user = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-region_name = RegionOne
-log_file = /var/log/contrail/contrail-svc-monitor.log
-log_level = SYS_NOTICE
-log_local = 1
-
-[SECURITY]
-use_certs = False
-keyfile = /etc/contrail/ssl/private_keys/apiserver_key.pem
-certfile = /etc/contrail/ssl/certs/apiserver.pem
-ca_certs = /etc/contrail/ssl/certs/ca.pem
-
-[SCHEDULER]
-analytics_server_ip = {{ contrail_haproxy_address }}
-analytics_server_port = 8081
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2
deleted file mode 100755
index 8d336e52..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2
+++ /dev/null
@@ -1,111 +0,0 @@
-#
-# Vnswad configuration options
-#
-
-[CONTROL-NODE]
-# IP address to be used to connect to control-node. Maximum of 2 IP addresses
-# (separated by a space) can be provided. If no IP is configured then the
-# value provided by discovery service will be used. (optional)
-# server = 10.0.0.1 10.0.0.2
-
-[DEFAULT]
-agent_name = {{ ansible_hostname }}-{{ item.ansible_facts.toragent_index }}
-# Everything in this section is optional
-
-# IP address and port to be used to connect to collector. If these are not
-# configured, value provided by discovery service will be used. Multiple
-# IP:port strings separated by space can be provided
-# collectors = 127.0.0.1:8086
-
-# Enable/disable debug logging. Possible values are 0 (disable) and 1 (enable)
-# debug = 0
-
-# Aging time for flow-records in seconds
-# flow_cache_timeout = 0
-
-# Hostname of compute-node. If this is not configured value from `hostname`
-# will be taken
-# hostname =
-
-# Category for logging. Default value is '*'
-# log_category =
-
-# Local log file name
-log_file = /var/log/contrail/contrail-tor-agent-{{ item.ansible_facts.toragent_index }}.log
-
-# Log severity levels. Possible values are SYS_EMERG, SYS_ALERT, SYS_CRIT,
-# SYS_ERR, SYS_WARN, SYS_NOTICE, SYS_INFO and SYS_DEBUG. Default is SYS_DEBUG
-# log_level = SYS_DEBUG
-
-# Enable/Disable local file logging. Possible values are 0 (disable) and 1 (enable)
-# log_local = 0
-
-# Enable/Disable local flow message logging. Possible values are 0 (disable) and 1 (enable)
-# log_flow = 0
-
-# Encapsulation type for tunnel. Possible values are MPLSoGRE, MPLSoUDP, VXLAN
-# tunnel_type =
-
-# Enable/Disable headless mode for agent. In headless mode agent retains last
-# known good configuration from control node when all control nodes are lost.
-# Possible values are true(enable) and false(disable)
-# headless_mode =
-
-# Define agent mode. Only supported value is "tor"
-agent_mode = tor
-
-# Http server port for inspecting vnswad state (useful for debugging)
-# http_server_port = 8085
-http_server_port = {{ item.ansible_facts.toragent_params.http_server_port }}
-
-[DISCOVERY]
-#If DEFAULT.collectors and/or CONTROL-NODE and/or DNS is not specified this
-#section is mandatory. Else this section is optional
-
-# IP address of discovery server
-server = {{ contrail_haproxy_address }}
-
-# Number of control-nodes info to be provided by Discovery service. Possible
-# values are 1 and 2
-# max_control_nodes = 1
-
-[DNS]
-# IP address to be used to connect to dns-node. Maximum of 2 IP addresses
-# (separated by a space) can be provided. If no IP is configured then the
-# value provided by discovery service will be used. (Optional)
-# server = 10.0.0.1 10.0.0.2
-
-[NETWORKS]
-# control-channel IP address used by WEB-UI to connect to vnswad to fetch
-# required information (Optional)
-control_network_ip = {{ contrail_haproxy_address }}
-
-[TOR]
-# IP address of the TOR to manage
-tor_ip = {{ item.ansible_facts.toragent_params.address }}
-
-# Identifier for ToR. Agent will subscribe to ifmap-configuration by this name
-tor_id = {{ item.ansible_facts.toragent_index }}
-
-# ToR management scheme is based on this type. Only supported value is "ovs"
-tor_type = ovs
-
-# OVS server port number on the ToR
-tor_ovs_port = {{ item.ansible_facts.toragent_params.ovs_port }}
-
-# IP-Transport protocol used to connect to tor. Supported values are "tcp", "pssl"
-tor_ovs_protocol = {{ item.ansible_facts.toragent_params.ovs_protocol }}
-
-# Path to ssl certificate for tor-agent, needed for pssl
-ssl_cert = /etc/contrail/ssl/certs/tor.{{ item.ansible_facts.toragent_index }}.cert.pem
-
-# Path to ssl private-key for tor-agent, needed for pssl
-ssl_privkey = /etc/contrail/ssl/private/tor.{{ item.ansible_facts.toragent_index }}.privkey.pem
-
-# Path to ssl cacert for tor-agent, needed for pssl
-ssl_cacert = /etc/contrail/ssl/certs/cacert.pem
-
-tsn_ip = {{ contrail_address }}
-
-# OVS keep alive timer interval in milliseconds
-tor_keepalive_interval = 10000
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-ini.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-ini.j2
deleted file mode 100755
index db6944c9..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-ini.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-[program:contrail-tor-agent-{{ item.ansible_facts.toragent_index }}]
-command=/usr/bin/contrail-tor-agent --config_file /etc/contrail/contrail-tor-agent-{{ item.ansible_facts.toragent_index }}.conf
-priority=420
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-tor-agent-{{ item.ansible_facts.toragent_index }}-stdout.log
-stderr_logfile=/dev/null
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-vnc-api-lib-ini.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-vnc-api-lib-ini.j2
deleted file mode 100755
index 85a7b63a..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-vnc-api-lib-ini.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-[global]
-WEB_SERVER=127.0.0.1
-WEB_PORT=8082 ; connection to api-server directly
-BASE_URL=/
-
-[auth]
-AUTHN_TYPE=keystone
-AUTHN_PROTOCOL=http
-AUTHN_SERVER={{ contrail_keystone_address }}
-AUTHN_PORT=35357
-AUTHN_URL=/v2.0/tokens
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-vrouter-agent-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-vrouter-agent-conf.j2
deleted file mode 100755
index d64cc21c..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-vrouter-agent-conf.j2
+++ /dev/null
@@ -1,177 +0,0 @@
-#
-# Vnswad configuration options
-#
-
-[CONTROL-NODE]
-# IP address to be used to connect to control-node. Maximum of 2 IP addresses
-# (separated by a space) can be provided. If no IP is configured then the
-# value provided by discovery service will be used. (Optional)
-# server = 10.0.0.1 10.0.0.2
-
-[DEFAULT]
-# Everything in this section is optional
-
-# IP address and port to be used to connect to collector. If these are not
-# configured, value provided by discovery service will be used. Multiple
-# IP:port strings separated by space can be provided
-# collectors = 127.0.0.1:8086
-
-# Agent mode : can be vrouter / tsn / tor (default is vrouter)
-{% if contrail_vrouter_mode is defined %}agent_mode = {{ contrail_vrouter_mode }}
-{% else %}# agent_mode =
-{% endif %}
-
-# Enable/disable debug logging. Possible values are 0 (disable) and 1 (enable)
-# debug = 0
-
-# Aging time for flow-records in seconds
-# flow_cache_timeout = 0
-
-# Hostname of compute-node. If this is not configured value from `hostname`
-# will be taken
-# hostname =
-
-# Http server port for inspecting vnswad state (useful for debugging)
-# http_server_port = 8085
-
-# Category for logging. Default value is '*'
-# log_category =
-
-# Local log file name
-log_file = /var/log/contrail/contrail-vrouter-agent.log
-
-# Log severity levels. Possible values are SYS_EMERG, SYS_ALERT, SYS_CRIT,
-# SYS_ERR, SYS_WARN, SYS_NOTICE, SYS_INFO and SYS_DEBUG. Default is SYS_DEBUG
-log_level = SYS_NOTICE
-
-# Enable/Disable local file logging. Possible values are 0 (disable) and 1 (enable)
-log_local = 1
-
-# Encapsulation type for tunnel. Possible values are MPLSoGRE, MPLSoUDP, VXLAN
-# tunnel_type =
-
-# Enable/Disable headless mode for agent. In headless mode agent retains last
-# known good configuration from control node when all control nodes are lost.
-# Possible values are true(enable) and false(disable)
-# headless_mode =
-
-# DHCP relay mode (true or false) to determine if a DHCP request in fabric
-# interface with an unconfigured IP should be relayed or not
-# dhcp_relay_mode =
-
-# DPDK or legacy work mode
-platform = default
-
-# Physical address of PCI used by dpdk
-physical_interface_address =
-
-# MAC address of device used by dpdk
-physical_interface_mac = {{ hostvars[inventory_hostname]['ansible_'+contrail_vhost_device]['macaddress'] }}
-
-[DISCOVERY]
-# If COLLECTOR and/or CONTROL-NODE and/or DNS is not specified this section is
-# mandatory. Else this section is optional
-
-# IP address of discovery server
-server = {{ contrail_haproxy_address }}
-
-# Number of control-nodes info to be provided by Discovery service. Possible
-# values are 1 and 2
-max_control_nodes = {{ groups['opencontrail'] | length }}
-
-[DNS]
-# IP address and port to be used to connect to dns-node. Maximum of 2 IP
-# addresses (separated by a space) can be provided. If no IP is configured then
-# the value provided by discovery service will be used.
-# server = 10.0.0.1:53 10.0.0.2:53
-
-[HYPERVISOR]
-# Everything in this section is optional
-
-# Hypervisor type. Possible values are kvm, xen and vmware
-type = kvm
-vmware_mode =
-
-# Link-local IP address and prefix in ip/prefix_len format (for xen)
-# xen_ll_ip =
-
-# Link-local interface name when hypervisor type is Xen
-# xen_ll_interface =
-
-# Physical interface name when hypervisor type is vmware
-vmware_physical_interface =
-
-[FLOWS]
-# Everything in this section is optional
-
-# Maximum flows allowed per VM (given as % of maximum system flows)
-# max_vm_flows = 100
-# Maximum number of link-local flows allowed across all VMs
-# max_system_linklocal_flows = 4096
-# Maximum number of link-local flows allowed per VM
-# max_vm_linklocal_flows = 1024
-
-[METADATA]
-# Shared secret for metadata proxy service (Optional)
-# metadata_proxy_secret = contrail
-
-[NETWORKS]
-# control-channel IP address used by WEB-UI to connect to vnswad to fetch
-# required information (Optional)
-control_network_ip = {{ contrail_haproxy_address }}
-
-[VIRTUAL-HOST-INTERFACE]
-# Everything in this section is mandatory
-
-# name of virtual host interface
-name = vhost0
-
-# IP address and prefix in ip/prefix_len format
-ip = {{ contrail_vhost_address }}/{{ contrail_prefixlen }}
-
-# Gateway IP address for virtual host
-gateway = {{ contrail_vhost_gateway }}
-
-# Physical interface name to which virtual host interface maps to
-physical_interface = {{ contrail_vhost_device }}
-
-# We can have multiple gateway sections with different indices in the
-# following format
-# [GATEWAY-0]
-# Name of the routing_instance for which the gateway is being configured
-# routing_instance = default-domain:admin:public:public
-
-# Gateway interface name
-# interface = vgw
-
-# Virtual network ip blocks for which gateway service is required. Each IP
-# block is represented as ip/prefix. Multiple IP blocks are represented by
-# separating each with a space
-# ip_blocks = 1.1.1.1/24
-
-# [GATEWAY-1]
-# Name of the routing_instance for which the gateway is being configured
-# routing_instance = default-domain:admin:public1:public1
-
-# Gateway interface name
-# interface = vgw1
-
-# Virtual network ip blocks for which gateway service is required. Each IP
-# block is represented as ip/prefix. Multiple IP blocks are represented by
-# separating each with a space
-# ip_blocks = 2.2.1.0/24 2.2.2.0/24
-
-# Routes to be exported in routing_instance. Each route is represented as
-# ip/prefix. Multiple routes are represented by separating each with a space
-# routes = 10.10.10.1/24 11.11.11.1/24
-
-[SERVICE-INSTANCE]
-# Path to the script which handles the netns commands
-netns_command = /usr/bin/opencontrail-vrouter-netns
-
-# Number of workers that will be used to start netns commands
-#netns_workers = 1
-
-# Timeout for each netns command, when the timeout is reached, the netns
-# command is killed.
-#netns_timeout = 30
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/default-pmac.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/default-pmac.j2
deleted file mode 100755
index dac56d1d..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/default-pmac.j2
+++ /dev/null
@@ -1 +0,0 @@
-{{ hostvars[inventory_hostname][contrail_ansible_device]['macaddress'] }}
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2
deleted file mode 100755
index 01196369..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2
+++ /dev/null
@@ -1,78 +0,0 @@
-#contrail-marker-start
-
-listen contrail-stats
-# bind *:5937
- bind {{ internal_vip.ip }}:5937
- bind {{ public_vip.ip }}:5937
- mode http
- stats enable
- stats uri /
- stats auth haproxy:contrail123
-
-# compass already binds neutron-server, so this block is left commented out
-#listen neutron-server
-# bind *:9696
-# balance roundrobin
-# option nolinger
-#{% for host,ip in haproxy_hosts.items() %}
-# server {{ host }} {{ ip }}:9697 weight 1 check inter 2000 rise 2 fall 3
-#{% endfor %}
-
-
-
-listen contrail-api
-# bind *:8082
- bind {{ internal_vip.ip }}:8082
- bind {{ public_vip.ip }}:8082
- balance roundrobin
- option nolinger
- timeout client 3m
- timeout server 3m
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8082 weight 1 check inter 2000 rise 2 fall 3
-{% endfor %}
-
-
-
-listen contrail-discovery
-# bind *:5998
- bind {{ internal_vip.ip }}:5998
- bind {{ public_vip.ip }}:5998
- balance roundrobin
- option nolinger
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:5998 weight 1 check inter 2000 rise 2 fall 3
-{% endfor %}
-
-
-
-listen contrail-analytics-api
-# bind *:8081
- bind {{ internal_vip.ip }}:8081
- bind {{ public_vip.ip }}:8081
- balance roundrobin
- option nolinger
- option tcp-check
- tcp-check connect port 6379
- default-server error-limit 1 on-error mark-down
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8081 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-
-# compass doesn't use ha for rabbitmq, but uses cluster mode instead
-#listen rabbitmq
-# bind *:5673
-# mode tcp
-# balance roundrobin
-# maxconn 10000
-# option tcplog
-# option tcpka
-# option redispatch
-# timeout client 48h
-# timeout server 48h
-{% for host,ip in haproxy_hosts.items() %}
-# server {{ host }} {{ ip }}:5672 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-#contrail-marker-end
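Each listen block above expands one backend line per controller from the haproxy_hosts mapping. A tiny Python sketch of that expansion for the contrail-api block, with an illustrative host map:

haproxy_hosts = {"host1": "10.1.0.11", "host2": "10.1.0.12"}

# One "server" line per backend host, as the Jinja2 for-loop emits above.
for host, ip in haproxy_hosts.items():
    print(f"    server {host} {ip}:8082 weight 1 check inter 2000 rise 2 fall 3")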
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-authorization-properties.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-authorization-properties.j2
deleted file mode 100755
index 41a1c649..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-authorization-properties.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-# The MAPC with basic auth username 'reader' has read only access.
-reader=ro
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-basicauthusers-properties.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-basicauthusers-properties.j2
deleted file mode 100755
index 6ca38a29..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-basicauthusers-properties.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-test:test
-test2:test2
-test3:test3
-dhcp:dhcp
-visual:visual
-sensor:sensor
-
-# compliance testsuite users
-mapclient:mapclient
-helper:mapclient
-
-# This is a read-only MAPC
-reader:reader
-
-# OpenContrail users
-api-server:api-server
-schema-transformer:schema-transformer
-svc-monitor:svc-monitor
-
-control-user:control-user-passwd
-control-node-1:control-node-1
-control-node-2:control-node-2
-control-node-3:control-node-3
-control-node-4:control-node-4
-control-node-5:control-node-5
-control-node-6:control-node-6
-control-node-7:control-node-7
-control-node-8:control-node-8
-control-node-9:control-node-9
-control-node-10:control-node-10
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-log4j-properties.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-log4j-properties.j2
deleted file mode 100755
index ebd0b483..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-log4j-properties.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-# Set root logger level to DEBUG and its only appender to CONSOLE
-log4j.rootLogger=TRACE, CONSOLE
-log4j.error
-
-log4j.logger.de.fhhannover.inform.irond.proc=TRACE, A1, A2
-log4j.additivity.de.fhhannover.inform.irond.proc=false
-
-log4j.appender.A1=org.apache.log4j.ConsoleAppender
-log4j.appender.A1.layout=org.apache.log4j.PatternLayout
-log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %x - %m%n
-
-log4j.appender.A2=org.apache.log4j.FileAppender
-log4j.appender.A2.File=/var/log/contrail/ifmap-server.log
-log4j.appender.A2.layout=org.apache.log4j.PatternLayout
-log4j.appender.A2.layout.ConversionPattern=%d [%t] %-5p %x - %m%n
-
-log4j.logger.de.fhhannover.inform.irond.rawrequests=TRACE, A3
-log4j.additivity.de.fhhannover.inform.irond.rawrequests=false
-log4j.appender.A3=org.apache.log4j.FileAppender
-log4j.appender.A3.file=irond_raw.log
-log4j.appender.A3.layout=org.apache.log4j.PatternLayout
-log4j.appender.A3.layout.ConversionPattern=%d %-5p %x - %m%n
-
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%-8r [%t] %-5p %C{1} %x - %m%n
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-publisher-properties.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-publisher-properties.j2
deleted file mode 100755
index 90d2a887..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/ifmap-publisher-properties.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-#Sun May 27 15:47:44 PDT 2012
-visual=visual--1877135140-1
-test=test--1870931913-1
-test2=test2--1870931914-1
-test3=test3--1870931915-1
-api-server=api-server-1--0000000001-1
-control-node-1=control-node-1--1870931921-1
-control-node-2=control-node-1--1870931922-1
-control-node-3=control-node-1--1870931923-1
-control-node-4=control-node-1--1870931924-1
-control-node-5=control-node-1--1870931925-1
-control-node-6=control-node-1--1870931926-1
-control-node-7=control-node-1--1870931927-1
-control-node-8=control-node-1--1870931928-1
-control-node-9=control-node-1--1870931929-1
-control-node-10=control-node-10--1870931930-1
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/keepalived-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/keepalived-conf.j2
deleted file mode 100755
index b16c4a25..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/keepalived-conf.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-vrrp_script chk_haproxy {
- script "killall -0 haproxy"
- interval 1
- timeout 3
- rise 2
- fall 2
-}
-
-vrrp_instance INTERNAL_1 {
- interface {{ contrail_device }}
- state MASTER
-    preempt_delay 7
-    garp_master_delay 5
-    garp_master_repeat 3
-    garp_master_refresh 1
- advert_int 1
- virtual_router_id 85
- vmac_xmit_base
- priority 10{{ item.0 }}
- virtual_ipaddress {
- {{ contrail_haproxy_address }} dev {{ contrail_device }}
- }
- track_script {
- chk_haproxy
- }
- track_interface {
- {{ contrail_device }}
- }
-}
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/neutron-contrail-plugin-ini.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/neutron-contrail-plugin-ini.j2
deleted file mode 100755
index 13e5965a..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/neutron-contrail-plugin-ini.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-[APISERVER]
-api_server_ip={{ contrail_haproxy_address }}
-api_server_port=8082
-multi_tenancy=True
-contrail_extensions=ipam:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_ipam.NeutronPluginContrailIpam,policy:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_policy.NeutronPluginContrailPolicy,route-table:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_vpc.NeutronPluginContrailVpc,contrail:None
-
-[COLLECTOR]
-analytics_api_ip={{ contrail_haproxy_address }}
-analytics_api_port=8081
-
-[KEYSTONE]
-auth_url=http://{{ contrail_keystone_address }}:35357/v2.0
-admin_tenant_name=admin
-admin_user={{ contrail_admin_user }}
-admin_password={{ contrail_admin_password }}
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/nova.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/nova.j2
deleted file mode 100755
index ea4dbbad..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/nova.j2
+++ /dev/null
@@ -1,58 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-libvirt_use_virtio_for_bridges=True
-verbose=True
-ec2_private_dns_show_ip=False
-auth_strategy = keystone
-libvirt_nonblocking = True
-libvirt_inject_partition = -1
-compute_driver = libvirt.LibvirtDriver
-novncproxy_base_url = http://{{ contrail_keystone_address }}:6080/vnc_auto.html
-vncserver_enabled = true
-vncserver_listen = {{ contrail_address }}
-vncserver_proxyclient_address = {{ contrail_address }}
-security_group_api = neutron
-heal_instance_info_cache_interval = 0
-image_cache_manager_interval = 0
-libvirt_cpu_mode = none
-libvirt_vif_driver = nova_contrail_vif.contrailvif.VRouterVIFDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-glance_host = {{ contrail_keystone_address }}
-glance_port = 9292
-glance_num_retries = 10
-rabbit_host = {{ contrail_keystone_address }}
-rabbit_port = 5672
-rabbit_password = {{ rabbit_password }}
-rabbit_retry_interval = 1
-rabbit_retry_backoff = 2
-rabbit_max_retries = 0
-rabbit_ha_queues = True
-rpc_cast_timeout = 30
-rpc_conn_pool_size = 40
-rpc_response_timeout = 60
-rpc_thread_pool_size = 70
-report_interval = 15
-novncproxy_port = 6080
-vnc_port = 5900
-vnc_port_total = 100
-resume_guests_state_on_host_boot = True
-service_down_time = 300
-periodic_fuzzy_delay = 30
-disable_process_locking = True
-neutron_admin_auth_url =
-
-[keystone_authtoken]
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ contrail_admin_password }}
-auth_host = {{ contrail_keystone_address }}
-auth_protocol = http
-auth_port = 5000
-signing_dir = /tmp/keystone-signing-nova
-
-
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/qemu-device-acl-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/qemu-device-acl-conf.j2
deleted file mode 100755
index 53dfbba2..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/qemu-device-acl-conf.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-cgroup_device_acl = [
- "/dev/null", "/dev/full", "/dev/zero",
- "/dev/random", "/dev/urandom",
- "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
- "/dev/rtc", "/dev/hpet","/dev/net/tun"
-]
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-conf-single.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-conf-single.j2
deleted file mode 100644
index cce01c77..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-conf-single.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-[
- {rabbit, [ {tcp_listeners, [{"{{ internal_ip }}", 5672}]},
- {loopback_users, []},
- {log_levels,[{connection, info},{mirroring, info}]} ]
- }
-].
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-conf.j2
deleted file mode 100644
index f0d09c4f..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-[
- {rabbit, [ {tcp_listeners, [{"{{ contrail_address }}", 5672}]}, {cluster_partition_handling, autoheal},{loopback_users, []},
- {cluster_nodes, {[{% for cur_host in groups['opencontrail'] %}'rabbit@{{ cur_host }}'{% if not loop.last %}, {% endif %}{% endfor %}], disc}},
- {vm_memory_high_watermark, 0.4},
- {disk_free_limit,50000000},
- {log_levels,[{connection, info},{mirroring, info}]},
- {heartbeat,10},
- {delegate_count,20},
- {channel_max,5000},
- {tcp_listen_options,
- [binary,
- {packet, raw},
- {reuseaddr, true},
- {backlog, 128},
- {nodelay, true},
- {exit_on_close, false},
- {keepalive, true}
- ]
- },
- {collect_statistics_interval, 60000}
- ]
- },
- {rabbitmq_management_agent, [ {force_fine_statistics, true} ] },
- {kernel, [{net_ticktime, 30}]}
-].
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-cookie.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-cookie.j2
deleted file mode 100644
index 838d0332..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-cookie.j2
+++ /dev/null
@@ -1 +0,0 @@
-{{ ansible_date_time.iso8601_micro | to_uuid }}
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2
deleted file mode 100644
index 6a3b4760..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-NODE_IP_ADDRESS={{ internal_ip }}
-NODENAME=rabbit@{{ ansible_hostname }}-ctrl
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2
deleted file mode 100755
index ee5dcbd7..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2
+++ /dev/null
@@ -1 +0,0 @@
-DISCOVERY={{ ip_settings['host1']['br-prv']['ip'] }}
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/zookeeper-unique-id.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/zookeeper-unique-id.j2
deleted file mode 100755
index ec0033b3..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/zookeeper-unique-id.j2
+++ /dev/null
@@ -1 +0,0 @@
-{{ item.0 + 1 }}
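The deleted zookeeper-unique-id.j2 is a single expression, {{ item.0 + 1 }}, which turns a 0-based with_indexed_items index into a 1-based ZooKeeper myid. A minimal sketch of that pattern, with the group name and destination path assumed for illustration (they are not taken from the removed role):

    # With groups['opencontrail'] == ['node1', 'node2', 'node3'] this writes
    # "1" on node1, "2" on node2 and "3" on node3.
    - name: write per-node zookeeper id             # hypothetical task
      template:
        src: provision/zookeeper-unique-id.j2
        dest: /var/lib/zookeeper/myid
      when: inventory_hostname == item.1
      with_indexed_items: "{{ groups['opencontrail'] }}"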
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/vrouter-functions.sh b/deploy/adapters/ansible/roles/open-contrail/templates/vrouter-functions.sh
deleted file mode 100755
index 69af7b2a..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/templates/vrouter-functions.sh
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-source /etc/contrail/agent_param
-
-function pkt_setup () {
- for f in /sys/class/net/$1/queues/rx-*
- do
- q="$(echo $f | cut -d '-' -f2)"
- r=$(($q%32))
- s=$(($q/32))
- ((mask=1<<$r))
- str=(`printf "%x" $mask`)
- if [ $s -gt 0 ]; then
- for ((i=0; i < $s; i++))
- do
- str+=,00000000
- done
- fi
- echo $str > $f/rps_cpus
- done
-}
-
-function insert_vrouter() {
- if cat $CONFIG | grep '^\s*platform\s*=\s*dpdk\b' &>/dev/null; then
- vrouter_dpdk_start
- return $?
- fi
-
- grep $kmod /proc/modules 1>/dev/null 2>&1
- if [ $? != 0 ]; then
- insmod /var/lib/dkms/vrouter/2.21/build/vrouter.ko
- if [ $? != 0 ]
- then
- echo "$(date) : Error inserting vrouter module"
- return 1
- fi
-
- if [ -f /sys/class/net/pkt1/queues/rx-0/rps_cpus ]; then
- pkt_setup pkt1
- fi
- if [ -f /sys/class/net/pkt2/queues/rx-0/rps_cpus ]; then
- pkt_setup pkt2
- fi
- if [ -f /sys/class/net/pkt3/queues/rx-0/rps_cpus ]; then
- pkt_setup pkt3
- fi
- fi
-
- # check if vhost0 is not present, then create vhost0 and $dev
- if [ ! -L /sys/class/net/vhost0 ]; then
- echo "$(date): Creating vhost interface: $DEVICE."
- # for bonding interfaces
- loops=0
- while [ ! -f /sys/class/net/$dev/address ]
- do
- sleep 1
- loops=$(($loops + 1))
- if [ $loops -ge 60 ]; then
- echo "Unable to look at /sys/class/net/$dev/address"
- return 1
- fi
- done
-
- DEV_MAC=$(cat /sys/class/net/$dev/address)
- vif --create $DEVICE --mac $DEV_MAC
- if [ $? != 0 ]; then
- echo "$(date): Error creating interface: $DEVICE"
- fi
-
-
- echo "$(date): Adding $dev to vrouter"
- DEV_MAC=$(cat /sys/class/net/$dev/address)
- vif --add $dev --mac $DEV_MAC --vrf 0 --vhost-phys --type physical
- if [ $? != 0 ]; then
- echo "$(date): Error adding $dev to vrouter"
- fi
-
- vif --add $DEVICE --mac $DEV_MAC --vrf 0 --type vhost --xconnect $dev
- if [ $? != 0 ]; then
- echo "$(date): Error adding $DEVICE to vrouter"
- fi
- fi
- return 0
-}
-
-function vrouter_dpdk_start() {
- # wait for vRouter/DPDK to start
- echo "$(date): Waiting for vRouter/DPDK to start..."
- service ${VROUTER_SERVICE} start
- loops=0
- while ! is_vrouter_dpdk_running
- do
- sleep 1
- loops=$(($loops + 1))
- if [ $loops -ge 60 ]; then
- echo "No vRouter/DPDK running."
- echo "Please check if ${VROUTER_SERVICE} service is up and running."
- return 1
- fi
- done
-
- # TODO: at the moment we have no interface deletion, so this loop might
- # be unnecessary in the future
- echo "$(date): Waiting for Agent to configure $DEVICE..."
- loops=0
- while [ ! -L /sys/class/net/vhost0 ]
- do
- sleep 1
- loops=$(($loops + 1))
- if [ $loops -ge 10 ]; then
- break
- fi
- done
-
- # check if vhost0 is not present, then create vhost0 and $dev
- if [ ! -L /sys/class/net/vhost0 ]; then
- echo "$(date): Creating vhost interface: $DEVICE."
- agent_conf_read
-
- DEV_MAC=${physical_interface_mac}
- DEV_PCI=${physical_interface_address}
-
- if [ -z "${DEV_MAC}" -o -z "${DEV_PCI}" ]; then
- echo "No device configuration found in ${CONFIG}"
- return 1
- fi
-
- # TODO: the vhost creation is happening later in vif --add
-# vif --create $DEVICE --mac $DEV_MAC
-# if [ $? != 0 ]; then
-# echo "$(date): Error creating interface: $DEVICE"
-# fi
-
- echo "$(date): Adding $dev to vrouter"
- # add DPDK ethdev 0 as a physical interface
- vif --add 0 --mac $DEV_MAC --vrf 0 --vhost-phys --type physical --pmd --id 0
- if [ $? != 0 ]; then
- echo "$(date): Error adding $dev to vrouter"
- fi
-
- # TODO: vif --xconnect seems does not work without --id parameter?
- vif --add $DEVICE --mac $DEV_MAC --vrf 0 --type vhost --xconnect 0 --pmd --id 1
- if [ $? != 0 ]; then
- echo "$(date): Error adding $DEVICE to vrouter"
- fi
- fi
- return 0
-}
-
-DPDK_BIND=/opt/contrail/bin/dpdk_nic_bind.py
-VROUTER_SERVICE="supervisor-vrouter"
-
-function is_vrouter_dpdk_running() {
- # check for NetLink TCP socket
- lsof -ni:20914 -sTCP:LISTEN > /dev/null
-
- return $?
-}
-
-function agent_conf_read() {
- eval `cat ${CONFIG} | grep -E '^\s*physical_\w+\s*='`
-}
-
-function vrouter_dpdk_if_bind() {
- if [ ! -s /sys/class/net/${dev}/address ]; then
- echo "No ${dev} device found."
- ${DPDK_BIND} --status
- return 1
- fi
-
- modprobe igb_uio
- # multiple kthreads for port monitoring
- modprobe rte_kni kthread_mode=multiple
-
- ${DPDK_BIND} --force --bind=igb_uio $dev
- ${DPDK_BIND} --status
-}
-
-function vrouter_dpdk_if_unbind() {
- if [ -s /sys/class/net/${dev}/address ]; then
- echo "Device ${dev} is already unbinded."
- ${DPDK_BIND} --status
- return 1
- fi
-
- agent_conf_read
-
- DEV_PCI=${physical_interface_address}
- DEV_DRIVER=`lspci -vmmks ${DEV_PCI} | grep 'Module:' | cut -d $'\t' -f 2`
-
- if [ -z "${DEV_DRIVER}" -o -z "${DEV_PCI}" ]; then
- echo "No device ${dev} configuration found in ${AGENT_DPDK_PARAMS_FILE}"
- return 1
- fi
-
- # wait for vRouter/DPDK to stop
- echo "$(date): Waiting for vRouter/DPDK to stop..."
- loops=0
- while is_vrouter_dpdk_running
- do
- sleep 1
- loops=$(($loops + 1))
- if [ $loops -ge 60 ]; then
- echo "vRouter/DPDK is still running."
- echo "Please try to stop ${VROUTER_SERVICE} service."
- return 1
- fi
- done
-
- ${DPDK_BIND} --force --bind=${DEV_DRIVER} ${DEV_PCI}
- ${DPDK_BIND} --status
-
- rmmod rte_kni
- rmmod igb_uio
-}
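As a worked example of the pkt_setup arithmetic in the deleted script above: for queue rx-37, q = 37, r = 37 % 32 = 5, s = 37 / 32 = 1 and mask = 1 << 5 = 0x20; one ",00000000" group is appended, so the string written to rps_cpus is "20,00000000", i.e. bit 5 of the upper 32-bit word, steering that queue's receive processing to CPU 37.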
diff --git a/deploy/adapters/ansible/roles/open-contrail/vars/Debian.yml b/deploy/adapters/ansible/roles/open-contrail/vars/Debian.yml
deleted file mode 100755
index 845aa78c..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/vars/Debian.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-package: "contrail-install-packages_2.21-102-ubuntu-14-04juno_all.deb"
-
-common_package:
- - contrail-setup
-
-kernel_package:
- - linux-headers-3.13.0-40
- - linux-headers-3.13.0-40-generic
- - linux-image-3.13.0-40-generic
- - linux-image-extra-3.13.0-40-generic
-
-kernel_required: "3.13.0-40-generic"
-
-database_package:
- - contrail-openstack-database
-
-config_package:
- - contrail-openstack-config
-
-control_package:
- - contrail-openstack-control
-
-collector_package:
- - contrail-openstack-analytics
-
-webui_package:
- - contrail-openstack-webui
-
-vrouter_package:
- - contrail-vrouter-3.13.0-40-generic
-
-dkms_package:
- - contrail-vrouter-dkms
-
-compute_package:
- - contrail-vrouter-common
- - contrail-nova-vif
-
diff --git a/deploy/adapters/ansible/roles/open-contrail/vars/RedHat.yml b/deploy/adapters/ansible/roles/open-contrail/vars/RedHat.yml
deleted file mode 100755
index d760b4e6..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/vars/RedHat.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
diff --git a/deploy/adapters/ansible/roles/open-contrail/vars/main.yml b/deploy/adapters/ansible/roles/open-contrail/vars/main.yml
deleted file mode 100755
index 582e41e8..00000000
--- a/deploy/adapters/ansible/roles/open-contrail/vars/main.yml
+++ /dev/null
@@ -1,89 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-#package: "contrail-install-packages_2.21-102~juno_all.deb" # mv to {os}.yml
-kernel_install: no
-#ansible_ssh_user: "root"
-#ansible_ssh_pass: "root"
-
-#contrail_keystone_address: "{{ internal_vip.ip }}"
-contrail_keystone_address: "{{ public_vip.ip }}"
-contrail_admin_user: "admin"
-contrail_admin_password: "console"
-
-neutron_plugin: neutron_plugin_contrail.tar.gz
-nova_plugin: nova_contrail_vif.tar.gz
-
-
-# network infor adapter for compass
-# contrail_address: "{{ internal_ip }}"
-contrail_address: "{{ ip_settings[inventory_hostname]['br-prv']['ip'] }}"
-#contrail_device: # compass openstack device
-contrail_netmask: "255.255.255.0"
-#contrail_gateway: "10.84.50.254"
-contrail_gateway:
-#contrail_mgmt_address: "172.27.113.91"
-
-
-
-###########################################################
-### we make an independent NIC for OpenContrail vRouter ###
-###########################################################
-contrail_vhost_device: "{{ network_cfg['provider_net_mappings'][0]['interface'] }}"
-contrail_vhost_address: "{{ ip_settings[inventory_hostname]['br-prv']['ip'] }}"
-contrail_vhost_gateway: "{{ ip_settings[inventory_hostname]['br-prv']['gw'] }}"
-contrail_vhost_netmask: "{{ ip_settings[inventory_hostname]['br-prv']['netmask'] }}"
-###########################################################
-###########################################################
-###########################################################
-
-
-
-
-contrail_keepalived: no
-#contrail_haproxy_address: "10.0.0.22" # 10.0.0.80
-#contrail_haproxy_address: "{{ internal_vip.ip }}"
-contrail_haproxy_address: "{{ public_vip.ip }}"
-contrail_netmask: "255.255.255.0"
-contrail_prefixlen: "24"
-contrail_gateway: "10.0.0.1"
-
-contrail_router_asn: "64512"
-
-### Modify when need openstack provisioning
-keystone_provision: no
-install_nova: no
-#rabbit_password: {{ RABBIT_PASS }}
-
-contrail_tor_agents:
- - name: "test01"
- address: "10.0.0.81"
- ovs_protocol: "pssl"
- ovs_port: "9991"
- tunnel_address: "10.0.0.81"
- http_server_port: "9011"
- vendor_name: "Juniper"
- product_name: "QFX5100"
- tsn_names: [ "system002" ]
- - name: "test02"
- address: "10.0.0.82"
- ovs_protocol: "pssl"
- ovs_port: "9992"
- tunnel_address: "10.0.0.82"
- http_server_port: "9012"
- vendor_name: "Juniper"
- product_name: "QFX5100"
- tsn_names: [ "system002" ]
-
-
-# adapter for compass
-kernel_package_noarch: []
-
-compute_package_noarch: []
-
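The br-prv lookups above (contrail_address, the contrail_vhost_* values, and the DISCOVERY address in vrouter-nodemgr-param.j2) all assume an ip_settings dictionary keyed by hostname. A minimal illustration of the shape those expressions expect; the 'host1' key matches the one used earlier, but the addresses are invented for the example:

    ip_settings:
      host1:
        br-prv:
          ip: 10.1.0.11
          netmask: 255.255.255.0
          gw: 10.1.0.1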
diff --git a/deploy/adapters/ansible/roles/secgroup/handlers/main.yml b/deploy/adapters/ansible/roles/secgroup/handlers/main.yml
index 77082ded..a947c7cd 100644
--- a/deploy/adapters/ansible/roles/secgroup/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/secgroup/handlers/main.yml
@@ -9,10 +9,10 @@
---
- name: restart controller relation service
service: name={{ item }} state=restarted enabled=yes
- ignore_errors: True
+ ignore_errors: "True"
with_items: "{{ controller_services }}"
- name: restart compute relation service
service: name={{ item }} state=restarted enabled=yes
- ignore_errors: True
+ ignore_errors: "True"
with_items: "{{ compute_services }}"
diff --git a/deploy/adapters/ansible/roles/secgroup/tasks/secgroup.yml b/deploy/adapters/ansible/roles/secgroup/tasks/secgroup.yml
index 9323facc..4efb13e9 100644
--- a/deploy/adapters/ansible/roles/secgroup/tasks/secgroup.yml
+++ b/deploy/adapters/ansible/roles/secgroup/tasks/secgroup.yml
@@ -19,7 +19,9 @@
tags: secgroup
- name: update controller configs
- shell: "[ -f '{{ item.1 }}' ] && crudini --merge '{{ item.1 }}' < /opt/os_templates/{{ item.0.src }} || /bin/true"
+  shell: |
+    [ -f '{{ item.1 }}' ] && crudini --merge '{{ item.1 }}' \
+      < /opt/os_templates/{{ item.0.src }} || /bin/true
tags: secgroup
with_subelements:
- "{{ configs_templates }}"
@@ -28,11 +30,12 @@
when: inventory_hostname in "{{ groups['controller'] }}"
- name: update compute configs
- shell: "[ -f '{{ item.1 }}' ] && crudini --merge '{{ item.1 }}' < /opt/os_templates/{{ item.0.src }} || /bin/true"
+  shell: |
+    [ -f '{{ item.1 }}' ] && crudini --merge '{{ item.1 }}' \
+      < /opt/os_templates/{{ item.0.src }} || /bin/true
tags: secgroup
with_subelements:
- "{{ configs_templates }}"
- dest
notify: restart compute relation service
when: inventory_hostname in "{{ groups['compute'] }}"
-
diff --git a/deploy/adapters/ansible/roles/setup-network/tasks/main.yml b/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
index 2649ea79..5e5a0936 100644
--- a/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
@@ -21,7 +21,8 @@
when: 'item["type"] == "ovs"'
- name: add ovs uplink
- openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
+ openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }}
+ state=present
with_items: "{{ network_cfg['provider_net_mappings'] }}"
when: 'item["type"] == "ovs"'
@@ -34,7 +35,7 @@
shell: mkdir -p /opt/setup_networks
- name: copy scripts
- copy: src={{ item }} dest=/opt/setup_networks
+ copy: src={{ item }} dest=/opt/setup_networks
with_items:
- setup_networks/log.py
- setup_networks/setup_networks.py
@@ -43,7 +44,7 @@
- network_check
- name: copy boot scripts
- copy: src={{ item }} dest=/etc/init.d/ mode=0755
+ copy: src={{ item }} dest=/etc/init.d/ mode=0755
with_items:
- setup_networks/net_init
@@ -75,4 +76,3 @@
shell: update-rc.d net_init defaults
- meta: flush_handlers
-
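The wrapped openvswitch_port line earlier in this file works because YAML folds the indented continuation into the same scalar. An equivalent dict-form sketch that stays within line-length limits (shown only as an alternative way to write such a task, not as a change to the role):

    - name: add ovs uplink
      openvswitch_port:
        bridge: "{{ item['name'] }}"
        port: "{{ item['interface'] }}"
        state: present
      with_items: "{{ network_cfg['provider_net_mappings'] }}"
      when: 'item["type"] == "ovs"'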
diff --git a/deploy/adapters/ansible/roles/storage/files/loop.yml b/deploy/adapters/ansible/roles/storage/files/loop.yml
index 776cf8cd..32088de7 100755
--- a/deploy/adapters/ansible/roles/storage/files/loop.yml
+++ b/deploy/adapters/ansible/roles/storage/files/loop.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
diff --git a/deploy/adapters/ansible/roles/storage/tasks/main.yml b/deploy/adapters/ansible/roles/storage/tasks/main.yml
index be712c67..b4263d6b 100755
--- a/deploy/adapters/ansible/roles/storage/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/storage/tasks/main.yml
@@ -43,7 +43,10 @@
- storage
- name: set autostart file for centos
- copy: src=storage.service dest=/usr/lib/systemd/system/storage.service mode=0755
+ copy:
+ src: storage.service
+ dest: /usr/lib/systemd/system/storage.service
+ mode: 0755
when: ansible_os_family == "RedHat"
tags:
- storage
diff --git a/deploy/adapters/ansible/roles/storage/tasks/real.yml b/deploy/adapters/ansible/roles/storage/tasks/real.yml
index e99f185e..7845a235 100755
--- a/deploy/adapters/ansible/roles/storage/tasks/real.yml
+++ b/deploy/adapters/ansible/roles/storage/tasks/real.yml
@@ -9,7 +9,7 @@
---
- name: destroy GPT label
shell: dd if=/dev/urandom of={{ physical_device }} bs=4M count=1
- ignore_errors: True
+ ignore_errors: "True"
- name: create physical and group volumes
lvg: vg=storage-volumes pvs={{ physical_device }}
diff --git a/deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml b/deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml
index be00484b..f4f4f7b9 100644
--- a/deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml
@@ -7,7 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-
- name: disable auto start
copy:
content: "#!/bin/sh\nexit 101"
@@ -39,8 +38,10 @@
- name: edit /etc/fstab
shell: >
- echo "/var/swift1 /srv/node/swift1/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
- echo "/var/swift2 /srv/node/swift2/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
+ echo "/var/swift1 /srv/node/swift1/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" \
+ >> /etc/fstab;
+ echo "/var/swift2 /srv/node/swift2/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" \
+ >> /etc/fstab;
mount /srv/node/swift1;
mount /srv/node/swift2;
@@ -63,18 +64,3 @@
mkdir -p /var/cache/swift;
chown -R root:swift /var/cache/swift;
chmod -R 775 /var/cache/swift;
-
-#- name: copy swift lib
-# copy: src=swift-lib.tar.gz dest=/tmp/swift-lib.tar.gz
-#
-#- name: upload swift lib
-# unarchive: src=swift-lib.tar.gz dest=/tmp/
-#
-#- name: copy swift lib
-# shell: command: su -s /bin/sh -c "cp /tmp/swift-lib/* /usr/lib/"
-#
-#- name: untar swift lib
-# shell: >
-# tar zxf /tmp/swift-lib.tar.gz;
-# cp /tmp/swift-lib/* /usr/lib/;
-
diff --git a/deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml b/deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml
index 36d05040..7346da10 100644
--- a/deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml
@@ -30,5 +30,3 @@
- name: update proxy-server conf
template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
-
-
diff --git a/deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml b/deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml
index 92d4ab22..75e77882 100644
--- a/deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml
@@ -7,17 +7,20 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-
- name: create account.builder file
shell: >
- cd /etc/swift ;
+ cd /etc/swift;
swift-ring-builder account.builder create 10 3 1;
- name: add each storage node to the ring
shell: >
cd /etc/swift;
- swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift1 --weight 100 ;
- swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift2 --weight 100 ;
+ swift-ring-builder account.builder add --region 1 --zone 1 \
+ --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 \
+ --device swift1 --weight 100 ;
+ swift-ring-builder account.builder add --region 1 --zone 1 \
+ --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 \
+ --device swift2 --weight 100 ;
with_indexed_items: groups['compute']
- name: verify the ring contents 1
@@ -30,8 +33,6 @@
cd /etc/swift;
swift-ring-builder account.builder rebalance;
-
-#####################
- name: create container builder file
shell: >
cd /etc/swift;
@@ -40,8 +41,12 @@
- name: add each storage node to the ring
shell: >
cd /etc/swift;
- swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift1 --weight 100;
- swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift2 --weight 100;
+ swift-ring-builder container.builder add --region 1 --zone 1 \
+ --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 \
+ --device swift1 --weight 100;
+ swift-ring-builder container.builder add --region 1 --zone 1 \
+ --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 \
+ --device swift2 --weight 100;
with_indexed_items: groups['compute']
- name: verify the ring contents 2
@@ -54,8 +59,6 @@
cd /etc/swift;
swift-ring-builder container.builder rebalance;
-#############################
-
- name: create object builder file
shell: >
cd /etc/swift;
@@ -64,8 +67,12 @@
- name: add each storage node to the ring
shell: >
cd /etc/swift;
- swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift1 --weight 100;
- swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift2 --weight 100;
+ swift-ring-builder object.builder add --region 1 --zone 1 \
+ --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 \
+ --device swift1 --weight 100;
+ swift-ring-builder object.builder add --region 1 --zone 1 \
+ --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 \
+ --device swift2 --weight 100;
with_indexed_items: groups['compute']
- name: verify the ring contents
@@ -78,16 +85,16 @@
cd /etc/swift;
swift-ring-builder object.builder rebalance;
-##########################
-
- name: distribute ring configuration files to the other controller
shell: >
cd /etc/swift;
- scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
+ scp account.ring.gz container.ring.gz object.ring.gz \
+ root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
with_indexed_items: groups['controller']
- name: distribute ring configuration files to the all compute
shell: >
cd /etc/swift;
- scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
+ scp account.ring.gz container.ring.gz object.ring.gz \
+ root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
with_indexed_items: groups['compute']
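For reference, the swift-ring-builder <name>.builder create 10 3 1 calls in this file build rings with a partition power of 10 (2^10 = 1024 partitions), 3 replicas, and a minimum of 1 hour between moves of any given partition; the add and rebalance steps that follow then place the two devices per compute node (swift1 and swift2) into each ring.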
diff --git a/deploy/adapters/ansible/roles/swift/tasks/swift.yml b/deploy/adapters/ansible/roles/swift/tasks/swift.yml
index 4e2651a7..473c2710 100644
--- a/deploy/adapters/ansible/roles/swift/tasks/swift.yml
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift.yml
@@ -34,7 +34,7 @@
- name: restart tasks on compute
shell: swift-init all start
when: inventory_hostname in groups['compute']
- ignore_errors: True
+ ignore_errors: "True"
- name: restart tasks on controller
service: name={{ item }} state=restarted enabled=yes
@@ -68,7 +68,7 @@
- swift-container-updater
- swift-object-replicator
when: inventory_hostname in groups['compute']
- ignore_errors: True
+ ignore_errors: "True"
- name: restart swift task
shell: >
@@ -76,4 +76,4 @@
sleep 10;
for i in `cat /opt/swift-service`; do service $i restart; done;
when: inventory_hostname in groups['compute']
- ignore_errors: True
+ ignore_errors: "True"
diff --git a/deploy/adapters/ansible/roles/tacker/tasks/main.yml b/deploy/adapters/ansible/roles/tacker/tasks/main.yml
index 2759e968..5df2253b 100755
--- a/deploy/adapters/ansible/roles/tacker/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/tacker/tasks/main.yml
@@ -11,4 +11,5 @@
- name: Install Tacker on Controller
include: tacker_controller.yml
- when: inventory_hostname in groups['controller'] and ansible_os_family == "Debian"
+ when: inventory_hostname in groups['controller']
+ and ansible_os_family == "Debian"
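The two-line when: above relies on YAML plain-scalar folding to join the condition back into a single expression. An equivalent, arguably clearer sketch uses the list form, whose entries are implicitly ANDed (shown as an alternative, not as a change to the role):

    - name: Install Tacker on Controller
      include: tacker_controller.yml
      when:
        - inventory_hostname in groups['controller']
        - ansible_os_family == "Debian"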
diff --git a/deploy/adapters/ansible/roles/tacker/tasks/tacker_controller.yml b/deploy/adapters/ansible/roles/tacker/tasks/tacker_controller.yml
index 4e92557d..2d37ff13 100755
--- a/deploy/adapters/ansible/roles/tacker/tasks/tacker_controller.yml
+++ b/deploy/adapters/ansible/roles/tacker/tasks/tacker_controller.yml
@@ -12,31 +12,44 @@
register: http_server
- name: create tacker_home, tacker_client_home, tacker_horizon_home
- shell: >
- mkdir -p /opt/tacker
- mkdir -p /opt/tacker_client
- mkdir -p /opt/tacker_horizon
+ shell: |
+ mkdir -p /opt/tacker;
+ mkdir -p /opt/tacker_client;
+ mkdir -p /opt/tacker_horizon;
- name: download tacker package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_pkg_name }}" dest=/opt/{{ tacker_pkg_name }}
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_pkg_name }}"
+ dest: /opt/{{ tacker_pkg_name }}
- name: download tacker_client package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_client_pkg_name }}" dest=/opt/{{ tacker_client_pkg_name }}
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_client_pkg_name }}"
+ dest: /opt/{{ tacker_client_pkg_name }}
- name: download tacker_horizon package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_horizon_pkg_name }}" dest=/opt/{{ tacker_horizon_pkg_name }}
+ get_url:
+ url: "http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_horizon_pkg_name }}"
+ dest: /opt/{{ tacker_horizon_pkg_name }}
- name: extract tacker package
- command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_pkg_name }} -C {{ tacker_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files"
+ command: |
+ su -s /bin/sh -c "tar xzf /opt/{{ tacker_pkg_name }} -C {{ tacker_home }} \
+ --strip-components 1 --no-overwrite-dir -k --skip-old-files"
- name: extract tacker_client package
- command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_client_pkg_name }} -C {{ tacker_client_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files"
+ command: |
+ su -s /bin/sh -c "tar xzf /opt/{{ tacker_client_pkg_name }} -C {{ tacker_client_home }} \
+ --strip-components 1 --no-overwrite-dir -k --skip-old-files"
- name: extract tacker_horizon package
- command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_horizon_pkg_name }} -C {{ tacker_horizon_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files"
+ command: |
+ su -s /bin/sh -c "tar xzf /opt/{{ tacker_horizon_pkg_name }} -C {{ tacker_horizon_home }} \
+ --strip-components 1 --no-overwrite-dir -k --skip-old-files"
- name: edit ml2_conf.ini
- shell: crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security;
+ shell: |
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security;
- name: Restart neutron-server
service: name=neutron-server state=restarted
@@ -65,9 +78,10 @@
state: "restarted"
- name: drop and recreate tacker database
- shell: mysql -e "drop database if exists tacker;";
- mysql -e "create database tacker character set utf8;";
- mysql -e "grant all on tacker.* to 'tacker'@'%' identified by 'TACKER_DBPASS';";
+ shell: |
+ mysql -e "drop database if exists tacker;";
+ mysql -e "create database tacker character set utf8;";
+ mysql -e "grant all on tacker.* to 'tacker'@'%' identified by 'TACKER_DBPASS';";
when: inventory_hostname == haproxy_hosts.keys()[0]
- name: get the openstack user info
@@ -83,29 +97,45 @@
register: endpoint_info
- name: delete the existed tacker endpoint
- shell: . /opt/admin-openrc.sh; openstack endpoint delete $(openstack endpoint list | grep tacker | awk '{print $2}')
- when: endpoint_info.stdout.find('tacker') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
+ shell: |
+ . /opt/admin-openrc.sh;
+ openstack endpoint delete \
+ $(openstack endpoint list | grep tacker | awk '{print $2}')
+ when: endpoint_info.stdout.find('tacker') != -1
+ and inventory_hostname == haproxy_hosts.keys()[0]
- name: delete the existed tacker service
- shell: . /opt/admin-openrc.sh; openstack service delete tacker
- when: service_info.stdout.find('tacker') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
+ shell: |
+ . /opt/admin-openrc.sh;
+ openstack service delete tacker;
+ when: service_info.stdout.find('tacker') != -1
+ and inventory_hostname == haproxy_hosts.keys()[0]
- name: delete the existed tacker user
- shell: . /opt/admin-openrc.sh; openstack user delete tacker
- when: user_info.stdout.find('tacker') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
+ shell: |
+ . /opt/admin-openrc.sh;
+ openstack user delete tacker;
+ when: user_info.stdout.find('tacker') != -1
+ and inventory_hostname == haproxy_hosts.keys()[0]
- name: create tacker user with admin privileges
- shell: . /opt/admin-openrc.sh; openstack user create --password console tacker; openstack role add --project service --user tacker admin;
+ shell: |
+ . /opt/admin-openrc.sh;
+ openstack user create --password console tacker;
+ openstack role add --project service --user tacker admin;
when: inventory_hostname == haproxy_hosts.keys()[0]
- name: create tacker service
- shell: >
- . /opt/admin-openrc.sh; openstack service create --name tacker --description "Tacker Project" nfv-orchestration
+ shell: |
+ . /opt/admin-openrc.sh;
+ openstack service create --name tacker \
+ --description "Tacker Project" nfv-orchestration
when: inventory_hostname == haproxy_hosts.keys()[0]
- name: provide an endpoint to tacker service
- shell: >
- . /opt/admin-openrc.sh; openstack endpoint create --region RegionOne \
+ shell: |
+ . /opt/admin-openrc.sh; \
+ openstack endpoint create --region RegionOne \
--publicurl 'http://{{ public_vip.ip }}:8888/' \
--adminurl 'http://{{ internal_vip.ip }}:8888/' \
--internalurl 'http://{{ internal_vip.ip }}:8888/' tacker
@@ -115,18 +145,16 @@
pip: name=Babel state=present version=2.3.4
- name: install pip packages
- shell: >
+ shell: |
pip install tosca-parser heat-translator oslosphinx;
- name: install tacker
- shell: >
+ shell: |
. /opt/admin-openrc.sh; cd {{ tacker_home }}; python setup.py install
- name: create 'tacker' directory in '/var/cache', set ownership and permissions
- shell: >
+ shell: |
mkdir -p /var/cache/tacker
-# sudo chown <LOGIN_USER>:root /var/cache/tacker
-# chmod 700 /var/cache/tacker
- name: create 'tacker' directory in '/var/log'
shell: mkdir -p /var/log/tacker
@@ -136,30 +164,40 @@
with_items: "{{ tacker_configs_templates }}"
- name: edit tacker configuration file
- shell: crudini --merge /usr/local/etc/tacker/tacker.conf < /opt/os_templates/tacker.j2
+ shell: |
+ crudini --merge /usr/local/etc/tacker/tacker.conf \
+ < /opt/os_templates/tacker.j2
- name: populate tacker database
- shell: >
- . /opt/admin-openrc.sh; /usr/local/bin/tacker-db-manage --config-file /usr/local/etc/tacker/tacker.conf upgrade head
+ shell: |
+ . /opt/admin-openrc.sh; \
+ /usr/local/bin/tacker-db-manage \
+ --config-file /usr/local/etc/tacker/tacker.conf upgrade head
when: inventory_hostname == haproxy_hosts.keys()[0]
- name: install tacker client
- shell: >
- . /opt/admin-openrc.sh; cd {{ tacker_client_home }}; python setup.py install
+ shell: |
+ . /opt/admin-openrc.sh; cd {{ tacker_client_home }};
+ python setup.py install
- name: install tacker horizon
- shell: >
- . /opt/admin-openrc.sh; cd {{ tacker_horizon_home }}; python setup.py install
+ shell: |
+ . /opt/admin-openrc.sh; cd {{ tacker_horizon_home }};
+ python setup.py install
- name: enable tacker horizon in dashboard
- shell: >
- cp {{ tacker_horizon_home }}/openstack_dashboard_extensions/* /usr/share/openstack-dashboard/openstack_dashboard/enabled/
+ shell: |
+ cp {{ tacker_horizon_home }}/openstack_dashboard_extensions/* \
+ /usr/share/openstack-dashboard/openstack_dashboard/enabled/
- name: restart apache server
shell: service apache2 restart
- name: launch tacker-server
- shell: >
- . /opt/admin-openrc.sh; python /usr/local/bin/tacker-server --config-file /usr/local/etc/tacker/tacker.conf --log-file /var/log/tacker/tacker.log
+ shell: |
+ . /opt/admin-openrc.sh; \
+ python /usr/local/bin/tacker-server \
+    --config-file /usr/local/etc/tacker/tacker.conf \
+ --log-file /var/log/tacker/tacker.log
async: 9999999999999
poll: 0
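The tacker-server launch above uses Ansible's fire-and-forget pattern: a very large async value combined with poll: 0 starts the process and moves on without waiting for it to finish. A minimal sketch of the same pattern with a placeholder command (not the role's actual task):

    - name: start a long-running process without waiting
      shell: /usr/local/bin/some-daemon --config /etc/some-daemon.conf
      async: 2147483647    # effectively no timeout for the background job
      poll: 0              # return immediately instead of polling for completion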
diff --git a/deploy/adapters/ansible/roles/tacker/vars/main.yml b/deploy/adapters/ansible/roles/tacker/vars/main.yml
index 0ae6d9ef..601f3721 100755
--- a/deploy/adapters/ansible/roles/tacker/vars/main.yml
+++ b/deploy/adapters/ansible/roles/tacker/vars/main.yml
@@ -1,3 +1,4 @@
+---
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#