author    liyuenan <liyuenan@huawei.com>    2016-12-19 11:06:36 +0800
committer liyuenan <liyuenan@huawei.com>    2016-12-20 15:05:03 +0800
commit    819912d0379f6cd2b2693c2968576f7514a117c5 (patch)
tree      e24d274484fa1ec8976c9f1bd44f5ee6e445724b /deploy/adapters
parent    eb5dbdac42b1b7b775fbc1dc513376425a6898ff (diff)
master only support newton

JIRA: COMPASS-513
Remove other roles and ppa, master only support newton.

Change-Id: I47ddb16baa25902c3e05cc7f9d0d6430f5dc7e00
Signed-off-by: liyuenan <liyuenan@huawei.com>
Diffstat (limited to 'deploy/adapters')
-rw-r--r--deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml10
-rw-r--r--deploy/adapters/ansible/openstack/templates/neutron.conf431
-rw-r--r--deploy/adapters/ansible/openstack/templates/nova.conf105
-rw-r--r--deploy/adapters/ansible/openstack_juno/.gitkeep0
-rw-r--r--deploy/adapters/ansible/openstack_kilo/.gitkeep0
-rw-r--r--deploy/adapters/ansible/openstack_liberty/.gitkeep0
-rwxr-xr-xdeploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml252
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/.gitkeep0
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/HA-ansible-multinodes.yml265
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/aodh_install.yml31
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/aodh.conf.j246
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/api_paste.ini.j222
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/policy.json.j220
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/Debian.yml22
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/RedHat.yml22
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/apache/files/index.html10
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/apache/tasks/main.yml38
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/ceilometer_controller/vars/Debian.yml37
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/ceilometer_controller/vars/RedHat.yml36
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/ceph-mon/tasks/install_mon.yml36
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/ceph-openstack/tasks/main.yml33
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/ceph-osd/tasks/install_osd.yml42
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/ceph-purge/tasks/main.yml37
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/congress/files/congress.conf37
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/congress/files/congress.service19
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_config_debian.yml31
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_config_redhat.yml31
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_install.yml25
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/main.yml20
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/congress/vars/RedHat.yml21
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/dashboard/vars/Debian.yml17
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/database/templates/data.j251
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/database/vars/main.yml39
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/ext-network/handlers/main.yml29
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml56
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/glance/tasks/nfs.yml68
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/glance/vars/Debian.yml21
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/glance/vars/RedHat.yml23
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/ha/templates/haproxy.cfg227
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/heat/tasks/heat_install.yml39
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/heat/templates/heat.j228
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/keystone/tasks/keystone_install.yml97
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/RedHat.yml20
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/main.yml194
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/neutron-compute/tasks/main.yml75
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/neutron-compute/vars/Debian.yml19
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/neutron-network/tasks/main.yml117
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/neutron-network/vars/Debian.yml25
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/tasks/main.yml58
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/templates/nova-compute.conf11
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/templates/nova.conf89
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/nova-controller/tasks/nova_config.yml21
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/odl_cluster/vars/Debian.yml19
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py43
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init20
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py83
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/handlers/main.yml11
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml121
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/onos_controller.yml131
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/onos_sfc_controller.yml140
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/openvswitch.yml64
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf47
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/ml2_conf.sh15
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian14
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg5
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/Debian.yml15
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/RedHat.yml15
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/main.yml23
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/open-contrail/tasks/uninstall-openvswitch.yml46
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/neutron.j27
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/nova.j23
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/secgroup/vars/Debian.yml35
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/tacker/files/tacker.conf36
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka/roles/tacker/tasks/tacker_controller.yml215
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/config.yaml4
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2426
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tackerc.sh12
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/templates/neutron.conf486
-rw-r--r--deploy/adapters/ansible/openstack_mitaka/templates/nova.conf96
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/.gitkeep0
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml265
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/handlers/main.yml13
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/aodh_config.yml14
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/aodh_install.yml31
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/main.yml23
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j246
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/api_paste.ini.j222
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/policy.json.j220
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/Debian.yml22
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/RedHat.yml22
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/main.yml12
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml34
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/RedHat.yml36
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/tasks/install_mon.yml43
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/vars/Debian.yml12
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml19
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/main.yml33
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/vars/Debian.yml30
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-osd/tasks/install_osd.yml42
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/common/vars/Debian.yml30
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/dashboard/templates/openstack-dashboard.conf.j218
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/dashboard/vars/Debian.yml17
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_cluster_debian.yml69
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_install.yml70
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/templates/data.j251
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/Debian.yml55
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/main.yml39
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/handlers/main.yml29
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/tasks/main.yml54
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/Debian.yml18
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/RedHat.yml17
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/main.yml10
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/tasks/nfs.yml68
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/Debian.yml21
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/RedHat.yml23
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/ha/templates/haproxy.cfg216
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/heat/tasks/heat_install.yml39
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/heat/templates/heat.j228
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/tasks/keystone_install.yml98
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/wsgi-keystone.conf.j250
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/Debian.yml24
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/main.yml179
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/controllers.py1062
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/deb.conf.bak11
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/handlers/main.yml15
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/tasks/main.yml75
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/vars/Debian.yml19
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/tasks/main.yml117
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/vars/Debian.yml25
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/tasks/main.yml63
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/templates/nova-compute.conf11
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-controller/tasks/nova_config.yml21
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_00_download_packages.yml34
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml53
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_04_install_pip_packages.yml26
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml22
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_odl_controller.yml47
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml25
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/main.yml24
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/jetty.xml88
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/ml2_conf.sh14
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/tomcat-server.xml61
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/Debian.yml21
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/main.yml31
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/handlers/main.yml11
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/main.yml51
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/onos_controller.yml140
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/openvswitch.yml57
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/templates/ml2_conf.sh15
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/Debian.yml14
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/RedHat.yml14
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/main.yml19
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml46
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/neutron.j27
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/nova.j23
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/vars/Debian.yml35
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/setup-network/files/setup_networks/net_init24
-rwxr-xr-xdeploy/adapters/ansible/openstack_mitaka_xenial/roles/storage/files/storage10
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift.yml89
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml18
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2426
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/templates/neutron.conf486
-rw-r--r--deploy/adapters/ansible/openstack_mitaka_xenial/templates/nova.conf96
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml264
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/handlers/main.yml13
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_config.yml14
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/main.yml23
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/main.yml12
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/handlers/main.yml16
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/main.yml22
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/Debian.yml17
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/RedHat.yml17
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/main.yml11
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/handlers/main.yml22
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/main.yml22
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/Debian.yml26
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/RedHat.yml26
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/main.yml11
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/tasks/install_mon.yml43
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/templates/ceph-mon.service22
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/vars/Debian.yml12
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml19
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/main.yml33
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/vars/Debian.yml30
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-osd/tasks/install_osd.yml42
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-controller/templates/cinder.conf85
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-volume/templates/cinder.conf75
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/common/templates/pip.conf5
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/common/vars/Debian.yml31
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/congress/handlers/main.yml12
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_db.yml28
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/api-paste.ini34
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/congress.conf510
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/policy.json6
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/congress/vars/Debian.yml21
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/congress/vars/main.yml12
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/tasks/main.yml106
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/openstack-dashboard.conf.j218
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/vars/Debian.yml17
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_cluster_debian.yml69
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_install.yml70
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/database/templates/data.j251
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/Debian.yml55
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/main.yml39
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/handlers/main.yml29
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml44
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/Debian.yml18
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/RedHat.yml17
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/main.yml10
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/glance/tasks/nfs.yml68
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-api.conf93
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-registry.conf64
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/Debian.yml21
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/RedHat.yml23
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/ha/templates/haproxy.cfg227
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml39
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j254
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_config.yml96
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml98
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/main.yml30
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc.sh18
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/demo-openrc.sh17
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/keystone.conf60
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/wsgi-keystone.conf.j250
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/Debian.yml24
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml194
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf11
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf.bak11
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/get_deb_depends.py27
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/moon/handlers/main.yml12
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/main.yml11
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-compute.yml20
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-controller.yml238
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon.yml16
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/admin-openrc.sh15
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/api-paste.ini106
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/demo-openrc.sh13
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone-paste.ini96
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone.conf59
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/proxy-server.conf775
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/wsgi-keystone.conf.j246
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/Debian.yml168
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/main.yml172
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/handlers/main.yml15
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/tasks/main.yml75
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/vars/Debian.yml19
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-controller/tasks/neutron_install.yml46
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/tasks/main.yml117
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/vars/Debian.yml25
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/tasks/main.yml57
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova-compute.conf11
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova.conf104
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/nova-controller/tasks/nova_config.yml21
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/files/opendaylight.service21
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_00_download_packages.yml34
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml53
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_04_install_pip_packages.yml26
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml22
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_odl_controller.yml47
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml30
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml34
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/main.yml24
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/moon-odl.yml61
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/jetty.xml88
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/ml2_conf.sh14
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/moon-environment3
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/settings.xml82
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/tomcat-server.xml61
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/Debian.yml21
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/main.yml31
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/handlers/main.yml11
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/main.yml51
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/onos_controller.yml140
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/openvswitch.yml57
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/templates/ml2_conf.sh15
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/Debian.yml14
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/RedHat.yml14
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/main.yml19
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml46
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/neutron.j27
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/nova.j23
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/vars/Debian.yml35
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/setup-network/files/setup_networks/net_init24
-rwxr-xr-xdeploy/adapters/ansible/openstack_newton_xenial/roles/storage/files/storage10
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/main.yml11
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-compute1.yml80
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller1.yml34
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller2.yml93
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/account-server.conf200
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/container-server.conf229
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/object-server.conf347
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/proxy-server.conf764
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/rsyncd.conf23
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/swift.conf183
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/Debian.yml27
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2426
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf112
-rw-r--r--deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf119
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/HA-ansible-multinodes.yml265
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/handlers/main.yml13
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/aodh_config.yml14
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/aodh_install.yml31
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/main.yml23
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/templates/aodh.conf.j246
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/templates/api_paste.ini.j222
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/templates/policy.json.j220
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/vars/Debian.yml22
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/vars/RedHat.yml22
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/aodh/vars/main.yml12
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/apache/files/index.html10
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/apache/tasks/main.yml38
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ceilometer_controller/vars/Debian.yml37
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ceilometer_controller/vars/RedHat.yml36
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ceph-mon/tasks/install_mon.yml36
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ceph-openstack/tasks/ceph_openstack_post.yml19
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ceph-openstack/tasks/main.yml33
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ceph-osd/tasks/install_osd.yml42
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ceph-purge/tasks/main.yml37
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/common/tasks/RedHat.yml3
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/common/tasks/main.yml96
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/common/templates/hosts7
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/common/templates/ntp.conf54
-rw-r--r--deploy/adapters/ansible/openstack_osp9/roles/common/templates/openstack_ppa_repo.repo.j27
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/common/templates/pip.conf5
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/common/vars/Debian.yml30
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/common/vars/RedHat.yml26
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/common/vars/main.yml14
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/dashboard/handlers/main.yml12
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/dashboard/tasks/main.yml121
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard-redhat.conf.j221
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard.conf14
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard.conf.j215
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/ports.j215
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/Debian.yml17
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/RedHat.yml19
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/main.yml13
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/database/templates/data.j251
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/database/vars/main.yml39
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ext-network/handlers/main.yml29
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ext-network/tasks/main.yml56
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/Debian.yml18
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/RedHat.yml17
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/main.yml10
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/glance/tasks/nfs.yml68
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/glance/vars/Debian.yml21
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/glance/vars/RedHat.yml23
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/heat/tasks/heat_install.yml39
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/heat/templates/heat.j228
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/keystone/tasks/keystone_install.yml97
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/keystone/vars/RedHat.yml20
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/keystone/vars/main.yml164
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/neutron-compute/tasks/main.yml75
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/neutron-compute/vars/Debian.yml19
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/neutron-network/tasks/main.yml117
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/neutron-network/vars/Debian.yml25
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/nova-compute/tasks/main.yml58
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/nova-compute/templates/nova-compute.conf11
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/nova-compute/templates/nova.conf89
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/nova-controller/tasks/nova_config.yml21
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/odl_cluster/tasks/openvswitch.yml148
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/odl_cluster/vars/Debian.yml23
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/log.py43
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/net_init20
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/setup_networks.py83
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/handlers/main.yml11
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/main.yml121
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/onos_controller.yml131
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/onos_sfc_controller.yml140
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/openvswitch.yml64
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/keepalived.conf47
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/ml2_conf.sh15
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/my_configs.debian14
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/network.cfg5
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/Debian.yml15
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/RedHat.yml15
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/main.yml23
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/open-contrail/tasks/uninstall-openvswitch.yml46
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/secgroup/templates/neutron.j27
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/secgroup/templates/nova.j23
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/secgroup/vars/Debian.yml35
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/roles/tacker/templates/tacker.j2426
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/templates/dnsmasq-neutron.conf2
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/templates/ml2_conf.ini113
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/templates/neutron.conf486
-rwxr-xr-xdeploy/adapters/ansible/openstack_osp9/templates/nova.conf96
-rw-r--r--deploy/adapters/ansible/roles/aodh/handlers/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/aodh/handlers/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/aodh_config.yml)0
-rw-r--r--deploy/adapters/ansible/roles/aodh/tasks/aodh_install.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_install.yml)0
-rw-r--r--deploy/adapters/ansible/roles/aodh/tasks/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/aodh/templates/aodh.conf.j2 (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2)0
-rw-r--r--deploy/adapters/ansible/roles/aodh/vars/Debian.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/Debian.yml)0
-rw-r--r--deploy/adapters/ansible/roles/aodh/vars/RedHat.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/RedHat.yml)0
-rw-r--r--deploy/adapters/ansible/roles/aodh/vars/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml6
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/ceilometer_config.yml)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/ceilometer_install.yml)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml42
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.conf.j2 (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/templates/ceilometer.conf.j2)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_compute/templates/nova.conf.j2 (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/templates/nova.conf.j2)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml10
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml10
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml12
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_config.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/ceilometer_config.yml)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/ceilometer_install.yml)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml51
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.conf.j2 (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/ceilometer.conf.j2)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.conf.j2 (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/cinder.conf.j2)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-api.conf.j2 (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/glance-api.conf.j2)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-registry.conf.j2 (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/glance-registry.conf.j2)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2 (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2)0
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml17
-rw-r--r--deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml31
-rw-r--r--deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml11
-rw-r--r--deploy/adapters/ansible/roles/ceph-mon/templates/ceph-mon.service (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/templates/ceph-mon.service)0
-rw-r--r--deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml4
-rw-r--r--deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/ceph-openstack/tasks/ceph_openstack_post.yml)0
-rw-r--r--deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml7
-rwxr-xr-xdeploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml4
-rw-r--r--deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml4
-rw-r--r--deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf63
-rw-r--r--deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf45
-rw-r--r--deploy/adapters/ansible/roles/common/templates/pip.conf2
-rw-r--r--deploy/adapters/ansible/roles/common/vars/Debian.yml3
-rw-r--r--deploy/adapters/ansible/roles/congress/files/congress.service (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/congress/files/congress.service)0
-rw-r--r--[-rwxr-xr-x]deploy/adapters/ansible/roles/congress/handlers/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/congress/handlers/main.yml)2
-rw-r--r--deploy/adapters/ansible/roles/congress/tasks/congress_config.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_config.yml)0
-rw-r--r--[-rwxr-xr-x]deploy/adapters/ansible/roles/congress/tasks/congress_db.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_db.yml)0
-rw-r--r--deploy/adapters/ansible/roles/congress/tasks/congress_install.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_install.yml)2
-rw-r--r--deploy/adapters/ansible/roles/congress/tasks/main.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/main.yml)0
-rw-r--r--[-rwxr-xr-x]deploy/adapters/ansible/roles/congress/templates/api-paste.ini (renamed from deploy/adapters/ansible/openstack_mitaka/roles/congress/templates/api-paste.ini)0
-rw-r--r--[-rwxr-xr-x]deploy/adapters/ansible/roles/congress/templates/congress.conf (renamed from deploy/adapters/ansible/openstack_mitaka/roles/congress/templates/congress.conf)0
-rw-r--r--[-rwxr-xr-x]deploy/adapters/ansible/roles/congress/templates/policy.json (renamed from deploy/adapters/ansible/openstack_mitaka/roles/congress/templates/policy.json)0
-rw-r--r--[-rwxr-xr-x]deploy/adapters/ansible/roles/congress/vars/Debian.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/congress/vars/Debian.yml)0
-rw-r--r--[-rwxr-xr-x]deploy/adapters/ansible/roles/congress/vars/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/congress/vars/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/dashboard/tasks/main.yml16
-rw-r--r--deploy/adapters/ansible/roles/dashboard/templates/local_settings.py.j2 (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/local_settings.py.j2)0
-rwxr-xr-xdeploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j25
-rw-r--r--deploy/adapters/ansible/roles/dashboard/vars/Debian.yml1
-rw-r--r--deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml43
-rw-r--r--deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml11
-rw-r--r--deploy/adapters/ansible/roles/database/templates/data.j26
-rw-r--r--deploy/adapters/ansible/roles/database/vars/Debian.yml18
-rw-r--r--deploy/adapters/ansible/roles/database/vars/main.yml7
-rw-r--r--deploy/adapters/ansible/roles/ext-network/handlers/main.yml2
-rw-r--r--deploy/adapters/ansible/roles/ext-network/tasks/main.yml58
-rw-r--r--deploy/adapters/ansible/roles/ext-network/vars/Debian.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/ext-network/vars/Debian.yml)0
-rw-r--r--deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/ext-network/vars/RedHat.yml)0
-rw-r--r--deploy/adapters/ansible/roles/ext-network/vars/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/ext-network/vars/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/glance/tasks/nfs.yml6
-rw-r--r--deploy/adapters/ansible/roles/glance/templates/glance-api.conf28
-rw-r--r--deploy/adapters/ansible/roles/glance/templates/glance-registry.conf12
-rw-r--r--deploy/adapters/ansible/roles/glance/vars/Debian.yml3
-rw-r--r--deploy/adapters/ansible/roles/glance/vars/RedHat.yml4
-rw-r--r--deploy/adapters/ansible/roles/ha/templates/haproxy.cfg23
-rw-r--r--deploy/adapters/ansible/roles/heat/tasks/heat_install.yml13
-rw-r--r--deploy/adapters/ansible/roles/heat/templates/heat.j239
-rw-r--r--deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml131
-rw-r--r--deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_create.yml)0
-rw-r--r--deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml20
-rw-r--r--deploy/adapters/ansible/roles/keystone/tasks/main.yml7
-rw-r--r--deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v2.sh (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc-v2.sh)0
-rw-r--r--deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh11
-rw-r--r--deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh10
-rw-r--r--deploy/adapters/ansible/roles/keystone/templates/keystone.conf51
-rw-r--r--deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j210
-rw-r--r--deploy/adapters/ansible/roles/keystone/vars/Debian.yml5
-rw-r--r--deploy/adapters/ansible/roles/keystone/vars/main.yml76
-rw-r--r--deploy/adapters/ansible/roles/moon/files/controllers.py (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/controllers.py)0
-rw-r--r--deploy/adapters/ansible/roles/moon/files/deb.conf (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/deb.conf)0
-rw-r--r--deploy/adapters/ansible/roles/moon/files/get_deb_depends.py (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/get_deb_depends.py)0
-rwxr-xr-xdeploy/adapters/ansible/roles/moon/handlers/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/handlers/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/moon/tasks/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/moon-compute.yml)0
-rw-r--r--deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/moon-controller.yml)0
-rw-r--r--deploy/adapters/ansible/roles/moon/tasks/moon.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/moon.yml)0
-rw-r--r--deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/admin-openrc.sh)0
-rw-r--r--deploy/adapters/ansible/roles/moon/templates/api-paste.ini (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/api-paste.ini)0
-rw-r--r--deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/demo-openrc.sh)0
-rw-r--r--deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/keystone-paste.ini)0
-rw-r--r--deploy/adapters/ansible/roles/moon/templates/keystone.conf (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/keystone.conf)0
-rw-r--r--deploy/adapters/ansible/roles/moon/templates/proxy-server.conf (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/proxy-server.conf)0
-rw-r--r--deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2 (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/wsgi-keystone.conf.j2)0
-rw-r--r--deploy/adapters/ansible/roles/moon/vars/Debian.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/vars/Debian.yml)0
-rw-r--r--deploy/adapters/ansible/roles/moon/vars/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/vars/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml3
-rw-r--r--deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml8
-rw-r--r--deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/templates/neutron.conf)0
-rw-r--r--deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml4
-rw-r--r--deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml4
-rw-r--r--deploy/adapters/ansible/roles/neutron-network/tasks/main.yml6
-rw-r--r--deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml4
-rw-r--r--deploy/adapters/ansible/roles/nova-compute/tasks/main.yml21
-rw-r--r--deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf2
-rw-r--r--deploy/adapters/ansible/roles/nova-compute/templates/nova.conf105
-rw-r--r--deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml6
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/files/opendaylight.service (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/files/opendaylight.service)0
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml7
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml9
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml19
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml20
-rw-r--r--[-rwxr-xr-x]deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml8
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml11
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml)0
-rw-r--r--[-rwxr-xr-x]deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml4
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/moon-odl.yml)0
-rwxr-xr-xdeploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml20
-rwxr-xr-xdeploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh2
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/templates/moon-environment (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/moon-environment)0
-rw-r--r--deploy/adapters/ansible/roles/odl_cluster/templates/settings.xml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/settings.xml)0
-rwxr-xr-xdeploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml8
-rwxr-xr-xdeploy/adapters/ansible/roles/odl_cluster/vars/main.yml5
-rwxr-xr-xdeploy/adapters/ansible/roles/onos_cluster/tasks/main.yml4
-rwxr-xr-xdeploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml75
-rwxr-xr-xdeploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml76
-rwxr-xr-xdeploy/adapters/ansible/roles/onos_cluster/vars/main.yml13
-rwxr-xr-xdeploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml10
-rw-r--r--deploy/adapters/ansible/roles/secgroup/templates/neutron.j24
-rw-r--r--deploy/adapters/ansible/roles/secgroup/templates/nova.j22
-rw-r--r--deploy/adapters/ansible/roles/secgroup/vars/Debian.yml4
-rwxr-xr-xdeploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init22
-rwxr-xr-xdeploy/adapters/ansible/roles/storage/files/storage8
-rw-r--r--deploy/adapters/ansible/roles/swift/tasks/main.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-compute1.yml)0
-rw-r--r--deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller1.yml)0
-rw-r--r--deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller2.yml)0
-rw-r--r--deploy/adapters/ansible/roles/swift/tasks/swift.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift.yml)0
-rw-r--r--deploy/adapters/ansible/roles/swift/templates/account-server.conf (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/account-server.conf)0
-rw-r--r--deploy/adapters/ansible/roles/swift/templates/container-server.conf (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/container-server.conf)0
-rw-r--r--deploy/adapters/ansible/roles/swift/templates/object-server.conf (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/object-server.conf)0
-rw-r--r--deploy/adapters/ansible/roles/swift/templates/proxy-server.conf (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/proxy-server.conf)0
-rw-r--r--deploy/adapters/ansible/roles/swift/templates/rsyncd.conf (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/rsyncd.conf)0
-rw-r--r--deploy/adapters/ansible/roles/swift/templates/swift.conf (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/swift.conf)0
-rw-r--r--deploy/adapters/ansible/roles/swift/vars/Debian.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/Debian.yml)0
-rw-r--r--deploy/adapters/ansible/roles/swift/vars/main.yml (renamed from deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/main.yml)0
-rw-r--r--deploy/adapters/ansible/roles/tacker/templates/tacker.j2421
535 files changed, 1343 insertions, 24818 deletions
diff --git a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
index 95102d2b..236035e0 100644
--- a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
@@ -72,9 +72,9 @@
- neutron-common
- neutron-network
- ceilometer_controller
-# - ext-network
- dashboard
- heat
+ - aodh
- hosts: all
remote_user: root
@@ -93,6 +93,13 @@
- cinder-volume
- ceilometer_compute
+#- hosts: all
+# remote_user: root
+# accelerate: true
+# max_fail_percentage: 0
+# roles:
+# - moon
+
- hosts: all
remote_user: root
accelerate: true
@@ -229,6 +236,7 @@
- hosts: controller
remote_user: root
+ accelerate: true
max_fail_percentage: 0
roles:
- ext-network
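For readability (the web view above strips the diff's leading whitespace), here is a sketch of how the touched parts of HA-ansible-multinodes.yml plausibly read after this patch. The indentation and the "roles:" header of the first hunk are assumed from common Ansible playbook layout rather than taken verbatim from the patch:

# First hunk: the role list of the play shown above; the commented-out
# "# - ext-network" entry is removed and the aodh role is appended.
  roles:
    - neutron-common
    - neutron-network
    - ceilometer_controller
    - dashboard
    - heat
    - aodh

# Second hunk: a fully commented-out moon play is added as a placeholder.
#- hosts: all
#  remote_user: root
#  accelerate: true
#  max_fail_percentage: 0
#  roles:
#    - moon

# Third hunk: the controller play that applies ext-network gains accelerate: true.
- hosts: controller
  remote_user: root
  accelerate: true
  max_fail_percentage: 0
  roles:
    - ext-network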
diff --git a/deploy/adapters/ansible/openstack/templates/neutron.conf b/deploy/adapters/ansible/openstack/templates/neutron.conf
index cbdd534d..49caa879 100644
--- a/deploy/adapters/ansible/openstack/templates/neutron.conf
+++ b/deploy/adapters/ansible/openstack/templates/neutron.conf
@@ -1,400 +1,72 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ VERBOSE }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
state_path = /var/lib/neutron
-
-# Where to store lock files
lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
+notify_nova_on_port_status_changes = True
+notify_nova_on_port_data_changes = True
log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
bind_host = {{ network_server_host }}
-
-# Port the bind the API server to
bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
-
-# Paste configuration file
api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
rpc_thread_pool_size = 240
-# Size of RPC connection pool
rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update it's cache.
notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ internal_vip.ip }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
+nova_url = http://{{ internal_vip.ip }}:8774/v3
nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-{% if NOVA_ADMIN_TENANT_ID|default('') %}
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-{% endif %}
-# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
+nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
send_events_interval = 2
-# ======== end of neutron nova interactions ==========
-
[quotas]
-# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
quota_security_group_rule = 1000
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
report_interval = 30
-# =========== end of items for agent management extension =====
-
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = neutron
+password = {{ NEUTRON_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = neutron
@@ -402,72 +74,39 @@ admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
idle_timeout = 30
use_db_reconnect = True
-
-# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
{% if enable_fwaas %}
[fwaas]
driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
enabled = True
{% endif %}
+
+[nova]
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = nova
+password = {{ NOVA_PASS }}
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
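
Note: the Jinja2 block added at the top of this neutron.conf template builds the memcached_servers string from the haproxy_hosts inventory dict. A minimal sketch of how that fragment renders, assuming haproxy_hosts maps hostnames to management IPs (the sample hosts and addresses below are hypothetical, not taken from the patch):

from jinja2 import Template

# Reproduce the set / for / append pattern used at the top of the template.
fragment = (
    "{% set memcached_servers = [] %}"
    "{% for host in haproxy_hosts.values() %}"
    "{% set _ = memcached_servers.append('%s:11211' % host) %}"
    "{% endfor %}"
    "memcached_servers = {{ memcached_servers | join(',') }}"
)

# Hypothetical inventory values, for illustration only.
print(Template(fragment).render(
    haproxy_hosts={"host1": "10.1.0.50", "host2": "10.1.0.51"}))
# memcached_servers = 10.1.0.50:11211,10.1.0.51:11211

The {% set _ = list.append(...) %} idiom is used because a plain {% set %} inside a for loop would not persist outside the loop; mutating the list does.
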
diff --git a/deploy/adapters/ansible/openstack/templates/nova.conf b/deploy/adapters/ansible/openstack/templates/nova.conf
index 52773598..4a7bb0a2 100644
--- a/deploy/adapters/ansible/openstack/templates/nova.conf
+++ b/deploy/adapters/ansible/openstack/templates/nova.conf
@@ -7,78 +7,113 @@
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
+log-dir=/var/log/nova
state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
-debug={{ DEBUG }}
+verbose={{ VERBOSE }}
ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
enabled_apis=osapi_compute,metadata
-default_floating_pool={{ public_net_info.network }}
auth_strategy = keystone
+my_ip = {{ internal_ip }}
+use_neutron = True
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
+default_floating_pool={{ public_net_info.network }}
+metadata_listen={{ internal_ip }}
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+debug={{ DEBUG }}
+volumes_path=/var/lib/nova/volumes
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
-
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
-
memcached_servers = {{ memcached_servers }}
[database]
# The SQLAlchemy connection string used to connect to the database
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
idle_timeout = 30
+pool_timeout = 10
use_db_reconnect = True
+
+[api_database]
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
+idle_timeout = 30
pool_timeout = 10
+use_db_reconnect = True
+
+[cinder]
+os_region_name = RegionOne
+
+[oslo_concurrency]
+lock_path=/var/lib/nova/tmp
+
+[libvirt]
+use_virtio_for_bridges=True
+
+[wsgi]
+api_paste_config=/etc/nova/api-paste.ini
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = nova
+password = {{ NOVA_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
-memcached_servers = {{ memcached_servers }}
+
+[vnc]
+enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
[glance]
+api_servers = http://{{ internal_vip.ip }}:9292
host = {{ internal_vip.ip }}
[neutron]
url = http://{{ internal_vip.ip }}:9696
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+region_name = RegionOne
+project_name = service
+username = neutron
+password = {{ NEUTRON_PASS }}
+service_metadata_proxy = True
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
auth_strategy = keystone
admin_tenant_name = service
admin_username = neutron
admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
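
Note: both templates move the Keystone credentials from the legacy admin_* options to v3 password auth (auth_url, auth_type = password, explicit project and user domains). A minimal sketch of the credential shape those new sections expect, using keystoneauth1 directly; the endpoint and password values are placeholders, not values from the patch:

from keystoneauth1 import session
from keystoneauth1.identity import v3

# Same fields as the new [keystone_authtoken] / [neutron] blocks:
# v3 auth_url, password auth type, and default project/user domains.
auth = v3.Password(
    auth_url="http://192.0.2.10:35357/v3",   # placeholder internal VIP
    username="nova",
    password="NOVA_PASS",                    # placeholder secret
    project_name="service",
    user_domain_name="default",
    project_domain_name="default",
)
sess = session.Session(auth=auth)
print(sess.get_token())                      # fetches a v3 token from Keystone
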
+
diff --git a/deploy/adapters/ansible/openstack_juno/.gitkeep b/deploy/adapters/ansible/openstack_juno/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/deploy/adapters/ansible/openstack_juno/.gitkeep
+++ /dev/null
diff --git a/deploy/adapters/ansible/openstack_kilo/.gitkeep b/deploy/adapters/ansible/openstack_kilo/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/deploy/adapters/ansible/openstack_kilo/.gitkeep
+++ /dev/null
diff --git a/deploy/adapters/ansible/openstack_liberty/.gitkeep b/deploy/adapters/ansible/openstack_liberty/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/deploy/adapters/ansible/openstack_liberty/.gitkeep
+++ /dev/null
diff --git a/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml
deleted file mode 100755
index 3c7032d1..00000000
--- a/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml
+++ /dev/null
@@ -1,252 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: install controller packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: controller_packages | union(controller_packages_noarch)
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-#"
-
-- name: upload install_jdk8 scripts
- unarchive: src=install_jdk8.tar dest=/opt/
-
-- name: install install_jdk8 package
- command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
-
-#- name: install opendaylight packages
-# apt: name={{ item }} state=present
-# with_items:
-# - openjdk-8-jdk
-
-#- name: create odl directories
-# file:
-# path: /opt/opendaylight-0.2.2
-# state: "directory"
-# group: root
-# owner: root
-# mode: 0755
-
-- name: create odl group
- group: name=odl system=yes state=present
-
-- name: create odl user
- user:
- name: odl
- group: odl
- home: "{{ odl_home }}"
- createhome: "yes"
- system: "yes"
- shell: "/bin/false"
-
-#- name: get image http server
-# shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
-# register: http_server
-
-- name: download odl package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/odl/{{ odl_pkg_url }}" dest=/opt/{{ odl_pkg_name }}
-
-# "
-
-#- name: download odl package
-# get_url: url={{ odl_pkg_url }} dest=/opt/{{ odl_pkg_name }}
-
-# TODO: unarchive doesn't support strip-component at the moment
-# TODO: switch to use untar after support is added.
-- name: extract odl package
-# unarchive: src=/opt/{{ odl_pkg_name }} dest={{ odl_home }} group=odl owner=odl mode=0775 copy=no
- command: su -s /bin/sh -c "tar xzf /opt/{{ odl_pkg_name }} -C {{ odl_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" odl
-# notify:
-# - restart odl service
-
-- name: opendaylight system file
- template:
- src: "{{ service_file.src }}"
- dest: "{{ service_file.dst }}"
- mode: 0755
-
-- name: set l3 fwd enable in custom.properties
- template:
- src: custom.properties
- dest: "{{ odl_home }}/etc/custom.properties"
- owner: odl
- group: odl
- mode: 0775
- when: odl_l3_agent == "Enable"
-
-- name: create karaf config
- template:
- src: org.apache.karaf.features.cfg
- dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
- owner: odl
- group: odl
- mode: 0775
-
-- name: create tomcat config
- template:
- src: tomcat-server.xml
- dest: "{{ odl_home }}/configuration/tomcat-server.xml"
-
-- name: install odl pip packages
- pip: name={{ item }} state=present version=1.0.1
- with_items: odl_pip
-
-
-#- name: restart odl service
-# service: name=opendaylight state=started pattern="opendaylight"
-
-##########################################################################################################
-################################# OpenDayLight Cluster Configuration #################################
-##########################################################################################################
-#- name: create initial directory
-# shell: >
-# mkdir -p {{ odl_home }}/configuration/initial;
-
-#- name: create akka config
-# template:
-# src: akka.conf
-# dest: "{{ odl_home }}/configuration/initial/akka.conf"
-# notify:
-# - restart odl service
-
-
-#- name: create module-shards config
-# template:
-# src: module-shards.conf
-# dest: "{{ odl_home }}/configuration/initial/module-shards.conf"
-# notify:
-# - restart odl service
-
-#- name: copy Jolokia-OSGi config
-# shell: >
-# cp -r jolokia {{ odl_home }}system/org/;
-
-#- name: copy Jolokia-OSGi config
-# template:
-# src: jolokia
-# dest: "{{ odl_home }}/system/org/"
-# notify:
-# - restart odl service
-
-
-#- name: mkdir Jolokia-OSGi directory
-# shell: >
-# mkdir -p {{ odl_home }}system/org/jolokia;
-# mkdir -p {{ odl_home }}system/org/jolokia/jolokia-osgi;
-# mkdir -p {{ odl_home }}system/org/jolokia/jolokia-osgi/1.1.5;
-
-
-#- name: copy Jolokia-OSGi config
-# template: src={{ item.src }} dest={{ item.dest }}
-# with_items:
-# - src: "jolokia-osgi-1.1.5-features.xml"
-# dest: "{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5-features.xml"
-# - src: "jolokia-osgi-1.1.5.jar.sha1"
-# dest: "{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar.sha1"
-# - src: "jolokia-osgi-1.1.5.jar"
-# dest: "{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar"
-
-#- name: copy Jolokia-OSGi jar config
-# copy: src=roles/odl_cluster/templates/jolokia-osgi-1.1.5.jar dest="{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/"
-
-- name: remove karaf data directory
- shell: rm -rf {{ odl_home }}/data/*;
-
-#- name: chown OpenDaylight Directory and Files
-# shell: >
-# chown -R odl:odl "{{ odl_home }}";
-# chown odl:odl "{{ service_file.dst }}";
-
-
-##########################################################################################################
-################################ OpenDayLight connect with OpenStack ################################
-##########################################################################################################
-- name: turn off neutron-server neutron-plugins-openvswitch-agent Daemon on control node
- shell: >
- sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ;
- sed -i '/neutron-server/d' /opt/service;
- sed -i '/keepalived/d' /opt/service;
-
-- name: turn off neutron-server on control node
- service: name=neutron-server state=stopped
-
-- name: turn off keepalived on control node
- service: name=keepalived state=stopped
- when: ansible_os_family == "Debian"
-
-- name: chown opendaylight directory and files
- shell: >
- chown -R odl:odl "{{ odl_home }}";
- chown odl:odl "{{ service_file.dst }}";
-
-- name: start opendaylight
- service: name=opendaylight state=started
- when: ansible_os_family == "Debian"
-
-- name: set opendaylight autostart
- shell: chkconfig opendaylight on
- when: ansible_os_family == "RedHat"
-
-- name: start opendaylight
- shell: service opendaylight start
- when: ansible_os_family == "RedHat"
-
-- name: check if opendaylight running
- shell: netstat -lpen --tcp | grep java | grep 6653; while [ $? -ne 0 ]; do sleep 10; netstat -lpen --tcp | grep java | grep 6653; done
-
-- name: run openvswitch script
- include: openvswitch.yml
-
-#- name: Configure Neutron1
-# shell: >
-# crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
-# crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
-
-#- name: Create ML2 Configuration File
-# template:
-# src: ml2_conf.sh
-# dest: "/opt/ml2_conf.sh"
-# mode: 0777
-
-#- name: Execute ML2 Configuration File
-# command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-
-
-- name: configure l2 configuration
- shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-prv;
- when: odl_l3_agent == "Disable"
-
-- name: configure l3 configuration
- shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-ex;
- when: odl_l3_agent == "Enable"
-
-- name: configure odl l3 driver
- shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
- when: odl_l3_agent == "Enable"
-
-- name: configure metadata for l3 configuration
- shell: crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True;
- when: odl_l3_agent == "Enable"
-
-- name: drop and recreate neutron database
- shell: mysql -e "drop database if exists neutron;";
- mysql -e "create database neutron character set utf8;";
- mysql -e "grant all on neutron.* to 'neutron'@'%' identified by '{{ NEUTRON_DBPASS }}';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
- when: inventory_hostname == haproxy_hosts.keys()[0]
- tags:
- - test_odl
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/.gitkeep b/deploy/adapters/ansible/openstack_mitaka/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/.gitkeep
+++ /dev/null
diff --git a/deploy/adapters/ansible/openstack_mitaka/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_mitaka/HA-ansible-multinodes.yml
deleted file mode 100644
index c04445d8..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/HA-ansible-multinodes.yml
+++ /dev/null
@@ -1,265 +0,0 @@
----
-- hosts: all
- remote_user: root
- pre_tasks:
- - name: make sure ssh dir exist
- file:
- path: '{{ item.path }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- state: directory
- mode: 0755
- with_items:
- - path: /root/.ssh
- owner: root
- group: root
-
- - name: write ssh config
- copy:
- content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
- dest: '{{ item.dest }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- mode: 0600
- with_items:
- - dest: /root/.ssh/config
- owner: root
- group: root
-
- - name: generate ssh keys
- shell: if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "already gen ssh key!"; fi;
-
- - name: fetch ssh keys
- fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes
-
- - authorized_key:
- user: root
- key: "{{ lookup('file', 'item') }}"
- with_fileglob:
- - /tmp/ssh-keys-*
- max_fail_percentage: 0
- roles:
- - common
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - setup-network
-
-- hosts: ha
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ha
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - memcached
- - apache
- - database
- - mq
- - keystone
- - nova-controller
- - neutron-controller
- - cinder-controller
- - glance
- - neutron-common
- - neutron-network
- - ceilometer_controller
-# - ext-network
- - dashboard
- - heat
- - aodh
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - storage
-
-- hosts: compute
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - nova-compute
- - neutron-compute
- - cinder-volume
- - ceilometer_compute
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - secgroup
-
-- hosts: ceph_adm
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles: []
- # - ceph-deploy
-
-- hosts: ceph
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-purge
- - ceph-config
-
-- hosts: ceph_mon
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-mon
-
-- hosts: ceph_osd
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-osd
-
-- hosts: ceph
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-openstack
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - monitor
-
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- tasks:
- - name: set bash to nova
- user:
- name: nova
- shell: /bin/bash
-
- - name: make sure ssh dir exist
- file:
- path: '{{ item.path }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- state: directory
- mode: 0755
- with_items:
- - path: /var/lib/nova/.ssh
- owner: nova
- group: nova
-
- - name: copy ssh keys for nova
- shell: cp -rf /root/.ssh/id_rsa /var/lib/nova/.ssh;
-
- - name: write ssh config
- copy:
- content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
- dest: '{{ item.dest }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- mode: 0600
- with_items:
- - dest: /var/lib/nova/.ssh/config
- owner: nova
- group: nova
-
- - authorized_key:
- user: nova
- key: "{{ lookup('file', 'item') }}"
- with_fileglob:
- - /tmp/ssh-keys-*
-
- - name: chown ssh file
- shell: chown -R nova:nova /var/lib/nova/.ssh;
-
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - odl_cluster
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - onos_cluster
-
-- hosts: all
- remote_user: root
- sudo: True
- max_fail_percentage: 0
- roles:
- - open-contrail
-
-- hosts: all
- remote_user: root
- accelerate: true
- serial: 1
- max_fail_percentage: 0
- roles:
- - odl_cluster_neutron
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - odl_cluster_post
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ext-network
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - tacker
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - boot-recovery
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - controller-recovery
-
-- hosts: compute
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - compute-recovery
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/aodh_install.yml b/deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/aodh_install.yml
deleted file mode 100644
index eb51fbea..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/aodh_install.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install aodh packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: update aodh conf
- template: src={{ item }} dest=/etc/aodh/aodh.conf
- backup=yes
- with_items:
- - aodh.conf.j2
-# - api_paste.ini.j2
-# - policy.json.j2
- notify:
- - restart aodh services
-
-- name: write services to monitor list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: remove default sqlite db
- shell: rm /var/lib/aodh/aodh.sqlite || touch aodh.sqllite.db.removed
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/aodh.conf.j2
deleted file mode 100644
index 752dd0f0..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/aodh.conf.j2
+++ /dev/null
@@ -1,46 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
-[DEFAULT]
-bind_host = {{ internal_ip }}
-bind_port = 8042
-rpc_backend = rabbit
-auth_strategy = keystone
-debug = True
-
-[oslo_messaging_rabbit]
-rabbit_hosts = {{ internal_vip.ip }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-#rabbit_use_ssl = false
-
-[database]
-connection = mysql://aodh:{{ AODH_DBPASS }}@{{ db_host }}/aodh
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-auth_url = http://{{ internal_vip.ip }}:35357
-identity_uri = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = aodh
-password = {{ AODH_PASS }}
-memcached_servers = {{ memcached_servers }}
-token_cache_time = 300
-revocation_cache_time = 60
-
-[service_credentials]
-os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0
-os_username = aodh
-os_tenant_name = service
-os_password = {{ AODH_PASS }}
-os_endpoint_type = internalURL
-os_region_name = RegionOne
-
-[api]
-host = {{ internal_ip }}
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/api_paste.ini.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/api_paste.ini.j2
deleted file mode 100644
index 151789c4..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/api_paste.ini.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-# aodh API WSGI Pipeline
-# Define the filters that make up the pipeline for processing WSGI requests
-# Note: This pipeline is PasteDeploy's term rather than aodh's pipeline
-# used for processing samples
-
-# Remove authtoken from the pipeline if you don't want to use keystone authentication
-[pipeline:main]
-pipeline = cors request_id authtoken api-server
-
-[app:api-server]
-paste.app_factory = aodh.api.app:app_factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-oslo_config_project = aodh
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = aodh
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/policy.json.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/policy.json.j2
deleted file mode 100644
index 4fd873e9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/policy.json.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "context_is_admin": "role:admin",
- "segregation": "rule:context_is_admin",
- "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s",
- "default": "rule:admin_or_owner",
-
- "telemetry:get_alarm": "rule:admin_or_owner",
- "telemetry:get_alarms": "rule:admin_or_owner",
- "telemetry:query_alarm": "rule:admin_or_owner",
-
- "telemetry:create_alarm": "",
- "telemetry:change_alarm": "rule:admin_or_owner",
- "telemetry:delete_alarm": "rule:admin_or_owner",
-
- "telemetry:get_alarm_state": "rule:admin_or_owner",
- "telemetry:change_alarm_state": "rule:admin_or_owner",
-
- "telemetry:alarm_history": "rule:admin_or_owner",
- "telemetry:query_alarm_history": "rule:admin_or_owner"
-}
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/Debian.yml
deleted file mode 100644
index bdf4655e..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/Debian.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#############################################################################
----
-packages:
- - aodh-api
- - aodh-evaluator
- - aodh-notifier
- - aodh-listener
- - aodh-expirer
- - python-ceilometerclient
-
-services:
- - aodh-api
- - aodh-notifier
- - aodh-evaluator
- - aodh-listener
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/RedHat.yml
deleted file mode 100644
index a0381c6b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/RedHat.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#############################################################################
----
-packages:
- - openstack-aodh-api
- - openstack-aodh-evaluator
- - openstack-aodh-notifier
- - openstack-aodh-listener
- - openstack-aodh-expirer
- - python-ceilometerclient
-
-services:
- - openstack-aodh-api
- - openstack-aodh-notifier
- - openstack-aodh-evaluator
- - openstack-aodh-listener
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/apache/files/index.html b/deploy/adapters/ansible/openstack_mitaka/roles/apache/files/index.html
deleted file mode 100644
index f083c4f1..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/apache/files/index.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
- <head>
- <title>Index</title>
- </head>
- <body>
- <a href="/horizon">Openstack Dashboard</a>
- </body>
-</html>
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/apache/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/apache/tasks/main.yml
deleted file mode 100755
index 44407bef..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/apache/tasks/main.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
- with_items: packages | union(packages_noarch)
-
-- name: assure listen port exist
- template:
- dest: '{{ apache_config_dir }}/ports.conf'
- src: ports.conf.j2
- notify:
- - restart apache related services
-
-- name: remove default listen port on centos
- lineinfile:
- dest: /etc/httpd/conf/httpd.conf
- state: absent
- regexp: 'Listen 80'
- when: ansible_os_family == 'RedHat'
-
-- name: copy index.html file
- copy: src=index.html dest=/var/www/html/index.html mode=0644
- when: ansible_os_family == 'RedHat'
-
-- name: copy index.html file
- copy: src=index.html dest=/var/www/index.html mode=0644
- when: ansible_os_family == 'Debian'
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ceilometer_controller/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ceilometer_controller/vars/Debian.yml
deleted file mode 100644
index b749ffaa..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ceilometer_controller/vars/Debian.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - ceilometer-api
- - ceilometer-collector
- - ceilometer-agent-central
- - ceilometer-agent-notification
-# - ceilometer-alarm-evaluator
-# - ceilometer-alarm-notifier
- - python-ceilometerclient
-
-ceilometer_services:
- - ceilometer-agent-central
- - ceilometer-agent-notification
- - ceilometer-api
- - ceilometer-collector
-# - ceilometer-alarm-evaluator
-# - ceilometer-alarm-notifier
-
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ceilometer_controller/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ceilometer_controller/vars/RedHat.yml
deleted file mode 100644
index 6c5f53ec..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ceilometer_controller/vars/RedHat.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
-# - openstack-ceilometer-alarm
- - python-ceilometerclient
-
-ceilometer_services:
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
-# - openstack-ceilometer-alarm-evaluator
-# - openstack-ceilometer-alarm-notifier
-
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ceph-mon/tasks/install_mon.yml
deleted file mode 100644
index 0ad666a6..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-mon/tasks/install_mon.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: Create a default data directory
- file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}" state="directory"
-
-- name: Populate the monitor daemon
- shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring"
-
-- name: Change ceph/mon dir owner to ceph
- shell: "chown -R ceph:ceph /var/lib/ceph/mon"
- when: ansible_os_family == "Debian"
-
-- name: Touch the done and auto start file
- file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch"
- with_items:
- - "done"
- - "{{ ceph_start_type }}"
-
-- name: start mon daemon
- shell: "{{ ceph_start_script }}"
-
-- name: wait for creating osd keyring
- wait_for: path=/var/lib/ceph/bootstrap-osd/ceph.keyring
-
-- name: fetch osd keyring
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
- run_once: True
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ceph-openstack/tasks/main.yml
deleted file mode 100644
index 06c3acb6..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-openstack/tasks/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- include_vars: "{{ ansible_os_family }}.yml"
- tags:
- - ceph_deploy
- - ceph_openstack_pre
- - ceph_openstack_conf
- - ceph_openstack_post
- - ceph_openstack
-
-- include: ceph_openstack_pre.yml
- tags:
- - ceph_deploy
- - ceph_openstack_pre
- - ceph_openstack
-
-- include: ceph_openstack_conf.yml
- tags:
- - ceph_deploy
- - ceph_openstack_conf
- - ceph_openstack
-
-- include: ceph_openstack_post.yml
- tags:
- - ceph_deploy
- - ceph_openstack_post
- - ceph_openstack
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ceph-osd/tasks/install_osd.yml
deleted file mode 100644
index 35e84cf8..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-osd/tasks/install_osd.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: create osd lv and mount it on /var/local/osd
- script: create_osd.sh
-
-- name: fetch osd keyring from ceph_adm
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
- delegate_to: "{{ public_vip.ip }}"
- when: compute_expansion
-
-- name: copy osd keyring
- copy: src="/tmp/ceph.osd.keyring" dest="/var/lib/ceph/bootstrap-osd/ceph.keyring"
-
-- name: prepare osd disk
- shell: ceph-disk prepare --fs-type xfs /var/local/osd
-
-- name: change local/osd dir owner to ceph
- shell: chown ceph:ceph /var/local/osd
- when: ansible_os_family == "Debian"
-
-- name: activate osd node
- shell: ceph-disk activate /var/local/osd
-
-- name: enable ceph service
- service: name=ceph enabled=yes
-
-- name: rebuild osd after reboot
- lineinfile: dest=/etc/init/ceph-osd-all-starter.conf insertafter="^task" line="pre-start script\n set -e\n /opt/setup_storage/losetup.sh\n sleep 3\n mount /dev/storage-volumes/ceph0 /var/local/osd\nend script"
- when: ansible_os_family == "Debian"
-
-- name: rebuild osd after reboot for centos
- lineinfile: dest=/etc/init.d/ceph insertafter="^### END INIT INFO" line="\nsleep 1\nmount /dev/storage-volumes/ceph0 /var/local/osd"
- when: ansible_os_family == "RedHat"
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-purge/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ceph-purge/tasks/main.yml
deleted file mode 100644
index 02013762..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-purge/tasks/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: clear tmp files
- local_action: shell rm -rf /tmp/ceph*
- tags:
- - ceph_purge
- - ceph_deploy
-
-- name: install ceph-related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items:
- - ceph-deploy
- tags:
- - ceph_purge
- - ceph_deploy
- when: ansible_os_family == "Debian"
-
-- name: purge ceph
- shell: "ceph-deploy purge {{ inventory_hostname }}; ceph-deploy purgedata {{ inventory_hostname }}; ceph-deploy forgetkeys"
- tags:
- - ceph_purge
- - ceph_deploy
- when: ansible_os_family == "Debian"
-
-- name: remove monmap
- file: path="/tmp/monmap" state="absent"
- tags:
- - ceph_purge
- - ceph_deploy
-
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/files/congress.conf b/deploy/adapters/ansible/openstack_mitaka/roles/congress/files/congress.conf
deleted file mode 100755
index 22a64a66..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/files/congress.conf
+++ /dev/null
@@ -1,37 +0,0 @@
-description "OpenStack Congress Server"
-author "Thomas Goirand <zigo@debian.org>"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-chdir /var/run
-
-respawn
-respawn limit 20 5
-limit nofile 65535 65535
-
-pre-start script
- for i in lock run log lib ; do
- mkdir -p /var/$i/congress
- chown root /var/$i/congress
- done
-end script
-
-script
- [ -x "/usr/local/bin/congress-server" ] || exit 0
- DAEMON_ARGS=""
- CONFIG_FILE="/etc/congress/congress.conf"
- USE_SYSLOG=""
- USE_LOGFILE=""
- NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG=""
- [ -r /etc/default/openstack ] && . /etc/default/openstack
- [ -r /etc/default/$UPSTART_JOB ] && . /etc/default/$UPSTART_JOB
- [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
- [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/congress/congress.log"
- [ -z "$NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG" ] && DAEMON_ARGS="$DAEMON_ARGS --config-file=$CONFIG_FILE"
-
- exec start-stop-daemon --start --chdir /var/lib/congress \
- --chuid root:root --make-pidfile --pidfile /var/run/congress/congress.pid \
- --exec /usr/local/bin/congress-server -- ${DAEMON_ARGS}
-end script
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/files/congress.service b/deploy/adapters/ansible/openstack_mitaka/roles/congress/files/congress.service
deleted file mode 100755
index 23db7b0e..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/files/congress.service
+++ /dev/null
@@ -1,19 +0,0 @@
-[Unit]
-Description=OpenStack Congress server
-After=
-
-[Service]
-User=root
-Group=root
-Type=simple
-WorkingDirectory=/var/lib/congress
-PermissionsStartOnly=true
-ExecStartPre=/bin/mkdir -p /var/lock/congress /var/log/congress /var/lib/congress
-ExecStartPre=/usr/bin/touch /var/log/congress/congress.log
-ExecStart=/usr/bin/congress-server --config-file /etc/congress/congress.conf
-Restart=on-failure
-LimitNOFILE=65535
-TimeoutStopSec=15
-
-[Install]
-WantedBy=multi-user.target
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_config_debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_config_debian.yml
deleted file mode 100755
index c5d7cce7..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_config_debian.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-##
-## All rights reserved. This program and the accompanying materials
-## are made available under the terms of the Apache License, Version 2.0
-## which accompanies this distribution, and is available at
-## http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
----
-- name: upgrade openstackclient
- pip: name=python-openstackclient state=latest
-
-- name: create congress service
- copy: src=congress.conf dest=/etc/init
-
-- name: create congress service work dir
- file: path=/var/lib/congress state=directory
-
-- name: link the congress service
- file:
- src: /etc/init/congress.conf
- dest: /etc/init.d/congress
- state: link
-
-- name: congress db sync
- shell: /usr/local/bin/congress-db-manage --config-file /etc/congress/congress.conf upgrade head
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: start congress service
- shell: service congress start
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_config_redhat.yml b/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_config_redhat.yml
deleted file mode 100755
index e922c508..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_config_redhat.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-##
-## All rights reserved. This program and the accompanying materials
-## are made available under the terms of the Apache License, Version 2.0
-## which accompanies this distribution, and is available at
-## http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
----
-- name: upgrade openstackclient
- pip: name=python-openstackclient state=latest
-
-- name: create congress service
- copy: src=congress.service dest=/lib/systemd/system/
-
-- name: create congress service work dir
- file: path=/var/lib/congress state=directory
-
-- name: link the congress service
- file:
- src: /lib/systemd/system/congress.service
- dest: /etc/systemd/system/multi-user.target.wants/congress.service
- state: link
-
-- name: congress db sync
- shell: /usr/bin/congress-db-manage --config-file /etc/congress/congress.conf upgrade head
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: start congress service
- shell: service congress start
-
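Editor's note on the two removed congress_config_* task files above: both start the daemon with `shell: service congress start`, which Ansible cannot track for idempotence or state. A minimal sketch of the more idiomatic form (illustrative only, not part of this commit) would use the service module instead:

    - name: start congress service
      service: name=congress state=started enabled=yes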
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_install.yml b/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_install.yml
deleted file mode 100755
index 65daff3e..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_install.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install congress packages
- pip: name={{ item }} state=present
- with_items: packages
-
-- name: create congress etc directory
- file: path=/etc/congress state=directory
-
-- name: update congress conf
- template: src={{ item }} dest=/etc/congress/{{ item }}
- backup=yes
- with_items:
- - congress.conf
- - api-paste.ini
- - policy.json
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/main.yml
deleted file mode 100755
index 2cbd619c..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include: congress_install.yml
-
-- include: congress_db.yml
- when:
- - inventory_hostname == haproxy_hosts.keys()[0]
-
-- include: congress_config_debian.yml
- when: ansible_os_family == "Debian"
-
-- include: congress_config_redhat.yml
- when: ansible_os_family == "RedHat"
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka/roles/congress/vars/RedHat.yml
deleted file mode 100755
index 15916e69..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/vars/RedHat.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-##
-## All rights reserved. This program and the accompanying materials
-## are made available under the terms of the Apache License, Version 2.0
-## which accompanies this distribution, and is available at
-## http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
----
-packages:
- - congress
- - python-congressclient
- - python-cloudfoundryclient
-
-service:
- - congress
-
-credentials:
- - user: congress
- db: congress
- password: "{{ CONGRESS_DBPASS }}"
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/dashboard/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/dashboard/vars/Debian.yml
deleted file mode 100644
index aaeb8cdb..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/dashboard/vars/Debian.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages: []
-
-services:
- - memcached
- - apache2
-
-apache_config_dir: /etc/apache2
-horizon_dir: /usr/share/openstack-dashboard/openstack_dashboard
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/database/templates/data.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/database/templates/data.j2
deleted file mode 100644
index 66c2fead..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/database/templates/data.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/sh
-mysql -uroot -Dmysql <<EOF
-drop database if exists keystone;
-drop database if exists glance;
-drop database if exists neutron;
-drop database if exists nova;
-drop database if exists cinder;
-drop database if exists heat;
-drop database if exists aodh;
-
-CREATE DATABASE keystone;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON keystone.* TO 'keystone'@'{{ host }}' IDENTIFIED BY '{{ KEYSTONE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE glance;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON glance.* TO 'glance'@'{{ host }}' IDENTIFIED BY '{{ GLANCE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE neutron;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON neutron.* TO 'neutron'@'{{ host }}' IDENTIFIED BY '{{ NEUTRON_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE nova;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON nova.* TO 'nova'@'{{ host }}' IDENTIFIED BY '{{ NOVA_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE cinder;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON cinder.* TO 'cinder'@'{{ host }}' IDENTIFIED BY '{{ CINDER_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE heat;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON heat.* TO 'heat'@'{{ host }}' IDENTIFIED BY '{{ HEAT_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE aodh;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON aodh.* TO 'aodh'@'{{ host }}' IDENTIFIED BY '{{ AODH_DBPASS }}';
-{% endfor %}
-
-{% if WSREP_SST_USER is defined %}
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
-{% endfor %}
-{% endif %}
-EOF
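The removed data.j2 above is a shell heredoc that (re)creates each service database and grants the service account from '%', 'localhost', and the node's own hostname, so remote services, local clients and Galera peers can all authenticate. As a rough sketch of how such a script is typically rendered and executed by the database role (task names here are illustrative, not taken from this tree):

    - name: render database bootstrap script
      template: src=data.j2 dest=/opt/data.sh mode=0755
      run_once: true

    - name: create databases and grant service accounts
      shell: bash /opt/data.sh
      run_once: true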
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/database/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/database/vars/main.yml
deleted file mode 100644
index a32897f0..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/database/vars/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-
-services_noarch:
- - mysql
-
-credentials:
- - user: keystone
- db: keystone
- password: "{{ KEYSTONE_DBPASS }}"
- - user: neutron
- db: neutron
- password: "{{ NEUTRON_DBPASS }}"
- - user: glance
- db: glance
- password: "{{ GLANCE_DBPASS }}"
- - user: nova
- db: nova_api
- password: "{{ NOVA_DBPASS }}"
- - user: nova
- db: nova
- password: "{{ NOVA_DBPASS }}"
- - user: cinder
- db: cinder
- password: "{{ CINDER_DBPASS }}"
- - user: heat
- db: heat
- password: "{{ HEAT_DBPASS }}"
- - user: aodh
- db: aodh
- password: "{{ AODH_DBPASS }}"
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/handlers/main.yml
deleted file mode 100644
index 36e39072..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/handlers/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart neutron-plugin-openvswitch-agent
- service: name=neutron-openvswitch-agent state=restarted enabled=yes
- when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron-l3-agent
- service: name=neutron-l3-agent state=restarted enabled=yes
-
-- name: kill dnsmasq
- command: killall dnsmasq
- ignore_errors: True
-
-- name: restart neutron-dhcp-agent
- service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
- service: name=neutron-metadata-agent state=restarted enabled=yes
-
-- name: restart xorp
- service: name=xorp state=restarted enabled=yes sleep=10
- ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml
deleted file mode 100644
index b52b9178..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-# FIXME: temporary workaround for openstack api access random failure
-- name: restart api server
- service: name={{ item }} state=restarted enabled=yes
- with_items: api_services | union(api_services_noarch)
-
-- name: restart neutron server
- service: name=neutron-server state=restarted enabled=yes
-
-- name: wait for neutron time
- shell: "sleep 10"
-
-- name: create external net
- neutron_network:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.network }}"
- provider_network_type: "{{ public_net_info.type }}"
- provider_physical_network: "{{ public_net_info.provider_network }}"
- provider_segmentation_id: "{{ public_net_info.segment_id}}"
- shared: false
- router_external: yes
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
-
-- name: create external subnet
- neutron_subnet:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.subnet }}"
- network_name: "{{ public_net_info.network }}"
- cidr: "{{ public_net_info.floating_ip_cidr }}"
- enable_dhcp: "{{ public_net_info.enable_dhcp }}"
- no_gateway: "{{ public_net_info.no_gateway }}"
- gateway_ip: "{{ public_net_info.external_gw }}"
- allocation_pool_start: "{{ public_net_info.floating_ip_start }}"
- allocation_pool_end: "{{ public_net_info.floating_ip_end }}"
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
-
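The ext-network tasks above drive the old `neutron_network`/`neutron_subnet` modules, which were dropped from later Ansible releases. For readers porting this role, a rough equivalent of the external-network task using the `os_network` module might look like the sketch below; the auth block and parameter names are recalled from the os_* module family and should be verified against the Ansible/shade versions actually in use:

    - name: create external net
      os_network:
        auth:
          auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
          username: admin
          password: "{{ ADMIN_PASS }}"
          project_name: admin
        name: "{{ public_net_info.network }}"
        provider_network_type: "{{ public_net_info.type }}"
        provider_physical_network: "{{ public_net_info.provider_network }}"
        provider_segmentation_id: "{{ public_net_info.segment_id }}"
        external: true
        shared: false
        state: present
      run_once: true
      when: public_net_info.enable == True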
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/openstack_mitaka/roles/glance/tasks/nfs.yml
deleted file mode 100644
index 9dc72e31..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/glance/tasks/nfs.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: install nfs packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: nfs_packages
-
-- name: install nfs
- local_action: yum name={{ item }} state=present
- with_items:
- - rpcbind
- - nfs-utils
- run_once: True
-
-- name: create image directory
- local_action: file path=/opt/images state=directory mode=0777
- run_once: True
-
-- name: remove nfs config item if exist
- local_action: lineinfile dest=/etc/exports state=absent
- regexp="^/opt/images"
- run_once: True
-
-- name: update nfs config
- local_action: lineinfile dest=/etc/exports state=present
- line="/opt/images *(rw,insecure,sync,all_squash)"
- run_once: True
-
-- name: restart compass nfs service
- local_action: service name={{ item }} state=restarted enabled=yes
- with_items:
- - rpcbind
- - nfs-server
- run_once: True
-
-- name: get mount info
- command: mount
- register: mount_info
- tags:
- - recovery
-
-- name: get nfs server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: ip_info
- tags:
- - recovery
-
-- name: restart host nfs service
- service: name={{ item }} state=restarted enabled=yes
- with_items: '{{ nfs_services }}'
-
-- name: mount image directory
- shell: |
- mkdir -p /var/lib/glance/images
- mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images
- sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- #echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
- when: mount_info.stdout.find('images') == -1
- retries: 5
- delay: 3
- tags:
- - recovery
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/glance/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/glance/vars/Debian.yml
deleted file mode 100644
index d1825012..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/glance/vars/Debian.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - glance
- - nfs-common
-
-nfs_packages:
- - nfs-common
-
-nfs_services: []
-
-services:
- - glance-registry
- - glance-api
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka/roles/glance/vars/RedHat.yml
deleted file mode 100644
index 2987d0c4..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/glance/vars/RedHat.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - openstack-glance
- - rpcbind
-
-nfs_packages:
- - nfs-utils
- - rpcbind
-
-nfs_services:
- - rpcbind
-
-services:
- - openstack-glance-api
- - openstack-glance-registry
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/openstack_mitaka/roles/ha/templates/haproxy.cfg
deleted file mode 100755
index 5fbcc9d9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ha/templates/haproxy.cfg
+++ /dev/null
@@ -1,227 +0,0 @@
-
-global
- #chroot /var/run/haproxy
- daemon
- user haproxy
- group haproxy
- maxconn 4000
- pidfile /var/run/haproxy/haproxy.pid
- #log 127.0.0.1 local0
- tune.bufsize 1000000
- stats socket /var/run/haproxy.sock
- stats timeout 2m
-
-defaults
- log global
- maxconn 8000
- option redispatch
- option dontlognull
- option splice-auto
- timeout http-request 10s
- timeout queue 1m
- timeout connect 10s
- timeout client 50s
- timeout server 50s
- timeout check 10s
- retries 3
-
-listen proxy-mysql
- bind {{ internal_vip.ip }}:3306
- option tcpka
- option tcplog
- balance source
-{% for host, ip in haproxy_hosts.items() %}
-{% if loop.index == 1 %}
- server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5
-{% else %}
- server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5 backup
-{% endif %}
-{% endfor %}
-
-listen proxy-rabbit
- bind {{ internal_vip.ip }}:5672
- bind {{ public_vip.ip }}:5672
-
- option tcpka
- option tcplog
- timeout client 3h
- timeout server 3h
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:5672 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-glance_registry_cluster
- bind {{ internal_vip.ip }}:9191
- bind {{ public_vip.ip }}:9191
- option tcpka
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:9191 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-glance_api_cluster
- bind {{ internal_vip.ip }}:9292
- bind {{ public_vip.ip }}:9292
- option tcpka
- option tcplog
- option httpchk
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:9292 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova-novncproxy
- bind {{ internal_vip.ip }}:6080
- bind {{ public_vip.ip }}:6080
- option tcpka
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:6080 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-network
- bind {{ internal_vip.ip }}:9696
- bind {{ public_vip.ip }}:9696
- option tcpka
- option tcplog
- balance source
- option httpchk
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:9696 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-volume
- bind {{ internal_vip.ip }}:8776
- bind {{ public_vip.ip }}:8776
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-keystone_admin_cluster
- bind {{ internal_vip.ip }}:35357
- bind {{ public_vip.ip }}:35357
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:35357 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-keystone_public_internal_cluster
- bind {{ internal_vip.ip }}:5000
- bind {{ public_vip.ip }}:5000
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:5000 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova_compute_api_cluster
- bind {{ internal_vip.ip }}:8774
- bind {{ public_vip.ip }}:8774
- mode tcp
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8774 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova_metadata_api_cluster
- bind {{ internal_vip.ip }}:8775
- bind {{ public_vip.ip }}:8775
- option tcpka
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8775 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-cinder_api_cluster
- bind {{ internal_vip.ip }}:8776
- bind {{ public_vip.ip }}:8776
- mode tcp
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-#listen proxy-swift-proxy
-# bind {{ internal_vip.ip }}:8080
-# bind {{ public_vip.ip }}:8080
-# balance source
-# option tcpka
-# option tcplog
-#{% for host,ip in haproxy_hosts.items() %}
-# server {{ host }} {{ ip }}:8080 weight 1 check inter 2000 rise 2 fall 5
-#{% endfor %}
-
-listen proxy-ceilometer_api_cluster
- bind {{ internal_vip.ip }}:8777
- bind {{ public_vip.ip }}:8777
- mode tcp
- option tcp-check
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8777 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-aodh_api_cluster
- bind {{ internal_vip.ip }}:8042
- bind {{ public_vip.ip }}:8042
- mode tcp
- option tcp-check
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8042 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-congress_api_cluster
- bind {{ internal_vip.ip }}:1789
- bind {{ public_vip.ip }}:1789
- mode tcp
- option tcp-check
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:1789 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-dashboarad
- bind {{ public_vip.ip }}:80
- mode http
- balance source
- capture cookie vgnvisitor= len 32
- cookie SERVERID insert indirect nocache
- option forwardfor
- option httpchk
- option httpclose
- rspidel ^Set-cookie:\ IP=
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:80 cookie {{ host }} weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen stats
- mode http
- bind 0.0.0.0:9999
- stats enable
- stats refresh 30s
- stats uri /
- stats realm Global\ statistics
- stats auth admin:admin
-
-
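Every listen block in the removed haproxy.cfg above follows the same pattern: bind the internal (and usually public) VIP on the service port, then emit one `server` line per controller from the `haproxy_hosts` dict. A minimal sketch of how a template like this is normally installed and reloaded by the ha role (task and handler names are illustrative only):

    - name: update haproxy configuration
      template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg backup=yes
      notify: restart haproxy service

    - name: make sure haproxy is running
      service: name=haproxy state=started enabled=yes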
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/openstack_mitaka/roles/heat/tasks/heat_install.yml
deleted file mode 100644
index b90e6402..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/heat/tasks/heat_install.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install heat related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: generate heat service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-# '
-
-- name: create heat user domain
- shell: >
- . /opt/admin-openrc-v3.sh;
- openstack domain create --description "Stack projects and users" heat;
- openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
- openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
- openstack role create heat_stack_owner;
- openstack role add --project demo --user demo heat_stack_owner;
- when: inventory_hostname == groups['controller'][0]
-
-- name: update heat conf
- template: src=heat.j2
- dest=/etc/heat/heat.conf
- backup=yes
- notify:
- - restart heat service
- - remove heat-sqlite-db
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/heat/templates/heat.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/heat/templates/heat.j2
deleted file mode 100644
index 62df9fd9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/heat/templates/heat.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-[DEFAULT]
-heat_metadata_server_url = http://{{ internal_vip.ip }}:8000
-heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-log_dir = /var/log/heat
-stack_domain_admin = heat_domain_admin
-stack_domain_admin_password = {{ HEAT_PASS }}
-stack_user_domain_name = heat
-
-[database]
-connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[ec2authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = heat
-admin_password = {{ HEAT_PASS }}
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/openstack_mitaka/roles/keystone/tasks/keystone_install.yml
deleted file mode 100644
index ba4fc28e..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/tasks/keystone_install.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install keystone packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: disable boot auto start
- file:
- path={{ item }}
- state=absent
- with_items:
- - /etc/init.d/keystone
- - /etc/init/keystone.conf
- when: ansible_os_family == "Debian"
-
-- name: generate keystone service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: delete sqlite database
- file:
- path: /var/lib/keystone/keystone.db
- state: absent
-
-- name: update keystone conf
- template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
- notify:
- - restart keystone services
-
-- name: assure listen port exist
- lineinfile:
- dest: '{{ apache_config_dir }}/ports.conf'
- regexp: '{{ item.regexp }}'
- line: '{{ item.line}}'
- with_items:
- - regexp: "^Listen {{ internal_ip }}:5000"
- line: "Listen {{ internal_ip }}:5000"
- - regexp: "^Listen {{ internal_ip }}:35357"
- line: "Listen {{ internal_ip }}:35357"
- notify:
- - restart keystone services
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
- when: ansible_os_family == 'Debian'
- notify:
- - restart keystone services
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/wsgi-keystone.conf'
- when: ansible_os_family == 'RedHat'
- notify:
- - restart keystone services
-
-- name: enable keystone server
- file:
- src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
- dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
- state: "link"
- when: ansible_os_family == 'Debian'
- notify:
- - restart keystone services
-
-- name: keystone source files
- template: src={{ item }} dest=/opt/{{ item }}
- with_items:
- - admin-openrc.sh
- - demo-openrc.sh
- - admin-openrc-v3.sh
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/RedHat.yml
deleted file mode 100644
index 63ddce3c..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/RedHat.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-cron_path: "/var/spool/cron"
-
-packages:
- - openstack-keystone
- - python-openstackclient
-
-services:
- - httpd
-
-apache_config_dir: /etc/httpd/conf.d
-http_service_name: httpd
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/main.yml
deleted file mode 100755
index baaf89e1..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/main.yml
+++ /dev/null
@@ -1,194 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch:
- - python-keystoneclient
-
-services_noarch: []
-os_services:
- - name: keystone
- type: identity
- region: RegionOne
- description: "OpenStack Identity"
- publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
-
- - name: glance
- type: image
- region: RegionOne
- description: "OpenStack Image Service"
- publicurl: "http://{{ public_vip.ip }}:9292"
- internalurl: "http://{{ internal_vip.ip }}:9292"
- adminurl: "http://{{ internal_vip.ip }}:9292"
-
- - name: nova
- type: compute
- region: RegionOne
- description: "OpenStack Compute"
- publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
-
- - name: neutron
- type: network
- region: RegionOne
- description: "OpenStack Networking"
- publicurl: "http://{{ public_vip.ip }}:9696"
- internalurl: "http://{{ internal_vip.ip }}:9696"
- adminurl: "http://{{ internal_vip.ip }}:9696"
-
- - name: ceilometer
- type: metering
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8777"
- internalurl: "http://{{ internal_vip.ip }}:8777"
- adminurl: "http://{{ internal_vip.ip }}:8777"
-
- - name: aodh
- type: alarming
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8042"
- internalurl: "http://{{ internal_vip.ip }}:8042"
- adminurl: "http://{{ internal_vip.ip }}:8042"
-
- - name: cinder
- type: volume
- region: RegionOne
- description: "OpenStack Block Storage"
- publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-
- - name: cinderv2
- type: volumev2
- region: RegionOne
- description: "OpenStack Block Storage v2"
- publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-
- - name: heat
- type: orchestration
- region: RegionOne
- description: "OpenStack Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
-
- - name: heat-cfn
- type: cloudformation
- region: RegionOne
- description: "OpenStack CloudFormation Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8000/v1"
- internalurl: "http://{{ internal_vip.ip }}:8000/v1"
- adminurl: "http://{{ internal_vip.ip }}:8000/v1"
-
- - name: congress
- type: policy
- region: RegionOne
- description: "OpenStack Policy Service"
- publicurl: "http://{{ public_vip.ip }}:1789"
- internalurl: "http://{{ internal_vip.ip }}:1789"
- adminurl: "http://{{ internal_vip.ip }}:1789"
-
-# - name: swift
-# type: object-store
-# region: RegionOne
-# description: "OpenStack Object Storage"
-# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-
-os_users:
- - user: admin
- password: "{{ ADMIN_PASS }}"
- email: admin@admin.com
- role: admin
- tenant: admin
- tenant_description: "Admin Tenant"
-
- - user: glance
- password: "{{ GLANCE_PASS }}"
- email: glance@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: nova
- password: "{{ NOVA_PASS }}"
- email: nova@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: keystone
- password: "{{ KEYSTONE_PASS }}"
- email: keystone@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: neutron
- password: "{{ NEUTRON_PASS }}"
- email: neutron@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: ceilometer
- password: "{{ CEILOMETER_PASS }}"
- email: ceilometer@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: cinder
- password: "{{ CINDER_PASS }}"
- email: cinder@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: aodh
- password: "{{ AODH_PASS }}"
- email: aodh@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: heat
- password: "{{ HEAT_PASS }}"
- email: heat@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: congress
- password: "{{ CONGRESS_PASS }}"
- email: congress@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: demo
- password: ""
- email: heat@demo.com
- role: heat_stack_user
- tenant: demo
- tenant_description: "Demo Tenant"
-
-# - user: swift
-# password: "{{ CINDER_PASS }}"
-# email: swift@admin.com
-# role: admin
-# tenant: service
-# tenant_description: "Service Tenant"
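The os_services and os_users lists in the removed keystone vars above are pure data; other tasks in the role loop over them to register services, endpoints and accounts. As a rough illustration of what consumes each os_services entry (identity CLI syntax assumed, task name illustrative; endpoint registration would follow the same loop with the three URLs):

    - name: register keystone services
      shell: >
        . /opt/admin-openrc.sh;
        openstack service create --name {{ item.name }}
        --description "{{ item.description }}" {{ item.type }}
      with_items: "{{ os_services }}"
      run_once: true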
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/neutron-compute/tasks/main.yml
deleted file mode 100644
index fd3e51d3..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/neutron-compute/tasks/main.yml
+++ /dev/null
@@ -1,75 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install compute-related neutron packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: fix openstack neutron plugin config file
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
- systemctl daemon-reload
- when: ansible_os_family == 'RedHat'
-
-- name: fix openstack neutron plugin config file ubuntu
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
- when: ansible_os_family == "Debian"
-
-- name: generate neutron compute service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: config ml2 plugin
- template: src=templates/ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
-
-- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
-
-- name: config neutron
- template: src=templates/neutron.conf
- dest=/etc/neutron/neutron.conf backup=yes
- notify:
- - restart neutron compute service
- - restart nova-compute services
-
-- meta: flush_handlers
-
-- include: ../../neutron-network/tasks/odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/neutron-compute/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/neutron-compute/vars/Debian.yml
deleted file mode 100644
index 6ae52f3b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/neutron-compute/vars/Debian.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-packages:
- - neutron-common
- - neutron-plugin-ml2
- - openvswitch-datapath-dkms
- - openvswitch-switch
- - neutron-plugin-openvswitch-agent
-
-services:
- - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/neutron-network/tasks/main.yml
deleted file mode 100644
index 31f7f17c..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/neutron-network/tasks/main.yml
+++ /dev/null
@@ -1,117 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: assert kernel support for vxlan
- command: modinfo -F version vxlan
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: assert iproute2 suppport for vxlan
- command: ip link add type vxlan help
- register: iproute_out
- failed_when: iproute_out.rc == 255
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install neutron network related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: generate neutron network service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: fix openstack neutron plugin config file
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
- systemctl daemon-reload
- when: ansible_os_family == 'RedHat'
-
-- name: fix openstack neutron plugin config file ubuntu
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
- when: ansible_os_family == "Debian"
-
-- name: config l3 agent
- template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
- backup=yes
-
-- name: config dhcp agent
- template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
- backup=yes
-
-- name: update dnsmasq-neutron.conf
- template: src=templates/dnsmasq-neutron.conf
- dest=/etc/neutron/dnsmasq-neutron.conf
-
-- name: config metadata agent
- template: src=metadata_agent.ini
- dest=/etc/neutron/metadata_agent.ini backup=yes
-
-- name: config ml2 plugin
- template: src=templates/ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
-
-- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
-
-- name: config neutron
- template: src=templates/neutron.conf
- dest=/etc/neutron/neutron.conf backup=yes
-
-- name: force mtu to 1450 for vxlan
- lineinfile:
- dest: /etc/neutron/dnsmasq-neutron.conf
- regexp: '^dhcp-option-force'
- line: 'dhcp-option-force=26,1450'
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- include: firewall.yml
- when: enable_fwaas == True
-
-- include: vpn.yml
- when: enable_vpnaas == True
-
-- include: odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron network relation service
- service: name={{ item }} state=restarted enabled=yes
- with_flattened:
- - services_noarch
- - services
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/neutron-network/vars/Debian.yml
deleted file mode 100644
index c95d0265..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/neutron-network/vars/Debian.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - neutron-plugin-ml2
- - openvswitch-datapath-dkms
- - openvswitch-switch
- - neutron-l3-agent
- - neutron-dhcp-agent
- - neutron-plugin-openvswitch-agent
-
-services:
- - openvswitch-switch
- - neutron-openvswitch-agent
-
-openvswitch_agent: neutron-plugin-openvswitch-agent
-
-xorp_packages:
- - xorp
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/tasks/main.yml
deleted file mode 100644
index fe544630..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/tasks/main.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install nova-compute related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: restart virtlogd
- service: name=virtlogd state=started enabled=yes
- when: ansible_os_family == "Debian"
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova.conf
- notify:
- - restart nova-compute services
-
-- name: get number of cpu support virtualization
- shell: egrep -c '(vmx|svm)' /proc/cpuinfo
- register: kvm_cpu_num
-
-- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova-compute.conf
- notify:
- - restart nova-compute services
-
-- name: generate neutron control service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: remove nova sqlite db
- shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/templates/nova-compute.conf b/deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/templates/nova-compute.conf
deleted file mode 100644
index 305d408b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/templates/nova-compute.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-[DEFAULT]
-compute_driver=libvirt.LibvirtDriver
-force_raw_images = true
-[libvirt]
-{% if kvm_cpu_num.stdout_lines[0]|int == 0 %}
-virt_type=qemu
-{% else %}
-virt_type=kvm
-{% endif %}
-images_type = raw
-mem_stats_period_seconds=0
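The virt_type branch in the removed nova-compute.conf template above keys off the `kvm_cpu_num` result registered earlier in the role (an egrep count of vmx/svm CPU flags). A small verification sketch, illustrative only, for checking which branch a host would take before templating; note that `egrep -c` exits non-zero when the count is 0, so the failure is suppressed here:

    - name: count cpu virtualization flags
      shell: egrep -c '(vmx|svm)' /proc/cpuinfo
      register: kvm_cpu_num
      failed_when: false

    - name: show selected virt_type
      debug:
        msg: "virt_type will be {{ 'kvm' if kvm_cpu_num.stdout_lines[0]|int > 0 else 'qemu' }}"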
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/templates/nova.conf b/deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/templates/nova.conf
deleted file mode 100644
index 73b49a5a..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/nova-compute/templates/nova.conf
+++ /dev/null
@@ -1,89 +0,0 @@
-[DEFAULT]
-block_device_allocate_retries=5
-block_device_allocate_retries_interval=300
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=osapi_compute,metadata
-
-default_floating_pool={{ public_net_info.network }}
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[api_database]
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ internal_vip.ip }}
-
-[neutron]
-url = http://{{ internal_vip.ip }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-auth_type = password
-auth_url = http://{{ internal_vip.ip }}:35357
-password = {{ NEUTRON_PASS }}
-username = neutron
-project_domain_name = default
-user_domain_name = default
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/openstack_mitaka/roles/nova-controller/tasks/nova_config.yml
deleted file mode 100644
index f332c97a..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/nova-controller/tasks/nova_config.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: nova api db sync
- shell: su -s /bin/sh -c "nova-manage api_db sync" nova
- ignore_errors: True
- notify:
- - restart nova service
-
-- name: nova db sync
- nova_manage: action=dbsync
- notify:
- - restart nova service
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/odl_cluster/vars/Debian.yml
deleted file mode 100755
index bb560ac0..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/odl_cluster/vars/Debian.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-common_packages:
- - crudini
-
-service_ovs_name: openvswitch-switch
-service_ovs_agent_name: neutron-openvswitch-agent
-
-service_file:
- src: opendaylight.conf
- dst: /etc/init/opendaylight.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py
deleted file mode 100644
index a22ff0fe..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-import os
-loggers = {}
-log_dir = "/var/log/setup_network"
-try:
- os.makedirs(log_dir)
-except:
- pass
-
-
-def getLogger(name):
- if name in loggers:
- return loggers[name]
-
- logger = logging.getLogger(name)
- logger.setLevel(logging.DEBUG)
-
- # create file handler which logs even debug messages
- log_file = "%s/%s.log" % (log_dir, name)
- try:
- os.remove(log_file)
- except:
- pass
-
- fh = logging.FileHandler(log_file)
- fh.setLevel(logging.DEBUG)
-
- # create console handler with a higher log level
- ch = logging.StreamHandler()
- ch.setLevel(logging.ERROR)
-
- # create formatter and add it to the handlers
- formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
- ch.setFormatter(formatter)
- fh.setFormatter(formatter)
-
- # add the handlers to logger
- logger.addHandler(ch)
- logger.addHandler(fh)
-
- loggers[name] = logger
- return logger
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init
deleted file mode 100755
index c27a8bf8..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-## BEGIN INIT INFO
-# Provides: anamon.init
-# Default-Start: 3 5
-# Default-Stop: 0 1 2 4 6
-# Required-Start: $network
-# Short-Description: Starts the cobbler anamon boot notification program
-# Description: anamon runs the first time a machine is booted after
-# installation.
-## END INIT INFO
-
-#
-# anamon.init: Starts the cobbler post-install boot notification program
-#
-# chkconfig: 35 0 6
-#
-# description: anamon runs the first time a machine is booted after
-# installation.
-#
-python /opt/setup_networks/setup_networks.py
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py
deleted file mode 100644
index 086edb20..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import yaml
-import netaddr
-import os
-import log as logging
-
-LOG = logging.getLogger("net-init")
-config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
-
-
-def setup_bondings(bond_mappings):
- print bond_mappings
-
-
-def add_vlan_link(interface, ifname, vlan_id):
- LOG.info("add_vlan_link enter")
- cmd = "ip link add link %s name %s type vlan id %s; " % (
- ifname, interface, vlan_id)
- cmd += "ip link set %s up; ip link set %s up" % (interface, ifname)
- LOG.info("add_vlan_link: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None):
- LOG.info("add_ovs_port enter")
- cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname)
- if vlan_id:
- cmd += " tag=%s" % vlan_id
- cmd += " -- set Interface %s type=internal;" % ifname
- cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" % (ifname, uplink) # noqa
- cmd += "ip link set %s up;" % ifname
- LOG.info("add_ovs_port: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def setup_intfs(sys_intf_mappings, uplink_map):
- LOG.info("setup_intfs enter")
- for intf_name, intf_info in sys_intf_mappings.items():
- if intf_info["type"] == "vlan":
- add_vlan_link(
- intf_name,
- intf_info["interface"],
- intf_info["vlan_tag"])
- elif intf_info["type"] == "ovs":
- add_ovs_port(
- intf_info["interface"],
- intf_name,
- uplink_map[intf_info["interface"]],
- vlan_id=intf_info.get("vlan_tag"))
- else:
- pass
-
-
-def setup_ips(ip_settings, sys_intf_mappings):
- LOG.info("setup_ips enter")
- for intf_info in ip_settings.values():
- network = netaddr.IPNetwork(intf_info["cidr"])
- if sys_intf_mappings[intf_info["name"]]["type"] == "ovs":
- intf_name = intf_info["name"]
- else:
- intf_name = intf_info["alias"]
- cmd = "ip addr add %s/%s brd %s dev %s;" \
- % (intf_info["ip"], intf_info["netmask"], str(network.broadcast), intf_name) # noqa
- if "gw" in intf_info:
- cmd += "route del default;"
- cmd += "ip route add default via %s dev %s" % (
- intf_info["gw"], intf_name)
- LOG.info("setup_ips: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def main(config):
- uplink_map = {}
- setup_bondings(config["bond_mappings"])
- for provider_net in config["provider_net_mappings"]:
- uplink_map[provider_net['name']] = provider_net['interface']
-
- setup_intfs(config["sys_intf_mappings"], uplink_map)
- setup_ips(config["ip_settings"], config["sys_intf_mappings"])
-
-if __name__ == "__main__":
- os.system("service openvswitch-switch status|| service openvswitch-switch start") # noqa
- config = yaml.load(open(config_path))
- main(config)
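
The script above drives every step through os.system, so a failed ip or ovs-vsctl command goes unnoticed. A minimal sketch of the same VLAN-link step using subprocess, assuming the same network.cfg layout (sys_intf_mappings entries with type, interface and vlan_tag keys), is shown here as an illustration only; it is not the file shipped by the role.

# Minimal sketch, not the shipped script: create the VLAN sub-interfaces the
# way setup_networks.py does, but let a failing command raise an exception.
import subprocess
import yaml

def run(cmd):
    # shell=True keeps the command strings identical to the original script.
    subprocess.check_call(cmd, shell=True)

def add_vlan_link(alias, uplink, vlan_id):
    # ip link add link <uplink> name <alias> type vlan id <vlan_id>
    run("ip link add link %s name %s type vlan id %s" % (uplink, alias, vlan_id))
    run("ip link set %s up" % uplink)
    run("ip link set %s up" % alias)

if __name__ == "__main__":
    cfg = yaml.safe_load(open("network.cfg"))  # rendered by the role's template
    for name, info in cfg["sys_intf_mappings"].items():
        if info["type"] == "vlan":
            add_vlan_link(name, info["interface"], info["vlan_tag"])
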
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/handlers/main.yml
deleted file mode 100755
index e099fcf4..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/handlers/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart onos service
- service: name=onos state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml
deleted file mode 100755
index 6b619057..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml
+++ /dev/null
@@ -1,121 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install onos related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
- when: groups['onos']|length !=0
-
-- name: remove neutron-openvswitch-agent auto start
- shell: >
- update-rc.d neutron-openvswitch-agent remove;
- sed -i /neutron-openvswitch-agent/d /opt/service
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: shut down and disable Neutron's agent services
- service: name=neutron-openvswitch-agent state=stopped
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: remove neutron-l3-agent auto start
- shell: >
- update-rc.d neutron-l3-agent remove;
- sed -i /neutron-l3-agent/d /opt/service
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: shut down and disable Neutron's l3 agent services
- service: name=neutron-l3-agent state=stopped
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: Stop the Open vSwitch service and clear existing OVSDB
- shell: >
- service openvswitch-switch stop ;
- rm -rf /var/log/openvswitch/* ;
- rm -rf /etc/openvswitch/conf.db ;
- service openvswitch-switch start ;
- when: groups['onos']|length !=0
- ignore_errors: True
-
-##################################################################
-########### Recover External network #################
-##################################################################
-
-- name: add ovs bridge
- openvswitch_bridge: bridge={{ item["name"] }} state=present
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and groups['onos']|length !=0
-
-- name: add ovs uplink
- openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and groups['onos']|length !=0
-
-- name: add ovs uplink
- shell: ip link set {{ item["interface"] }} up
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and groups['onos']|length !=0
-
-- name: ensure script dir exist
- shell: mkdir -p /opt/setup_networks
- when: groups['onos']|length !=0
-
-- name: copy scripts
- copy: src={{ item }} dest=/opt/setup_networks
- with_items:
- - setup_networks/log.py
- - setup_networks/setup_networks.py
- when: groups['onos']|length !=0
-
-- name: copy boot scripts
- copy: src={{ item }} dest=/etc/init.d/ mode=0755
- with_items:
- - setup_networks/net_init
- when: groups['onos']|length !=0
-
-- name: copy config files
- template: src=network.cfg dest=/opt/setup_networks
- when: groups['onos']|length !=0
-
-- name: make sure python lib exist
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items:
- - python-yaml
- - python-netaddr
- when: groups['onos']|length !=0
-
-- name: run scripts
- shell: python /opt/setup_networks/setup_networks.py
- when: groups['onos']|length !=0
-
-- name: add to boot scripts
- service: name=net_init enabled=yes
- when: groups['onos']|length !=0
-##################################################################
-
-- name: restart keepalived to recover external IP
- shell: service keepalived restart
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: Install ONOS Cluster on Controller
- include: onos_controller.yml
- when: inventory_hostname in groups['onos'] and onos_sfc == "Disable"
-
-- name: Install ONOS Cluster with SFC on Controller
- include: onos_sfc_controller.yml
- when: inventory_hostname in groups['onos'] and onos_sfc == "Enable"
-
-- name: Config ONOS Cluster
- include: openvswitch.yml
- when: groups['onos']|length !=0
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/onos_controller.yml
deleted file mode 100755
index 9ab8d1c1..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/onos_controller.yml
+++ /dev/null
@@ -1,131 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download onos driver packages
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
-
-- name: unarchive onos driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
-
-- name: install onos driver
- command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh"
-
-- name: install onos required packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages
-
-- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-
-- name: unarchive jdk8 package
- command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
-
-- name: install install_jdk8 package
- command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
-
-- name: create JAVA_HOME environment variable
- shell: >
- export J2SDKDIR=/usr/lib/jvm/java-8-oracle;
- export J2REDIR=/usr/lib/jvm/java-8-oracle/jre;
- export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin;
- export JAVA_HOME=/usr/lib/jvm/java-8-oracle;
- export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db;
-
-- name: create onos group
- group: name=onos system=yes state=present
-
-- name: create onos user
- user:
- name: onos
- group: onos
- home: "{{ onos_home }}"
- createhome: "yes"
- system: "yes"
- shell: "/bin/false"
-
-- name: download onos package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }}
-
-- name: create new jar repository
- command: su -s /bin/sh -c "mkdir ~/.m2"
- ignore_errors: True
-
-- name: download jar repository
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
-
-- name: extract jar repository
- command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
-
-- name: extract onos package
- command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos
-
-- name: configure onos service
- shell: >
- echo 'export ONOS_OPTS=debug' > {{ onos_home }}/options;
- echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
- mkdir {{ onos_home }}/var;
- mkdir {{ onos_home }}/config;
- sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-
-- name: configure onos boot feature
- shell: >
- sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
-
-- name: wait for config time
- shell: "sleep 10"
-
-- name: start onos service
- service: name=onos state=started enabled=yes
-
-- name: wait for onos start time
- shell: "sleep 200"
-
-- name: add onos auto start
- shell: >
- echo "onos">>/opt/service
-
-##########################################################################################################
-################################ ONOS connect with OpenStack ################################
-##########################################################################################################
-- name: Configure Neutron1
- shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins onos_router;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
-
-- name: Create ML2 Configuration File
- template:
- src: ml2_conf.sh
- dest: "/opt/ml2_conf.sh"
- mode: 0777
-
-- name: Configure Neutron2
- command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-
-- name: Configure Neutron3
- shell: >
- mysql -e "drop database if exists neutron_ml2;";
- mysql -e "create database neutron_ml2 character set utf8;";
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
- su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
-
-- name: Restart neutron-server
- service: name=neutron-server state=restarted
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/onos_sfc_controller.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/onos_sfc_controller.yml
deleted file mode 100755
index 226923e8..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/onos_sfc_controller.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download onos driver packages
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
-
-- name: download onos sfc driver package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}" dest=/opt/
-
-- name: unarchive onos driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
-
-- name: unarchive onos sfc driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-sfc.tar -C /opt/"
-
-- name: install onos driver
- command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh"
-
-- name: install onos sfc driver
- command: su -s /bin/sh -c "/opt/networking-sfc/install_driver.sh"
-
-- name: install onos required packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages
-
-- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-
-- name: unarchive jdk8 package
- command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
-
-- name: install install_jdk8 package
- command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
-
-- name: create JAVA_HOME environment variable
- shell: >
- export J2SDKDIR=/usr/lib/jvm/java-8-oracle;
- export J2REDIR=/usr/lib/jvm/java-8-oracle/jre;
- export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin;
- export JAVA_HOME=/usr/lib/jvm/java-8-oracle;
- export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db;
-
-- name: create onos group
- group: name=onos system=yes state=present
-
-- name: create onos user
- user:
- name: onos
- group: onos
- home: "{{ onos_home }}"
- createhome: "yes"
- system: "yes"
- shell: "/bin/false"
-
-- name: download onos package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }}
-
-- name: create new jar repository
- command: su -s /bin/sh -c "mkdir ~/.m2"
- ignore_errors: True
-
-- name: download jar repository
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
-
-- name: extract jar repository
- command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
-
-- name: extract onos package
- command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos
-
-- name: configure onos service
- shell: >
- echo 'export ONOS_OPTS=debug' > {{ onos_home }}/options;
- echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
- mkdir {{ onos_home }}/var;
- mkdir {{ onos_home }}/config;
- sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-
-- name: configure onos boot feature
- shell: >
- sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
-
-- name: wait for config time
- shell: "sleep 10"
-
-- name: start onos service
- service: name=onos state=started enabled=yes
-
-- name: wait for onos start time
- shell: "sleep 200"
-
-- name: add onos auto start
- shell: >
- echo "onos">>/opt/service
-
-##########################################################################################################
-################################ ONOS connect with OpenStack ################################
-##########################################################################################################
-- name: Configure Neutron1
- shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_sfc.services.sfc.plugin.SfcPlugin,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,onos_router;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
-
-- name: Create ML2 Configuration File
- template:
- src: ml2_conf.sh
- dest: "/opt/ml2_conf.sh"
- mode: 0777
-
-- name: Configure Neutron2
- command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-
-- name: Configure Neutron3
- shell: >
- mysql -e "drop database if exists neutron_ml2;";
- mysql -e "create database neutron_ml2 character set utf8;";
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
- su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
-
-- name: Restart neutron-server
- service: name=neutron-server state=restarted
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/openvswitch.yml
deleted file mode 100755
index 76863890..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/openvswitch.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: set veth port
- shell: >
- ip link add onos_port1 type veth peer name onos_port2;
- ifconfig onos_port1 up;
- ifconfig onos_port2 up;
- ignore_errors: True
-
-- name: set veth to ovs
- shell: >
- export externalMac=`ifconfig eth1 | grep -Eo '\<[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'`;
- ifconfig onos_port2 hw ether $externalMac;
- ovs-vsctl add-port br-prv onos_port1;
- ignore_errors: True
-
-- name: add openflow-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
- when: inventory_hostname in groups['onos']
-
-- name: add openflow feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdatabase feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdb-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'";
- when: inventory_hostname in groups['onos']
-
-- name: add onos driver ovsdb feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdb provider host feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'";
- when: inventory_hostname in groups['onos']
-
-- name: add vtn feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'";
- when: inventory_hostname in groups['onos']
-
-- name: set public eth card start
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n onos_port2'"
- when: inventory_hostname in groups['onos']
-
-- name: Set ONOS as the manager
- command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
-
-- name: delete default gateway
- shell: >
- route delete default;
- when: inventory_hostname not in groups['onos']
- ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf
deleted file mode 100644
index 4ccf1c43..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf
+++ /dev/null
@@ -1,47 +0,0 @@
-global_defs {
- router_id {{ inventory_hostname }}
-}
-
-vrrp_sync_group VG1 {
- group {
- internal_vip
- public_vip
- }
-}
-
-vrrp_instance internal_vip {
- interface {{ internal_vip.interface }}
- virtual_router_id {{ vrouter_id_internal }}
- state BACKUP
- nopreempt
- advert_int 1
- priority {{ 50 + (host_index[inventory_hostname] * 50) }}
-
- authentication {
- auth_type PASS
- auth_pass 1234
- }
-
- virtual_ipaddress {
- {{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ internal_vip.interface }}
- }
-}
-
-vrrp_instance public_vip {
- interface br-ex
- virtual_router_id {{ vrouter_id_public }}
- state BACKUP
- nopreempt
- advert_int 1
- priority {{ 50 + (host_index[inventory_hostname] * 50) }}
-
- authentication {
- auth_type PASS
- auth_pass 4321
- }
-
- virtual_ipaddress {
- {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev br-ex
- }
-
-}
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/ml2_conf.sh
deleted file mode 100755
index 8af03df4..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/ml2_conf.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
-[onos]
-password = admin
-username = admin
-url_path = http://{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:8181/onos/vtn
-EOT
-
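
Because the heredoc above appends to ml2_conf.ini unconditionally, re-running the script leaves duplicate [onos] sections. A minimal idempotent alternative is sketched below under stated assumptions: the same keys as the template and a placeholder ONOS address (the real url_path comes from the mgmt IP of the first onos host); it is not part of the role.

# Minimal sketch: write the [onos] keys into ml2_conf.ini idempotently
# instead of appending a heredoc on every run.
import configparser

ML2_CONF = "/etc/neutron/plugins/ml2/ml2_conf.ini"
ONOS_VTN_URL = "http://192.0.2.10:8181/onos/vtn"  # placeholder management IP

cfg = configparser.RawConfigParser()
cfg.read(ML2_CONF)
if not cfg.has_section("onos"):
    cfg.add_section("onos")
cfg.set("onos", "username", "admin")
cfg.set("onos", "password", "admin")
cfg.set("onos", "url_path", ONOS_VTN_URL)
with open(ML2_CONF, "w") as f:
    cfg.write(f)
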
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian
deleted file mode 100644
index 5ab1519b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian
+++ /dev/null
@@ -1,14 +0,0 @@
-{%- for alias, intf in host_ip_settings.items() %}
-
-auto {{ alias }}
-iface {{ alias }} inet static
- address {{ intf["ip"] }}
- netmask {{ intf["netmask"] }}
-{% if "gw" in intf %}
- gateway {{ intf["gw"] }}
-{% endif %}
-{% if intf["name"] == alias %}
- pre-up ip link set {{ sys_intf_mappings[alias]["interface"] }} up
- pre-up ip link add link {{ sys_intf_mappings[alias]["interface"] }} name {{ alias }} type vlan id {{ sys_intf_mappings[alias]["vlan_tag"] }}
-{% endif %}
-{% endfor %}
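
Each alias in host_ip_settings becomes one /etc/network/interfaces style stanza, and the pre-up lines are emitted only when the alias is itself the VLAN device. A minimal rendering sketch with made-up example values (the real host_ip_settings and sys_intf_mappings come from the deploy network config) shows the output the template produces:

# Minimal sketch: render a trimmed copy of the template above with example
# data to show the interface stanza it produces for a VLAN-backed alias.
from jinja2 import Template

TEMPLATE = """\
{%- for alias, intf in host_ip_settings.items() %}

auto {{ alias }}
iface {{ alias }} inet static
    address {{ intf["ip"] }}
    netmask {{ intf["netmask"] }}
{% if "gw" in intf %}
    gateway {{ intf["gw"] }}
{% endif %}
{% if intf["name"] == alias %}
    pre-up ip link set {{ sys_intf_mappings[alias]["interface"] }} up
    pre-up ip link add link {{ sys_intf_mappings[alias]["interface"] }} name {{ alias }} type vlan id {{ sys_intf_mappings[alias]["vlan_tag"] }}
{% endif %}
{% endfor %}
"""

# Example values only; the playbook supplies these per host.
host_ip_settings = {"mgmt": {"name": "mgmt", "ip": "10.1.0.50", "netmask": "255.255.255.0"}}
sys_intf_mappings = {"mgmt": {"interface": "eth1", "vlan_tag": 101}}

print(Template(TEMPLATE).render(host_ip_settings=host_ip_settings,
                                sys_intf_mappings=sys_intf_mappings))
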
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg
deleted file mode 100644
index 75ba90cb..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-bond_mappings: {{ network_cfg["bond_mappings"] }}
-ip_settings: {{ ip_settings[inventory_hostname] }}
-sys_intf_mappings: {{ sys_intf_mappings }}
-provider_net_mappings: {{ network_cfg["provider_net_mappings"] }}
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/Debian.yml
deleted file mode 100755
index c480dd9f..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/Debian.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - software-properties-common
- - crudini
- - git
-
-services: []
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/RedHat.yml
deleted file mode 100755
index c480dd9f..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/RedHat.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - software-properties-common
- - crudini
- - git
-
-services: []
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/main.yml
deleted file mode 100755
index 0f6204e2..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/vars/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-
-services_noarch: []
-onos_pkg_name: onos-1.6.0.tar.gz
-onos_home: /opt/onos/
-karaf_dist: apache-karaf-3.0.5
-jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
-jdk8_script_name: install_jdk8.tar
-onos_driver: networking-onos.tar
-onos_sfc_driver: networking-sfc.tar
-repository: repository.tar
-onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base, onos-openflow, onos-ovsdatabase, onos-ovsdb-base, onos-drivers-ovsdb, onos-ovsdb-provider-host, onos-app-vtn-onosfw
-
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/open-contrail/tasks/uninstall-openvswitch.yml b/deploy/adapters/ansible/openstack_mitaka/roles/open-contrail/tasks/uninstall-openvswitch.yml
deleted file mode 100755
index 836cb78b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/open-contrail/tasks/uninstall-openvswitch.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: del ovs bridge
- shell: ovs-vsctl del-br br-int; ovs-vsctl del-br br-tun; ovs-vsctl del-br br-prv;
-
-- name: remove ovs and ovs-plugin daemon
- shell: >
- sed -i '/neutron-openvswitch-agent/d' /opt/service ;
- sed -i '/openvswitch-switch/d' /opt/service ;
-
-- name: stop ovs and ovs-plugin
- shell: service openvswitch-switch stop; service neutron-openvswitch-agent stop;
-
-- name: remove ovs and ovs-plugin files
- shell: >
- update-rc.d -f neutron-openvswitch-agent remove;
- mv /etc/init.d/neutron-openvswitch-agent /home/neutron-openvswitch-agent;
- mv /etc/init/neutron-openvswitch-agent.conf /home/neutron-openvswitch-agent.conf;
- update-rc.d -f openvswitch-switch remove ;
- mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ;
- mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ;
- update-rc.d -f neutron-ovs-cleanup remove ;
- mv /etc/init.d/neutron-ovs-cleanup /home/neutron-ovs-cleanup ;
- mv /etc/init/neutron-ovs-cleanup.conf /home/neutron-ovs-cleanup.conf ;
-
-- name: remove ovs kernel module
- shell: rmmod vport_vxlan; rmmod openvswitch;
- ignore_errors: True
-
-- name: copy recovery script
- copy: src={{ item }} dest=/opt/setup_networks
- with_items:
-# - recover_network_opencontrail.py
- - setup_networks_opencontrail.py
-
-#- name: recover external script
-# shell: python /opt/setup_networks/recover_network_opencontrail.py
-
-- name: modify net-init
- shell: sed -i 's/setup_networks.py/setup_networks_opencontrail.py/g' /etc/init.d/net_init
-
-- name: resolve dual NIC problem
- shell: >
- echo "net.ipv4.conf.all.arp_ignore=1" >> /etc/sysctl.conf ;
- /sbin/sysctl -p ;
- echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore ;
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/neutron.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/neutron.j2
deleted file mode 100644
index e7107660..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/neutron.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-[securitygroup]
-firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-enable_security_group = True
-
-[agent]
-prevent_arp_spoofing = False
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/nova.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/nova.j2
deleted file mode 100644
index 7dbc216a..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/nova.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/vars/Debian.yml
deleted file mode 100644
index 221a3d92..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/vars/Debian.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-configs_templates:
- - src: nova.j2
- dest:
- - /etc/nova/nova.conf
- - src: neutron.j2
- dest:
- - /etc/neutron/plugins/ml2/ml2_conf.ini
- - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
- - /etc/neutron/plugins/ml2/restproxy.ini
-
-controller_services:
- - nova-api
- - nova-cert
- - nova-conductor
- - nova-consoleauth
- - nova-novncproxy
- - nova-scheduler
- - neutron-server
- - neutron-openvswitch-agent
- - neutron-l3-agent
- - neutron-dhcp-agent
- - neutron-metadata-agent
-
-compute_services:
- - nova-compute
- - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/files/tacker.conf b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/files/tacker.conf
deleted file mode 100644
index 0c90dcb9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/files/tacker.conf
+++ /dev/null
@@ -1,36 +0,0 @@
-description "OpenStack Tacker Server"
-author "Yifei Xue <xueyifei@huawei.com>"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-chdir /var/run
-
-respawn
-respawn limit 20 5
-limit nofile 65535 65535
-
-pre-start script
- for i in lock run log lib ; do
- mkdir -p /var/$i/tacker
- chown root /var/$i/tacker
- done
-end script
-
-script
- [ -x "/usr/local/bin/tacker-server" ] || exit 0
- DAEMON_ARGS=""
- CONFIG_FILE="/usr/local/etc/tacker/tacker.conf"
- USE_SYSLOG=""
- USE_LOGFILE=""
- NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG=""
- [ -r /etc/default/openstack ] && . /etc/default/openstack
- [ -r /etc/default/$UPSTART_JOB ] && . /etc/default/$UPSTART_JOB
- [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
- [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/tacker/tacker.log"
- [ -z "$NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG" ] && DAEMON_ARGS="$DAEMON_ARGS --config-file=$CONFIG_FILE"
-
- exec start-stop-daemon --start --chdir /var/lib/tacker \
- --chuid root:root --make-pidfile --pidfile /var/run/tacker/tacker.pid \
- --exec /usr/local/bin/tacker-server -- ${DAEMON_ARGS}
-end script
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/tasks/tacker_controller.yml b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/tasks/tacker_controller.yml
deleted file mode 100755
index cd3b19e8..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/tasks/tacker_controller.yml
+++ /dev/null
@@ -1,215 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: get http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: create tacker_home, tacker_client_home, tacker_horizon_home
- shell: >
- mkdir -p /opt/tacker;
- mkdir -p /opt/tacker_client;
- mkdir -p /opt/tacker_horizon
-
-- name: download tacker package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_pkg_name }}" dest=/opt/{{ tacker_pkg_name }}
-
-- name: download tacker_client package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_client_pkg_name }}" dest=/opt/{{ tacker_client_pkg_name }}
-
-- name: download tacker_horizon package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_horizon_pkg_name }}" dest=/opt/{{ tacker_horizon_pkg_name }}
-
-- name: extract tacker package
- command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_pkg_name }} -C {{ tacker_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files"
-
-- name: extract tacker_client package
- command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_client_pkg_name }} -C {{ tacker_client_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files"
-
-- name: extract tacker_horizon package
- command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_horizon_pkg_name }} -C {{ tacker_horizon_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files"
-
-- name: edit ml2_conf.ini
- shell: crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security;
-
-- name: Restart neutron-server
- service: name=neutron-server state=restarted
-
-- name: "create haproxy configuration for tacker"
- template:
- src: "haproxy-tacker-cfg.j2"
- dest: "/tmp/haproxy-tacker.cfg"
-
-- name: get the current haproxy configuration
- shell: cat /etc/haproxy/haproxy.cfg
- register: ha_cfg
-
-- name: "combination of the haproxy configuration"
- shell: "cat /tmp/haproxy-tacker.cfg >> /etc/haproxy/haproxy.cfg"
- when: ha_cfg.stdout.find('8888') == -1
-
-- name: "delete temporary configuration file"
- file:
- dest: "/tmp/haproxy-tacker.cfg"
- state: "absent"
-
-- name: "restart haproxy"
- service:
- name: "haproxy"
- state: "restarted"
-
-- name: drop and recreate tacker database
- shell: mysql -e "drop database if exists tacker;";
- mysql -e "create database tacker character set utf8;";
- mysql -e "grant all on tacker.* to 'tacker'@'%' identified by 'TACKER_DBPASS';";
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: get the openstack user info
- shell: . /opt/admin-openrc.sh; openstack user list
- register: user_info
-
-- name: get the openstack service info
- shell: . /opt/admin-openrc.sh; openstack service list
- register: service_info
-
-- name: get the openstack endpoint info
- shell: . /opt/admin-openrc.sh; openstack endpoint list
- register: endpoint_info
-
-- name: delete the existed tacker endpoint
- shell: . /opt/admin-openrc.sh; openstack endpoint delete $(openstack endpoint list | grep tacker | awk '{print $2}')
- when: endpoint_info.stdout.find('tacker') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: delete the existed tacker service
- shell: . /opt/admin-openrc.sh; openstack service delete tacker
- when: service_info.stdout.find('tacker') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: delete the existed tacker user
- shell: . /opt/admin-openrc.sh; openstack user delete tacker
- when: user_info.stdout.find('tacker') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: create tacker user with admin privileges
- shell: . /opt/admin-openrc.sh; openstack user create --password console tacker; openstack role add --project service --user tacker admin;
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: create tacker service
- shell: >
- . /opt/admin-openrc.sh; openstack service create --name tacker --description "Tacker Project" nfv-orchestration
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: provide an endpoint to tacker service
- shell: >
- . /opt/admin-openrc.sh; openstack endpoint create --region RegionOne \
- --publicurl 'http://{{ public_vip.ip }}:8888/' \
- --adminurl 'http://{{ internal_vip.ip }}:8888/' \
- --internalurl 'http://{{ internal_vip.ip }}:8888/' tacker
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: install pip package
- pip: name=Babel state=present version=2.3.4
-
-- name: install pip packages
- shell: >
- pip install tosca-parser heat-translator oslosphinx;
-
-- name: install tacker
- shell: >
- . /opt/admin-openrc.sh; cd {{ tacker_home }}; python setup.py install
-
-- name: create 'tacker' directory in '/var/cache', set ownership and permissions
- shell: >
- mkdir -p /var/cache/tacker
-# sudo chown <LOGIN_USER>:root /var/cache/tacker
-# chmod 700 /var/cache/tacker
-
-- name: create 'tacker' directory in '/var/log'
- shell: mkdir -p /var/log/tacker
-
-- name: copy tacker configs
- template: src={{ item.src }} dest=/opt/os_templates
- with_items: "{{ tacker_configs_templates }}"
-
-- name: edit tacker configuration file
- shell: crudini --merge /usr/local/etc/tacker/tacker.conf < /opt/os_templates/tacker.j2
-
-- name: populate tacker database
- shell: >
- . /opt/admin-openrc.sh; /usr/local/bin/tacker-db-manage --config-file /usr/local/etc/tacker/tacker.conf upgrade head
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: install tacker client
- shell: >
- . /opt/admin-openrc.sh; cd {{ tacker_client_home }}; python setup.py install
-
-- name: install tacker horizon
- shell: >
- . /opt/admin-openrc.sh; cd {{ tacker_horizon_home }}; python setup.py install
-
-- name: enable tacker horizon in dashboard
- shell: >
- cp {{ tacker_horizon_home }}/openstack_dashboard_extensions/* /usr/share/openstack-dashboard/openstack_dashboard/enabled/
-
-- name: restart apache server
- shell: service apache2 restart
-
-- name: create tacker service
- copy: src=tacker.conf dest=/etc/init
-
-- name: create tacker service work dir
- file: path=/var/lib/tacker state=directory
-
-- name: link the tacker service
- file:
- src: /etc/init/tacker.conf
- dest: /etc/init.d/tacker
- state: link
-
-- name: start tacker service
- shell: service tacker start
-
-- name: create tackerc file
- template: src=tackerc.sh dest=/opt/tackerc.sh mode=777
-
-- name: get the nfv_user info
- shell: . /opt/tackerc.sh; openstack user list
- register: nfvuser_info
-
-- name: delete the existed nfv user
- shell: . /opt/tackerc.sh; openstack user delete nfv_user
- when: nfvuser_info.stdout.find('nfv') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: get the openstack project info
- shell: . /opt/tackerc.sh; openstack project list
- register: nfvproject_info
-
-- name: delete the existed nfv project
- shell: . /opt/tackerc.sh; openstack project delete $(openstack project list | grep nfv | awk '{print $2}')
- when: nfvproject_info.stdout.find('nfv') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: create an nfv project
- shell: . /opt/tackerc.sh; openstack project create --description "NFV Project" nfv
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: create nfv user with admin privileges
- shell: . /opt/tackerc.sh; openstack user create --password console nfv_user; openstack role add --project nfv --user nfv_user admin;
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: create config.yml
- template: src=config.yaml dest=/opt/config.yaml
-
-- name: check if tacker running
- shell: . /opt/tackerc.sh; while (!(tacker ext-list)); do sleep 30; done
-
-- name: register VIM to tacker
- shell: . /opt/tackerc.sh; tacker vim-register --config-file /opt/config.yaml --description "OpenStack" --name VIM0
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: restart tacker service
- shell: service tacker stop; service tacker start
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/config.yaml b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/config.yaml
deleted file mode 100644
index 8f73e907..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/config.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-auth_url: 'http://{{ public_vip.ip }}:5000/v2.0'
-username: 'nfv_user'
-password: 'console'
-project_name: 'nfv'
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2
deleted file mode 100644
index 4f186b67..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2
+++ /dev/null
@@ -1,426 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = True
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = True
-
-# Where to store Tacker state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/tacker
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-auth_strategy = keystone
-policy_file = /usr/local/etc/tacker/policy.json
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-# log_dir =
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ internal_ip }}
-
-# Port to bind the API server to
-bind_port = 8888
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of tacker.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Tacker core plugin entrypoint to be loaded from the
-# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the tacker source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-# core_plugin =
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the tacker source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-
-service_plugins = vnfm,nfvo
-
-# Paste configuration file
-# api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-# auth_strategy = keystone
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Tacker is
-# being used in conjunction with nova security groups
-# allow_overlapping_ips = False
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = tacker.openstack.common.rpc.impl_kombu
-# Size of RPC thread pool
-# rpc_thread_pool_size = 64
-# Size of RPC connection pool
-# rpc_conn_pool_size = 30
-# Seconds to wait for a response from call or multicall
-# rpc_response_timeout = 60
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-# rpc_cast_timeout = 30
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = tacker
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# IP address of the RabbitMQ installation
-# rabbit_host = localhost
-# Password of the RabbitMQ server
-# rabbit_password = guest
-# Port where RabbitMQ server is running/listening
-# rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-# rabbit_userid = guest
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-
-# QPID
-# rpc_backend=tacker.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=tacker.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = tacker.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = tacker.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = tacker.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-# default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-# notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-# agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# tacker server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to tacker server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-# api_workers = 0
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-# rpc_workers = 0
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== tacker nova interactions ==========
-# Send notification to nova when port status is active.
-# notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-# notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-# nova_url = http://127.0.0.1:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-# nova_region_name =
-
-# Username for connection to nova in admin context
-# nova_admin_username =
-
-# The uuid of the admin nova tenant
-# nova_admin_tenant_id =
-
-# Password for connection to nova in admin context.
-# nova_admin_password =
-
-# Authorization URL for connection to nova in admin context.
-# nova_admin_auth_url =
-
-# CA file for novaclient to verify server certificates
-# nova_ca_certificates_file =
-
-# Boolean to control ignoring SSL errors on the nova url
-# nova_api_insecure = False
-
-# Number of seconds between sending events to nova if there are any events to send
-# send_events_interval = 2
-
-# ======== end of tacker nova interactions ==========
-
-[agent]
-# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-# report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-signing_dir = /var/cache/tacker
-#cafile = /opt/stack/data/ca-bundle.pem
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = tacker
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-identity_uri = http://{{ internal_vip.ip }}:5000/v2.0
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/tacker
-connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main tacker server. (Leave it as is if the database runs on this host.)
-# connection = sqlite://
-# NOTE: In deployment the [database] section and its connection attribute may
-# be set in the corresponding core plugin '.ini' file. However, it is suggested
-# to put the [database] section and its connection attribute in this
-# configuration file.
-
-# Database engine for which script will be generated when using offline
-# migration
-# engine =
-
-# The SQLAlchemy connection string used to connect to the slave database
-# slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-# max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-# retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-# min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-# max_pool_size = 10
-
-# Timeout in seconds before idle sql connections are reaped
-# idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-# max_overflow = 20
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-# connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-# connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-# pool_timeout = 10
-
-[tacker]
-# Specify drivers for hosting device
-# infra_driver = heat,nova,noop
-
-# Specify drivers for mgmt
-# mgmt_driver = noop,openwrt
-
-# Specify drivers for monitoring
-# monitor_driver = ping, http_ping
-
-[nfvo_vim]
-# Supported VIM drivers: resource orchestration controllers such as OpenStack, kvm
-# Default VIM driver is OpenStack
-# vim_drivers = openstack
-# Default VIM placement if vim id is not provided
-default_vim = VIM0
-
-[vim_keys]
-#openstack = /etc/tacker/vim/fernet_keys
-[tacker_nova]
-# parameters for novaclient to talk to nova
-region_name = RegionOne
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = nova
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-
-[tacker_heat]
-heat_uri = http://{{ internal_vip.ip }}:8004/v1
-stack_retries = 60
-stack_retry_wait = 5
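The file above is consumed as a Jinja2 template, so placeholders such as {{ internal_vip.ip }} are substituted by Ansible before tacker ever reads it. A minimal sketch of that substitution in Python, assuming a hypothetical VIP of 10.1.0.50 (not a value taken from this repository):

# Illustrative only: render a tacker.conf fragment the way Ansible's template
# module would; internal_vip below is a made-up example value.
from jinja2 import Template

fragment = (
    "connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8\n"
    "auth_url = http://{{ internal_vip.ip }}:35357\n"
)
print(Template(fragment).render(internal_vip={"ip": "10.1.0.50"}))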
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tackerc.sh b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tackerc.sh
deleted file mode 100644
index c673e7f1..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tackerc.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-export LC_ALL=C
-export OS_NO_CACHE=true
-export OS_TENANT_NAME=service
-export OS_PROJECT_NAME=service
-export OS_USERNAME=tacker
-export OS_PASSWORD=console
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:5000/v2.0
-export OS_DEFAULT_DOMAIN=default
-export OS_AUTH_STRATEGY=keystone
-export OS_REGION_NAME=RegionOne
-export TACKER_ENDPOINT_TYPE=internalurl
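The rc file above only exports OS_* variables for the tacker client. As a rough sketch (not part of this change), a helper could collect those variables after the file has been sourced; the fallback values are placeholders:

# Illustrative only: read the credentials exported by tackerc.sh from the
# environment of the current process.
import os

creds = {
    "username": os.environ.get("OS_USERNAME", "tacker"),
    "password": os.environ.get("OS_PASSWORD", ""),
    "tenant_name": os.environ.get("OS_TENANT_NAME", "service"),
    "auth_url": os.environ.get("OS_AUTH_URL", ""),
    "region_name": os.environ.get("OS_REGION_NAME", "RegionOne"),
}
print(creds)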
diff --git a/deploy/adapters/ansible/openstack_mitaka/templates/neutron.conf b/deploy/adapters/ansible/openstack_mitaka/templates/neutron.conf
deleted file mode 100644
index 33231ed5..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/templates/neutron.conf
+++ /dev/null
@@ -1,486 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ VERBOSE }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-notify_nova_on_port_status_changes = True
-notify_nova_on_port_data_changes = True
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response. A value of
-# 'infinite' or a negative number means no limit; otherwise the value must be
-# greater than 0. If the number of items requested exceeds
-# pagination_max_limit, the server returns at most pagination_max_limit items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ internal_vip.ip }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-{% if NOVA_ADMIN_TENANT_ID|default('') %}
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-{% endif %}
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on the back end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on the back end, a health monitor is a
-# resource consumer and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 30
-use_db_reconnect = True
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
-# To activate Radware's lbaas driver, uncomment the next line.
-# To keep HAProxy as the default lbaas driver, remove the 'default' attribute
-# from the line below; otherwise comment out the HAProxy line.
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
-
-{% if enable_fwaas %}
-[fwaas]
-driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
-enabled = True
-{% endif %}
-
-[nova]
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = nova
-password = {{ NOVA_PASS }}
-
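Once the template is rendered, the result is a plain INI file. A small sanity-check sketch, assuming the rendered file sits at /etc/neutron/neutron.conf (the path and keys come from the template; everything else is illustrative):

# Illustrative only: read a rendered neutron.conf back as INI.
# strict=False tolerates keys that appear twice in [DEFAULT];
# interpolation=None leaves '%' and '$' characters untouched.
import configparser

cfg = configparser.ConfigParser(strict=False, interpolation=None)
cfg.read("/etc/neutron/neutron.conf")
print(cfg.get("DEFAULT", "core_plugin"))
print(cfg.get("database", "connection"))
print(cfg.get("keystone_authtoken", "auth_uri"))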
diff --git a/deploy/adapters/ansible/openstack_mitaka/templates/nova.conf b/deploy/adapters/ansible/openstack_mitaka/templates/nova.conf
deleted file mode 100644
index 3a5735cf..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/templates/nova.conf
+++ /dev/null
@@ -1,96 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211' % host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE }}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=osapi_compute,metadata
-
-default_floating_pool={{ public_net_info.network }}
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-memcached_servers = {{ memcached_servers }}
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[api_database]
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-memcached_servers = {{ memcached_servers }}
-
-[glance]
-host = {{ internal_vip.ip }}
-
-[neutron]
-url = http://{{ internal_vip.ip }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-auth_type = password
-auth_url = http://{{ internal_vip.ip }}:35357
-password = {{ NEUTRON_PASS }}
-username = neutron
-project_domain_name = default
-user_domain_name = default
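The Jinja2 header of this template turns the haproxy_hosts inventory mapping into a comma-separated memcached_servers string. The same construction in plain Python, with a made-up host map:

# Illustrative only: reproduce the memcached_servers join from the template
# header; haproxy_hosts is a hypothetical inventory mapping.
haproxy_hosts = {"host1": "10.1.0.11", "host2": "10.1.0.12"}
memcached_servers = ",".join("%s:11211" % ip for ip in haproxy_hosts.values())
print(memcached_servers)  # -> 10.1.0.11:11211,10.1.0.12:11211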
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/.gitkeep b/deploy/adapters/ansible/openstack_mitaka_xenial/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/.gitkeep
+++ /dev/null
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
deleted file mode 100644
index ac31b682..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
+++ /dev/null
@@ -1,265 +0,0 @@
----
-- hosts: all
- remote_user: root
- pre_tasks:
-    - name: make sure ssh dir exists
- file:
- path: '{{ item.path }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- state: directory
- mode: 0755
- with_items:
- - path: /root/.ssh
- owner: root
- group: root
-
- - name: write ssh config
- copy:
- content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
- dest: '{{ item.dest }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- mode: 0600
- with_items:
- - dest: /root/.ssh/config
- owner: root
- group: root
-
- - name: generate ssh keys
- shell: if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "already gen ssh key!"; fi;
-
- - name: fetch ssh keys
- fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes
-
- - authorized_key:
- user: root
-        key: "{{ lookup('file', item) }}"
- with_fileglob:
- - /tmp/ssh-keys-*
- max_fail_percentage: 0
- roles:
- - common
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - setup-network
-
-- hosts: ha
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ha
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - memcached
- - apache
- - database
- - mq
- - keystone
- - nova-controller
- - neutron-controller
- - cinder-controller
- - glance
- - neutron-common
- - neutron-network
- - ceilometer_controller
-# - ext-network
- - dashboard
- - heat
-# - aodh
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - storage
-
-- hosts: compute
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - nova-compute
- - neutron-compute
- - cinder-volume
- - ceilometer_compute
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - moon
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - secgroup
-
-- hosts: ceph_adm
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles: []
- # - ceph-deploy
-
-- hosts: ceph
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-purge
- - ceph-config
-
-- hosts: ceph_mon
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-mon
-
-- hosts: ceph_osd
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-osd
-
-- hosts: ceph
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-openstack
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - monitor
-
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- tasks:
-    - name: set bash as nova's login shell
- user:
- name: nova
- shell: /bin/bash
-
-    - name: make sure ssh dir exists
- file:
- path: '{{ item.path }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- state: directory
- mode: 0755
- with_items:
- - path: /var/lib/nova/.ssh
- owner: nova
- group: nova
-
- - name: copy ssh keys for nova
- shell: cp -rf /root/.ssh/id_rsa /var/lib/nova/.ssh;
-
- - name: write ssh config
- copy:
- content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
- dest: '{{ item.dest }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- mode: 0600
- with_items:
- - dest: /var/lib/nova/.ssh/config
- owner: nova
- group: nova
-
- - authorized_key:
- user: nova
-        key: "{{ lookup('file', item) }}"
- with_fileglob:
- - /tmp/ssh-keys-*
-
- - name: chown ssh file
- shell: chown -R nova:nova /var/lib/nova/.ssh;
-
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - odl_cluster
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - onos_cluster
-
-- hosts: all
- remote_user: root
- sudo: True
- max_fail_percentage: 0
- roles:
- - open-contrail
-
-- hosts: all
- remote_user: root
- accelerate: true
- serial: 1
- max_fail_percentage: 0
- roles:
- - odl_cluster_neutron
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - odl_cluster_post
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ext-network
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - boot-recovery
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - controller-recovery
-
-- hosts: compute
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - compute-recovery
-
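The pre_tasks in this playbook build all-to-all SSH trust: every host's id_rsa.pub is fetched to /tmp/ssh-keys-<hostname> on the control node, and each fetched key is then authorized on every host. A rough sketch of the aggregation step only (file paths as in the playbook, the rest is illustrative):

# Illustrative only: merge the fetched public keys into one payload, which is
# what the authorized_key loop over /tmp/ssh-keys-* effectively distributes.
import glob

keys = []
for path in sorted(glob.glob("/tmp/ssh-keys-*")):
    with open(path) as f:
        keys.append(f.read().strip())
print("\n".join(keys))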
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/handlers/main.yml
deleted file mode 100644
index b3399e0c..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/handlers/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart aodh services
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/aodh_config.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/aodh_config.yml
deleted file mode 100644
index e60d5338..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/aodh_config.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: aodh db sync
- shell: su -s /bin/sh -c "aodh-dbsync" aodh
- notify:
- - restart aodh services
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/aodh_install.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/aodh_install.yml
deleted file mode 100644
index eb51fbea..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/aodh_install.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install aodh packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: update aodh conf
- template: src={{ item }} dest=/etc/aodh/aodh.conf
- backup=yes
- with_items:
- - aodh.conf.j2
-# - api_paste.ini.j2
-# - policy.json.j2
- notify:
- - restart aodh services
-
-- name: write services to monitor list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: remove default sqlite db
-  shell: rm /var/lib/aodh/aodh.sqlite || touch aodh.sqlite.db.removed
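The with_items expressions above merge the per-distro package and service lists with the arch-independent ones through Jinja2's union filter (duplicates removed, first-list order preserved). The equivalent in plain Python, using the Debian package names from this role and the empty packages_noarch from its vars/main.yml:

# Illustrative only: emulate "packages | union(packages_noarch)".
packages = ["aodh-api", "aodh-evaluator", "aodh-notifier",
            "aodh-listener", "aodh-expirer", "python-ceilometerclient"]
packages_noarch = []
combined = packages + [p for p in packages_noarch if p not in packages]
print(combined)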
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/main.yml
deleted file mode 100644
index 9b61915f..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include: aodh_install.yml
- tags:
- - install
- - aodh_install
- - aodh
-
-- include: aodh_config.yml
- when: inventory_hostname == groups['controller'][0]
- tags:
- - config
- - aodh_config
- - aodh
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2
deleted file mode 100644
index 752dd0f0..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2
+++ /dev/null
@@ -1,46 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211' % host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
-[DEFAULT]
-bind_host = {{ internal_ip }}
-bind_port = 8042
-rpc_backend = rabbit
-auth_strategy = keystone
-debug = True
-
-[oslo_messaging_rabbit]
-rabbit_hosts = {{ internal_vip.ip }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-#rabbit_use_ssl = false
-
-[database]
-connection = mysql://aodh:{{ AODH_DBPASS }}@{{ db_host }}/aodh
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-auth_url = http://{{ internal_vip.ip }}:35357
-identity_uri = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = aodh
-password = {{ AODH_PASS }}
-memcached_servers = {{ memcached_servers }}
-token_cache_time = 300
-revocation_cache_time = 60
-
-[service_credentials]
-os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0
-os_username = aodh
-os_tenant_name = service
-os_password = {{ AODH_PASS }}
-os_endpoint_type = internalURL
-os_region_name = RegionOne
-
-[api]
-host = {{ internal_ip }}
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/api_paste.ini.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/api_paste.ini.j2
deleted file mode 100644
index 151789c4..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/api_paste.ini.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-# aodh API WSGI Pipeline
-# Define the filters that make up the pipeline for processing WSGI requests
-# Note: This pipeline is PasteDeploy's term rather than aodh's pipeline
-# used for processing samples
-
-# Remove authtoken from the pipeline if you don't want to use keystone authentication
-[pipeline:main]
-pipeline = cors request_id authtoken api-server
-
-[app:api-server]
-paste.app_factory = aodh.api.app:app_factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-oslo_config_project = aodh
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = aodh
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/policy.json.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/policy.json.j2
deleted file mode 100644
index 4fd873e9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/policy.json.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "context_is_admin": "role:admin",
- "segregation": "rule:context_is_admin",
- "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s",
- "default": "rule:admin_or_owner",
-
- "telemetry:get_alarm": "rule:admin_or_owner",
- "telemetry:get_alarms": "rule:admin_or_owner",
- "telemetry:query_alarm": "rule:admin_or_owner",
-
- "telemetry:create_alarm": "",
- "telemetry:change_alarm": "rule:admin_or_owner",
- "telemetry:delete_alarm": "rule:admin_or_owner",
-
- "telemetry:get_alarm_state": "rule:admin_or_owner",
- "telemetry:change_alarm_state": "rule:admin_or_owner",
-
- "telemetry:alarm_history": "rule:admin_or_owner",
- "telemetry:query_alarm_history": "rule:admin_or_owner"
-}
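In production these rules are evaluated by oslo.policy; the snippet below is only a hand-rolled, conceptual approximation of the admin_or_owner rule (an admin role, or a project_id match against the target), not the real library API:

# Illustrative only: conceptual check equivalent to
#   "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s"
def admin_or_owner(creds, target):
    if "admin" in creds.get("roles", []):      # rule:context_is_admin
        return True
    return creds.get("project_id") == target.get("project_id")

print(admin_or_owner({"roles": ["member"], "project_id": "p1"},
                     {"project_id": "p1"}))   # True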
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/Debian.yml
deleted file mode 100644
index bdf4655e..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/Debian.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#############################################################################
----
-packages:
- - aodh-api
- - aodh-evaluator
- - aodh-notifier
- - aodh-listener
- - aodh-expirer
- - python-ceilometerclient
-
-services:
- - aodh-api
- - aodh-notifier
- - aodh-evaluator
- - aodh-listener
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/RedHat.yml
deleted file mode 100644
index a0381c6b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/RedHat.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#############################################################################
----
-packages:
- - openstack-aodh-api
- - openstack-aodh-evaluator
- - openstack-aodh-notifier
- - openstack-aodh-listener
- - openstack-aodh-expirer
- - python-ceilometerclient
-
-services:
- - openstack-aodh-api
- - openstack-aodh-notifier
- - openstack-aodh-evaluator
- - openstack-aodh-listener
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/main.yml
deleted file mode 100644
index b17f6ed0..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-##
-## All rights reserved. This program and the accompanying materials
-## are made available under the terms of the Apache License, Version 2.0
-## which accompanies this distribution, and is available at
-## http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
----
-packages_noarch: []
-
-services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml
deleted file mode 100644
index 2a3c3249..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - ceilometer-api
- - ceilometer-collector
- - ceilometer-agent-central
- - ceilometer-agent-notification
-# - ceilometer-alarm-evaluator
-# - ceilometer-alarm-notifier
- - python-ceilometerclient
-
-ceilometer_services:
- - ceilometer-agent-central
- - ceilometer-agent-notification
- - ceilometer-api
- - ceilometer-collector
-# - ceilometer-alarm-evaluator
-# - ceilometer-alarm-notifier
-
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/RedHat.yml
deleted file mode 100644
index 6c5f53ec..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/RedHat.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
-# - openstack-ceilometer-alarm
- - python-ceilometerclient
-
-ceilometer_services:
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
-# - openstack-ceilometer-alarm-evaluator
-# - openstack-ceilometer-alarm-notifier
-
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/tasks/install_mon.yml
deleted file mode 100644
index 1d14c2d2..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/tasks/install_mon.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: Create a default data directory
- file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}" state="directory"
-
-- name: Populate the monitor daemon
- shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring"
-
-- name: Change ceph/mon dir owner to ceph
- shell: "chown -R ceph:ceph /var/lib/ceph/mon"
- when: ansible_os_family == "Debian"
-
-- name: copy templates
- template:
- src: ceph-mon.service
- dest: /lib/systemd/system/ceph-mon.service
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: Touch the done and auto start file
- file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch"
- with_items:
- - "done"
- - "{{ ceph_start_type }}"
-
-- name: start mon daemon
- shell: "{{ ceph_start_script }}"
-
-- name: wait for osd keyring to be created
- wait_for: path=/var/lib/ceph/bootstrap-osd/ceph.keyring
-
-- name: fetch osd keyring
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
- run_once: True
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/vars/Debian.yml
deleted file mode 100644
index a792acad..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/vars/Debian.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-ceph_start_script: "service ceph-mon start"
-ceph_start_type: "systemd"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml
deleted file mode 100644
index 2097ca57..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-##
-## All rights reserved. This program and the accompanying materials
-## are made available under the terms of the Apache License, Version 2.0
-## which accompanies this distribution, and is available at
-## http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
----
-- name: get mount info
- command: mount
- register: mount_info
-
-- name: try to unmount the image NFS directory
- shell: |
- umount /var/lib/glance/images
- sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- when: mount_info.stdout.find('images') != -1
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/main.yml
deleted file mode 100644
index 06c3acb6..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- include_vars: "{{ ansible_os_family }}.yml"
- tags:
- - ceph_deploy
- - ceph_openstack_pre
- - ceph_openstack_conf
- - ceph_openstack_post
- - ceph_openstack
-
-- include: ceph_openstack_pre.yml
- tags:
- - ceph_deploy
- - ceph_openstack_pre
- - ceph_openstack
-
-- include: ceph_openstack_conf.yml
- tags:
- - ceph_deploy
- - ceph_openstack_conf
- - ceph_openstack
-
-- include: ceph_openstack_post.yml
- tags:
- - ceph_deploy
- - ceph_openstack_post
- - ceph_openstack
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/vars/Debian.yml
deleted file mode 100755
index db10bd14..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/vars/Debian.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - ceph-deploy
- - python-flask
- - libgoogle-perftools4
- - libleveldb1v5
- - liblttng-ust0
- - libsnappy1v5
- - librbd1
- - librados2
- - python-ceph
- - ceph
- - ceph-mds
- - ceph-common
- - ceph-fs-common
- - gdisk
-
-services: []
-
-cinder_service: cinder-volume
-nova_service: nova-compute
-glance_service: glance-api
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-osd/tasks/install_osd.yml
deleted file mode 100644
index 363e5e6d..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-osd/tasks/install_osd.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: create osd lv and mount it on /var/local/osd
- script: create_osd.sh
-
-- name: fetch osd keyring from ceph_adm
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
- delegate_to: "{{ public_vip.ip }}"
- when: compute_expansion
-
-- name: copy osd keyring
- copy: src="/tmp/ceph.osd.keyring" dest="/var/lib/ceph/bootstrap-osd/ceph.keyring"
-
-- name: prepare osd disk
- shell: ceph-disk prepare --fs-type xfs /var/local/osd
-
-- name: change local/osd dir owner to ceph
- shell: chown -R ceph:ceph /var/local/osd
- when: ansible_os_family == "Debian"
-
-- name: activate osd node
- shell: ceph-disk activate /var/local/osd
-
-- name: enable ceph service
- service: name=ceph enabled=yes
-
-- name: rebuild osd after reboot
- lineinfile: dest=/etc/init/ceph-osd-all-starter.conf insertafter="^task" line="pre-start script\n set -e\n /opt/setup_storage/losetup.sh\n sleep 3\n mount /dev/storage-volumes/ceph0 /var/local/osd\nend script"
- when: ansible_os_family == "Debian"
-
-- name: rebuild osd after reboot for centos
- lineinfile: dest=/etc/init.d/ceph insertafter="^### END INIT INFO" line="\nsleep 1\nmount /dev/storage-volumes/ceph0 /var/local/osd"
- when: ansible_os_family == "RedHat"
-
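The OSD preparation is a fixed sequence of shell commands. A bare-bones sketch of the same order (commands as in the tasks above; it assumes a host where ceph-disk is installed and /var/local/osd is already mounted):

# Illustrative only: replay the OSD preparation steps in task order.
import subprocess

def run(cmd):
    print("+", cmd)
    subprocess.run(cmd, shell=True, check=True)

run("ceph-disk prepare --fs-type xfs /var/local/osd")
run("chown -R ceph:ceph /var/local/osd")   # Debian family only in the role
run("ceph-disk activate /var/local/osd")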
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/common/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/common/vars/Debian.yml
deleted file mode 100644
index 3d96123c..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/common/vars/Debian.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - ubuntu-cloud-keyring
- - python-dev
- - openvswitch-switch
- - openvswitch-switch-dpdk
- - python-memcache
- - python-iniparse
- - python-lxml
-  #- python-d*  # TODO: remove this
-
-pip_packages:
- - crudini
- - python-keyczar
- - yang2tosca
-
-pip_conf: pip.conf
-
-services:
- - ntp
-
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/dashboard/templates/openstack-dashboard.conf.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/dashboard/templates/openstack-dashboard.conf.j2
deleted file mode 100755
index 664af687..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/dashboard/templates/openstack-dashboard.conf.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
-{% if work_threads > 10 %}
-{% set work_threads = 10 %}
-{% endif %}
-
-<VirtualHost {{ internal_ip }}:80>
- WSGIScriptAlias /horizon {{ horizon_dir }}/wsgi/django.wsgi
- WSGIDaemonProcess horizon user=horizon group=horizon processes=4 threads={{ work_threads }}
- WSGIProcessGroup horizon
- Alias /static {{ horizon_dir }}/static/
- Alias /horizon/static {{ horizon_dir }}/static/
- <Directory {{ horizon_dir }}/wsgi>
- Order allow,deny
- Allow from all
- </Directory>
-</VirtualHost>
-
-
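The deleted Horizon vhost template sizes its WSGI pool as (vcpus + 1) // 2 threads, capped at 10, so a single-core VM still gets one worker thread while a large host does not spawn an oversized pool. A minimal sketch of how such a template is typically deployed, assuming a "restart apache2" handler exists in the same role (the handler itself is not part of this diff):

- name: update horizon vhost config
  template:
    src: openstack-dashboard.conf.j2
    dest: "{{ apache_config_dir }}/sites-available/openstack-dashboard.conf"
    backup: yes
  notify:
    - restart apache2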
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/dashboard/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/dashboard/vars/Debian.yml
deleted file mode 100644
index aaeb8cdb..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/dashboard/vars/Debian.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages: []
-
-services:
- - memcached
- - apache2
-
-apache_config_dir: /etc/apache2
-horizon_dir: /usr/share/openstack-dashboard/openstack_dashboard
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_cluster_debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_cluster_debian.yml
deleted file mode 100644
index 442cd18b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_cluster_debian.yml
+++ /dev/null
@@ -1,69 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: get cluster status
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
- register: cluster_status
- when:
- - inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: start first node to create new cluster
- shell: >
- service mysql bootstrap;
- service mysql start;
- when: |
- inventory_hostname == haproxy_hosts.keys()[0]
- and not cluster_status.stdout | search("OPERATIONAL")
-
-- name: wait for cluster ready
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
- register: cluster_status
- until: cluster_status|success
- failed_when: not cluster_status.stdout | search("OPERATIONAL")
- retries: 10
- delay: 3
- when: |
- inventory_hostname == haproxy_hosts.keys()[0]
- and not cluster_status.stdout | search("OPERATIONAL")
-
-- name: if I in the cluster nodes
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_incoming_addresses"'|awk '{print $2}'
- register: cluster_nodes
- changed_when: false
-
-- name: restart other nodes and join cluster1
- shell: service mysql restart;
- when: |
- inventory_hostname != haproxy_hosts.keys()[0]
- and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
- ignore_errors: True
-
-- name: delay 60 seconds
- shell: sleep 60
-
-- name: restart other nodes and join cluster2
- shell: service mysql restart;
- when: |
- inventory_hostname != haproxy_hosts.keys()[0]
- and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
-
-- name: chmod directory
- shell: >
- chmod 755 -R /var/lib/mysql/ ;
- chmod 755 -R /var/log/mysql/ ;
- chmod 755 -R /etc/mysql/conf.d/;
-
-- name: restart first nodes
- shell: service mysql restart
- when: |
- (inventory_hostname == haproxy_hosts.keys()[0]
- and haproxy_hosts|length > 1
- and not cluster_nodes.stdout | search( '{{ internal_ip }}' ))
-
-
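The deleted mariadb_cluster_debian.yml bootstraps Galera on the first haproxy host and then restarts the remaining nodes so they join it, using the wsrep_evs_state and wsrep_incoming_addresses status variables to decide what to do. A small follow-up check, sketched here in the same shell/awk style (wsrep_cluster_size is a standard Galera status variable; the retry counts are illustrative), would confirm that every node actually joined before later roles start writing to the database:

- name: wait until the galera cluster reaches full size
  shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_cluster_size"'|awk '{print $2}'
  register: cluster_size
  until: cluster_size.stdout | int == haproxy_hosts | length
  retries: 20
  delay: 6
  run_once: true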
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_install.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_install.yml
deleted file mode 100644
index 1b08172d..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_install.yml
+++ /dev/null
@@ -1,70 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: change open file limit
- copy:
- content: "* - nofile 65536 }}"
- dest: "/etc/security/limits.conf"
- mode: 0755
-
-- name: install python-mysqldb
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: maridb_packages | union(packages_noarch)
-
-- name: create conf dir for wsrep
- file: path=/etc/my.cnf.d state=directory mode=0755
- when: ansible_os_family == "RedHat"
-
-- name: update mariadb config file
- template:
- src: '{{ item.src }}'
- dest: '{{ item.dest }}'
- backup: yes
- mode: 0644
- with_items: mysql_config
-
-- name: bugfix for rsync version 3.1
- lineinfile:
- dest: /usr/bin/wsrep_sst_rsync
- state: absent
- regexp: '{{ item }}'
- with_items:
- - "\\s*uid = \\$MYUID$"
- - "\\s*gid = \\$MYGID$"
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: set owner
- file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory mode=0755
-
-- name: get logfile stat
- stat: path='{{ mysql_data_dir }}/ib_logfile0'
- register: logfile_stat
-
-- debug: msg='{{ logfile_stat.stat.exists}}'
-- debug: msg='{{ logfile_stat.stat.size }}'
- when: logfile_stat.stat.exists
-
-- name: rm logfile if exist and size mismatch
- shell: 'rm -rf {{ mysql_data_dir }}/ib_logfile*'
- when: |
- logfile_stat.stat.exists
- and logfile_stat.stat.size != 1073741824
-
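Two details in the deleted mariadb_install.yml are easy to miss: writing "exit 101" to /usr/sbin/policy-rc.d tells Debian's invoke-rc.d not to start services while packages are being installed (the file is removed again afterwards), and 1073741824 bytes is exactly 1 GiB, presumably matching the InnoDB log file size set in the role's mariadb templates (not shown in this diff), so the last task removes stale ib_logfile* files whose size no longer matches. A parameterized sketch of that last task; innodb_log_file_bytes is a hypothetical variable, the deleted task hard-codes the 1 GiB value:

- name: rm logfile if exist and size mismatch
  # innodb_log_file_bytes is illustrative only; it is not defined anywhere in this role
  shell: 'rm -rf {{ mysql_data_dir }}/ib_logfile*'
  when: |
    logfile_stat.stat.exists
    and logfile_stat.stat.size != innodb_log_file_bytes | int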
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/templates/data.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/templates/data.j2
deleted file mode 100644
index 66c2fead..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/templates/data.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/sh
-mysql -uroot -Dmysql <<EOF
-drop database if exists keystone;
-drop database if exists glance;
-drop database if exists neutron;
-drop database if exists nova;
-drop database if exists cinder;
-drop database if exists heat;
-drop database if exists aodh;
-
-CREATE DATABASE keystone;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON keystone.* TO 'keystone'@'{{ host }}' IDENTIFIED BY '{{ KEYSTONE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE glance;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON glance.* TO 'glance'@'{{ host }}' IDENTIFIED BY '{{ GLANCE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE neutron;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON neutron.* TO 'neutron'@'{{ host }}' IDENTIFIED BY '{{ NEUTRON_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE nova;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON nova.* TO 'nova'@'{{ host }}' IDENTIFIED BY '{{ NOVA_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE cinder;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON cinder.* TO 'cinder'@'{{ host }}' IDENTIFIED BY '{{ CINDER_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE heat;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON heat.* TO 'heat'@'{{ host }}' IDENTIFIED BY '{{ HEAT_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE aodh;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON aodh.* TO 'aodh'@'{{ host }}' IDENTIFIED BY '{{ AODH_DBPASS }}';
-{% endfor %}
-
-{% if WSREP_SST_USER is defined %}
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
-{% endfor %}
-{% endif %}
-EOF
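The deleted data.j2 renders to a small shell script that drops and recreates every service database and grants each service user access from '%', 'localhost' and the local hostname. How the rendered script is executed is not part of this diff; a minimal sketch, assuming it is rendered and run once on the first controller, would look like:

- name: render the database bootstrap script
  template: src=data.j2 dest=/opt/data.sh mode=0755

- name: create service databases and users
  shell: /opt/data.sh
  when: inventory_hostname == haproxy_hosts.keys()[0]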
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/Debian.yml
deleted file mode 100644
index 1021524d..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/Debian.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-mongodb_packages:
- - mongodb-server
- - mongodb-clients
- - python-pymongo
-
-mysql_packages:
- - python-mysqldb
- - mysql-server
-
-maridb_packages:
- - apt-transport-https
- - debconf-utils
- - libaio1
- - libc6
- - libdbd-mysql-perl
- - libgcc1
- - libgcrypt20
- - libstdc++6
- - python-software-properties
- - mariadb-client
- - galera-3
- - rsync
- - socat
- - mariadb-galera-server-10.0
- - python-mysqldb
-
-pip_packages: []
-
-services: []
-
-mongodb_service: mongodb
-mysql_config:
- - dest: /etc/mysql/my.cnf
- src: my.cnf
- - dest: /etc/mysql/conf.d/wsrep.cnf
- src: wsrep.cnf
-
-mysql_config_dir: /etc/mysql/conf.d
-mysql_data_dir: /var/lib/mysql
-
-mongodb_config:
- dest: /etc/mongodb.conf
- src: mongodb.conf
- journal: /var/lib/mongodb/journal/*
-
-wsrep_provider_file: "/usr/lib/galera/libgalera_smm.so"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/main.yml
deleted file mode 100644
index a32897f0..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-
-services_noarch:
- - mysql
-
-credentials:
- - user: keystone
- db: keystone
- password: "{{ KEYSTONE_DBPASS }}"
- - user: neutron
- db: neutron
- password: "{{ NEUTRON_DBPASS }}"
- - user: glance
- db: glance
- password: "{{ GLANCE_DBPASS }}"
- - user: nova
- db: nova_api
- password: "{{ NOVA_DBPASS }}"
- - user: nova
- db: nova
- password: "{{ NOVA_DBPASS }}"
- - user: cinder
- db: cinder
- password: "{{ CINDER_DBPASS }}"
- - user: heat
- db: heat
- password: "{{ HEAT_DBPASS }}"
- - user: aodh
- db: aodh
- password: "{{ AODH_DBPASS }}"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/handlers/main.yml
deleted file mode 100644
index 36e39072..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/handlers/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart neutron-plugin-openvswitch-agent
- service: name=neutron-openvswitch-agent state=restarted enabled=yes
- when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron-l3-agent
- service: name=neutron-l3-agent state=restarted enabled=yes
-
-- name: kill dnsmasq
- command: killall dnsmasq
- ignore_errors: True
-
-- name: restart neutron-dhcp-agent
- service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
- service: name=neutron-metadata-agent state=restarted enabled=yes
-
-- name: restart xorp
- service: name=xorp state=restarted enabled=yes sleep=10
- ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/tasks/main.yml
deleted file mode 100644
index a8bce16e..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/tasks/main.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-# FIXME: temporary workaround for openstack api access random failure
-- name: restart api server
- service: name={{ item }} state=restarted enabled=yes
- with_items: api_services | union(api_services_noarch)
- ignore_errors: True
-
-- name: restart neutron server
- service: name=neutron-server state=restarted enabled=yes
-
-- name: create external net
- neutron_network:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.network }}"
- provider_network_type: "{{ public_net_info.type }}"
- provider_physical_network: "{{ public_net_info.provider_network }}"
- provider_segmentation_id: "{{ public_net_info.segment_id}}"
- shared: false
- router_external: yes
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
-
-- name: create external subnet
- neutron_subnet:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.subnet }}"
- network_name: "{{ public_net_info.network }}"
- cidr: "{{ public_net_info.floating_ip_cidr }}"
- enable_dhcp: "{{ public_net_info.enable_dhcp }}"
- no_gateway: "{{ public_net_info.no_gateway }}"
- gateway_ip: "{{ public_net_info.external_gw }}"
- allocation_pool_start: "{{ public_net_info.floating_ip_start }}"
- allocation_pool_end: "{{ public_net_info.floating_ip_end }}"
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
-
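The deleted ext-network tasks use the neutron_network and neutron_subnet Ansible modules to create the external network and its floating-IP subnet once the API services are back up. An equivalent, hedged sketch using the neutron CLI and the admin-openrc.sh file written to /opt by the keystone role (only the network half is shown):

- name: create external net with the neutron CLI
  shell: >
    . /opt/admin-openrc.sh;
    neutron net-create {{ public_net_info.network }}
    --provider:network_type {{ public_net_info.type }}
    --provider:physical_network {{ public_net_info.provider_network }}
    --router:external;
  run_once: true
  when: 'public_net_info.enable == True'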
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/Debian.yml
deleted file mode 100644
index 0b5c78b6..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/Debian.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-api_services:
- - nova-api
- - glance-api
- - ceilometer-api
- - heat-api
- - heat-api-cfn
- - aodh-api
- - cinder-api
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/RedHat.yml
deleted file mode 100644
index 886401fd..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/RedHat.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-api_services:
- - openstack-nova-api
- - openstack-glance-api
- - openstack-ceilometer-api
- - openstack-heat-api
- - openstack-heat-api-cfn
- - openstack-cinder-api
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/main.yml
deleted file mode 100644
index b19b6ebf..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/vars/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-api_services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/tasks/nfs.yml
deleted file mode 100644
index 9dc72e31..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/tasks/nfs.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: install nfs packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: nfs_packages
-
-- name: install nfs
- local_action: yum name={{ item }} state=present
- with_items:
- - rpcbind
- - nfs-utils
- run_once: True
-
-- name: create image directory
- local_action: file path=/opt/images state=directory mode=0777
- run_once: True
-
-- name: remove nfs config item if exist
- local_action: lineinfile dest=/etc/exports state=absent
- regexp="^/opt/images"
- run_once: True
-
-- name: update nfs config
- local_action: lineinfile dest=/etc/exports state=present
- line="/opt/images *(rw,insecure,sync,all_squash)"
- run_once: True
-
-- name: restart compass nfs service
- local_action: service name={{ item }} state=restarted enabled=yes
- with_items:
- - rpcbind
- - nfs-server
- run_once: True
-
-- name: get mount info
- command: mount
- register: mount_info
- tags:
- - recovery
-
-- name: get nfs server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: ip_info
- tags:
- - recovery
-
-- name: restart host nfs service
- service: name={{ item }} state=restarted enabled=yes
- with_items: '{{ nfs_services }}'
-
-- name: mount image directory
- shell: |
- mkdir -p /var/lib/glance/images
- mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images
- sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- #echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
- when: mount_info.stdout.find('images') == -1
- retries: 5
- delay: 3
- tags:
- - recovery
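The deleted nfs.yml mounts the Compass server's /opt/images export at /var/lib/glance/images by hand and deliberately keeps the entry out of /etc/fstab (the sed line removes it, and the persistent variant is left commented out). For comparison, a sketch of the same mount using Ansible's mount module; note that state=mounted also writes the fstab entry, so it changes that behaviour rather than being a drop-in replacement:

- name: mount glance image directory via the mount module
  mount:
    name: /var/lib/glance/images
    src: "{{ ip_info.stdout_lines[0] }}:/opt/images"
    fstype: nfs
    opts: nfsvers=3
    state: mounted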
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/Debian.yml
deleted file mode 100644
index d1825012..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/Debian.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - glance
- - nfs-common
-
-nfs_packages:
- - nfs-common
-
-nfs_services: []
-
-services:
- - glance-registry
- - glance-api
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/RedHat.yml
deleted file mode 100644
index 2987d0c4..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/RedHat.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - openstack-glance
- - rpcbind
-
-nfs_packages:
- - nfs-utils
- - rpcbind
-
-nfs_services:
- - rpcbind
-
-services:
- - openstack-glance-api
- - openstack-glance-registry
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ha/templates/haproxy.cfg
deleted file mode 100644
index c0a0747d..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ha/templates/haproxy.cfg
+++ /dev/null
@@ -1,216 +0,0 @@
-
-global
- #chroot /var/run/haproxy
- daemon
- user haproxy
- group haproxy
- maxconn 4000
- pidfile /var/run/haproxy/haproxy.pid
- #log 127.0.0.1 local0
- tune.bufsize 1000000
- stats socket /var/run/haproxy.sock
- stats timeout 2m
-
-defaults
- log global
- maxconn 8000
- option redispatch
- option dontlognull
- option splice-auto
- timeout http-request 10s
- timeout queue 1m
- timeout connect 10s
- timeout client 50s
- timeout server 50s
- timeout check 10s
- retries 3
-
-listen proxy-mysql
- bind {{ internal_vip.ip }}:3306
- option tcpka
- option tcplog
- balance source
-{% for host, ip in haproxy_hosts.items() %}
-{% if loop.index == 1 %}
- server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5
-{% else %}
- server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5 backup
-{% endif %}
-{% endfor %}
-
-listen proxy-rabbit
- bind {{ internal_vip.ip }}:5672
- bind {{ public_vip.ip }}:5672
-
- option tcpka
- option tcplog
- timeout client 3h
- timeout server 3h
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:5672 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-glance_registry_cluster
- bind {{ internal_vip.ip }}:9191
- bind {{ public_vip.ip }}:9191
- option tcpka
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:9191 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-glance_api_cluster
- bind {{ internal_vip.ip }}:9292
- bind {{ public_vip.ip }}:9292
- option tcpka
- option tcplog
- option httpchk
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:9292 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova-novncproxy
- bind {{ internal_vip.ip }}:6080
- bind {{ public_vip.ip }}:6080
- option tcpka
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:6080 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-network
- bind {{ internal_vip.ip }}:9696
- bind {{ public_vip.ip }}:9696
- option tcpka
- option tcplog
- balance source
- option httpchk
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:9696 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-volume
- bind {{ internal_vip.ip }}:8776
- bind {{ public_vip.ip }}:8776
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-keystone_admin_cluster
- bind {{ internal_vip.ip }}:35357
- bind {{ public_vip.ip }}:35357
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:35357 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-keystone_public_internal_cluster
- bind {{ internal_vip.ip }}:5000
- bind {{ public_vip.ip }}:5000
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:5000 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova_compute_api_cluster
- bind {{ internal_vip.ip }}:8774
- bind {{ public_vip.ip }}:8774
- mode tcp
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8774 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova_metadata_api_cluster
- bind {{ internal_vip.ip }}:8775
- bind {{ public_vip.ip }}:8775
- option tcpka
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8775 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-cinder_api_cluster
- bind {{ internal_vip.ip }}:8776
- bind {{ public_vip.ip }}:8776
- mode tcp
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-#listen proxy-swift-proxy
-# bind {{ internal_vip.ip }}:8080
-# bind {{ public_vip.ip }}:8080
-# balance source
-# option tcpka
-# option tcplog
-#{% for host,ip in haproxy_hosts.items() %}
-# server {{ host }} {{ ip }}:8080 weight 1 check inter 2000 rise 2 fall 5
-#{% endfor %}
-
-listen proxy-ceilometer_api_cluster
- bind {{ internal_vip.ip }}:8777
- bind {{ public_vip.ip }}:8777
- mode tcp
- option tcp-check
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8777 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-aodh_api_cluster
- bind {{ internal_vip.ip }}:8042
- bind {{ public_vip.ip }}:8042
- mode tcp
- option tcp-check
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8042 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-dashboarad
- bind {{ public_vip.ip }}:80
- mode http
- balance source
- capture cookie vgnvisitor= len 32
- cookie SERVERID insert indirect nocache
- option forwardfor
- option httpchk
- option httpclose
- rspidel ^Set-cookie:\ IP=
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:80 cookie {{ host }} weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen stats
- mode http
- bind 0.0.0.0:9999
- stats enable
- stats refresh 30s
- stats uri /
- stats realm Global\ statistics
- stats auth admin:admin
-
-
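The deleted haproxy.cfg template balances every OpenStack endpoint on both the internal and public VIPs; for MySQL only the first haproxy host is active and the remaining nodes are declared as backup, which matches the Galera bootstrap order in the database role above. A sketch of how such a template is usually installed, validating the rendered file before HAProxy is reloaded (the restart handler name is assumed):

- name: update haproxy configuration
  template:
    src: haproxy.cfg
    dest: /etc/haproxy/haproxy.cfg
    backup: yes
    validate: 'haproxy -c -f %s'
  notify:
    - restart haproxy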
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/heat/tasks/heat_install.yml
deleted file mode 100644
index b90e6402..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/heat/tasks/heat_install.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install heat related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: generate heat service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-# '
-
-- name: create heat user domain
- shell: >
- . /opt/admin-openrc-v3.sh;
- openstack domain create --description "Stack projects and users" heat;
- openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
- openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
- openstack role create heat_stack_owner;
- openstack role add --project demo --user demo heat_stack_owner;
- when: inventory_hostname == groups['controller'][0]
-
-- name: update heat conf
- template: src=heat.j2
- dest=/etc/heat/heat.conf
- backup=yes
- notify:
- - restart heat service
- - remove heat-sqlite-db
-
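The heat user-domain block in the deleted heat_install.yml runs a fixed, unguarded sequence of openstack commands on the first controller. A hedged sketch of the same step with a simple existence check (only the domain is shown; the user and role commands can be guarded the same way):

- name: create heat user domain if missing
  shell: >
    . /opt/admin-openrc-v3.sh;
    openstack domain show heat ||
    openstack domain create --description "Stack projects and users" heat;
  when: inventory_hostname == groups['controller'][0]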
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/heat/templates/heat.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/heat/templates/heat.j2
deleted file mode 100644
index 62df9fd9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/heat/templates/heat.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-[DEFAULT]
-heat_metadata_server_url = http://{{ internal_vip.ip }}:8000
-heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-log_dir = /var/log/heat
-stack_domain_admin = heat_domain_admin
-stack_domain_admin_password = {{ HEAT_PASS }}
-stack_user_domain_name = heat
-
-[database]
-connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[ec2authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = heat
-admin_password = {{ HEAT_PASS }}
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/tasks/keystone_install.yml
deleted file mode 100644
index 79d02729..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/tasks/keystone_install.yml
+++ /dev/null
@@ -1,98 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install keystone packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: disable boot auto start
- file:
- path={{ item }}
- state=absent
- with_items:
- - /etc/init.d/keystone
- - /etc/init/keystone.conf
- - /lib/systemd/system/keystone.service
- when: ansible_os_family == "Debian"
-
-- name: generate keystone service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: delete sqlite database
- file:
- path: /var/lib/keystone/keystone.db
- state: absent
-
-- name: update keystone conf
- template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
- notify:
- - restart keystone services
-
-- name: assure listen port exist
- lineinfile:
- dest: '{{ apache_config_dir }}/ports.conf'
- regexp: '{{ item.regexp }}'
- line: '{{ item.line}}'
- with_items:
- - regexp: "^Listen {{ internal_ip }}:5000"
- line: "Listen {{ internal_ip }}:5000"
- - regexp: "^Listen {{ internal_ip }}:35357"
- line: "Listen {{ internal_ip }}:35357"
- notify:
- - restart keystone services
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
- when: ansible_os_family == 'Debian'
- notify:
- - restart keystone services
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/wsgi-keystone.conf'
- when: ansible_os_family == 'RedHat'
- notify:
- - restart keystone services
-
-- name: enable keystone server
- file:
- src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
- dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
- state: "link"
- when: ansible_os_family == 'Debian'
- notify:
- - restart keystone services
-
-- name: keystone source files
- template: src={{ item }} dest=/opt/{{ item }}
- with_items:
- - admin-openrc.sh
- - demo-openrc.sh
- - admin-openrc-v3.sh
-
-- meta: flush_handlers
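The deleted keystone_install.yml ends with meta: flush_handlers so that the queued "restart keystone services" handler runs immediately, before later tasks talk to the freshly configured API. The handler itself lives in the role's handlers file, which is not part of this diff; on Debian, where Keystone runs entirely under Apache, it would plausibly be little more than:

- name: restart keystone services
  service: name=apache2 state=restarted enabled=yes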
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/wsgi-keystone.conf.j2
deleted file mode 100644
index 55c89839..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/wsgi-keystone.conf.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
-{% if work_threads > 10 %}
-{% set work_threads = 10 %}
-{% endif %}
-
-<VirtualHost {{ internal_ip }}:5000>
- WSGIDaemonProcess keystone-public processes=4 threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-public
- WSGIScriptAlias / /usr/bin/keystone-wsgi-public
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/{{ http_service_name }}/keystone.log
- CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-
-<VirtualHost {{ internal_ip }}:35357>
- WSGIDaemonProcess keystone-admin processes=4 threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/{{ http_service_name }}/keystone.log
- CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/Debian.yml
deleted file mode 100644
index 6000c6fd..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/Debian.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-cron_path: "/var/spool/cron/crontabs"
-
-packages:
- - keystone
- - apache2
- - libapache2-mod-wsgi
- - python-keystone
- - python-openstackclient
-
-services:
- - apache2
-
-apache_config_dir: /etc/apache2
-http_service_name: apache2
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/main.yml
deleted file mode 100644
index b4d3d7e0..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/main.yml
+++ /dev/null
@@ -1,179 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch:
- - python-keystoneclient
-
-services_noarch: []
-os_services:
- - name: keystone
- type: identity
- region: RegionOne
- description: "OpenStack Identity"
- publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
-
- - name: glance
- type: image
- region: RegionOne
- description: "OpenStack Image Service"
- publicurl: "http://{{ public_vip.ip }}:9292"
- internalurl: "http://{{ internal_vip.ip }}:9292"
- adminurl: "http://{{ internal_vip.ip }}:9292"
-
- - name: nova
- type: compute
- region: RegionOne
- description: "OpenStack Compute"
- publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
-
- - name: neutron
- type: network
- region: RegionOne
- description: "OpenStack Networking"
- publicurl: "http://{{ public_vip.ip }}:9696"
- internalurl: "http://{{ internal_vip.ip }}:9696"
- adminurl: "http://{{ internal_vip.ip }}:9696"
-
- - name: ceilometer
- type: metering
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8777"
- internalurl: "http://{{ internal_vip.ip }}:8777"
- adminurl: "http://{{ internal_vip.ip }}:8777"
-
- - name: aodh
- type: alarming
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8042"
- internalurl: "http://{{ internal_vip.ip }}:8042"
- adminurl: "http://{{ internal_vip.ip }}:8042"
-
- - name: cinder
- type: volume
- region: RegionOne
- description: "OpenStack Block Storage"
- publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-
- - name: cinderv2
- type: volumev2
- region: RegionOne
- description: "OpenStack Block Storage v2"
- publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-
- - name: heat
- type: orchestration
- region: RegionOne
- description: "OpenStack Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
-
- - name: heat-cfn
- type: cloudformation
- region: RegionOne
- description: "OpenStack CloudFormation Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8000/v1"
- internalurl: "http://{{ internal_vip.ip }}:8000/v1"
- adminurl: "http://{{ internal_vip.ip }}:8000/v1"
-
-# - name: swift
-# type: object-store
-# region: RegionOne
-# description: "OpenStack Object Storage"
-# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-
-os_users:
- - user: admin
- password: "{{ ADMIN_PASS }}"
- email: admin@admin.com
- role: admin
- tenant: admin
- tenant_description: "Admin Tenant"
-
- - user: glance
- password: "{{ GLANCE_PASS }}"
- email: glance@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: nova
- password: "{{ NOVA_PASS }}"
- email: nova@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: keystone
- password: "{{ KEYSTONE_PASS }}"
- email: keystone@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: neutron
- password: "{{ NEUTRON_PASS }}"
- email: neutron@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: ceilometer
- password: "{{ CEILOMETER_PASS }}"
- email: ceilometer@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: cinder
- password: "{{ CINDER_PASS }}"
- email: cinder@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: aodh
- password: "{{ AODH_PASS }}"
- email: aodh@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: heat
- password: "{{ HEAT_PASS }}"
- email: heat@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: demo
- password: "{{ DEMO_PASS }}"
- email: heat@demo.com
- role: heat_stack_user
- tenant: demo
- tenant_description: "Demo Tenant"
-
-# - user: swift
-# password: "{{ CINDER_PASS }}"
-# email: swift@admin.com
-# role: admin
-# tenant: service
-# tenant_description: "Service Tenant"
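The os_services and os_users lists in the deleted keystone vars file only describe the service catalog; the tasks that register it live elsewhere in the role. An illustrative sketch of consuming os_services with the openstack CLI (the existence check and run_once placement are assumptions, not the role's actual implementation):

- name: register keystone services from os_services
  shell: >
    . /opt/admin-openrc-v3.sh;
    openstack service show {{ item.name }} ||
    openstack service create --name {{ item.name }}
    --description "{{ item.description }}" {{ item.type }};
  with_items: os_services
  run_once: true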
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/controllers.py b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/controllers.py
deleted file mode 100644
index f6ec8be5..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/controllers.py
+++ /dev/null
@@ -1,1062 +0,0 @@
-# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
-# This software is distributed under the terms and conditions of
-# the 'Apache-2.0' license which can be found in the file 'LICENSE' in this
-# package distribution or at 'http://www.apache.org/licenses/LICENSE-2.0'.
-
-from keystone.common import controller
-from keystone import config
-from keystone import exception
-from keystone.models import token_model
-from keystone.contrib.moon.exception import * # noqa: F403
-from oslo_log import log
-from uuid import uuid4
-import requests
-
-
-CONF = config.CONF
-LOG = log.getLogger(__name__)
-
-
-@dependency.requires('configuration_api') # noqa: F405
-class Configuration(controller.V3Controller):
- collection_name = 'configurations'
- member_name = 'configuration'
-
- def __init__(self):
- super(Configuration, self).__init__()
-
- def _get_user_id_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref.get('user')
-
- @controller.protected()
- def get_policy_templates(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.configuration_api.get_policy_templates_dict(user_id)
-
- @controller.protected()
- def get_aggregation_algorithms(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.configuration_api.get_aggregation_algorithms_dict(user_id)
-
- @controller.protected()
- def get_sub_meta_rule_algorithms(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.configuration_api.get_sub_meta_rule_algorithms_dict(
- user_id)
-
-
-@dependency.requires('tenant_api', 'resource_api') # noqa: F405
-class Tenants(controller.V3Controller):
-
- def __init__(self):
- super(Tenants, self).__init__()
-
- def _get_user_id_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref.get('user')
-
- @controller.protected()
- def get_tenants(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.tenant_api.get_tenants_dict(user_id)
-
- def __get_keystone_tenant_dict(
- self, tenant_id="", tenant_name="", tenant_description="", domain="default"): # noqa
- tenants = self.resource_api.list_projects()
- for tenant in tenants:
- if tenant_id and tenant_id == tenant['id']:
- return tenant
- if tenant_name and tenant_name == tenant['name']:
- return tenant
- if not tenant_id:
- tenant_id = uuid4().hex
- if not tenant_name:
- tenant_name = tenant_id
- tenant = {
- "id": tenant_id,
- "name": tenant_name,
- "description": tenant_description,
- "enabled": True,
- "domain_id": domain
- }
- keystone_tenant = self.resource_api.create_project(
- tenant["id"], tenant)
- return keystone_tenant
-
- @controller.protected()
- def add_tenant(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- k_tenant_dict = self.__get_keystone_tenant_dict(
- tenant_name=kw.get('tenant_name'),
- tenant_description=kw.get(
- 'tenant_description', kw.get('tenant_name')),
- domain=kw.get('tenant_domain', "default"),
-
- )
- tenant_dict = dict()
- tenant_dict['id'] = k_tenant_dict['id']
- tenant_dict['name'] = kw.get('tenant_name', None)
- tenant_dict['description'] = kw.get('tenant_description', None)
- tenant_dict['intra_authz_extension_id'] = kw.get(
- 'tenant_intra_authz_extension_id', None)
- tenant_dict['intra_admin_extension_id'] = kw.get(
- 'tenant_intra_admin_extension_id', None)
- return self.tenant_api.add_tenant_dict(
- user_id, tenant_dict['id'], tenant_dict)
-
- @controller.protected()
- def get_tenant(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- tenant_id = kw.get('tenant_id', None)
- return self.tenant_api.get_tenant_dict(user_id, tenant_id)
-
- @controller.protected()
- def del_tenant(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- tenant_id = kw.get('tenant_id', None)
- return self.tenant_api.del_tenant(user_id, tenant_id)
-
- @controller.protected()
- def set_tenant(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- # Next line will raise an error if tenant doesn't exist
- k_tenant_dict = self.resource_api.get_project(
- kw.get('tenant_id', None))
- tenant_id = kw.get('tenant_id', None)
- tenant_dict = dict()
- tenant_dict['name'] = k_tenant_dict.get('name', None)
- if 'tenant_description' in kw:
- tenant_dict['description'] = kw.get('tenant_description', None)
- if 'tenant_intra_authz_extension_id' in kw:
- tenant_dict['intra_authz_extension_id'] = kw.get(
- 'tenant_intra_authz_extension_id', None)
- if 'tenant_intra_admin_extension_id' in kw:
- tenant_dict['intra_admin_extension_id'] = kw.get(
- 'tenant_intra_admin_extension_id', None)
- self.tenant_api.set_tenant_dict(user_id, tenant_id, tenant_dict)
-
-
-def callback(self, context, prep_info, *args, **kwargs):
- token_ref = ""
- if context.get('token_id') is not None:
- token_ref = token_model.KeystoneToken(
- token_id=context['token_id'],
- token_data=self.token_provider_api.validate_token(
- context['token_id']))
- if not token_ref:
- raise exception.Unauthorized
-
-
-@dependency.requires('authz_api') # noqa: F405
-class Authz_v3(controller.V3Controller):
-
- def __init__(self):
- super(Authz_v3, self).__init__()
-
- @controller.protected(callback)
- def get_authz(self, context, tenant_id, subject_k_id,
- object_name, action_name):
- try:
- return self.authz_api.authz(
- tenant_id, subject_k_id, object_name, action_name)
- except Exception as e:
- return {'authz': False, 'comment': unicode(e)}
-
-
-@dependency.requires('admin_api', 'root_api') # noqa: F405
-class IntraExtensions(controller.V3Controller):
- collection_name = 'intra_extensions'
- member_name = 'intra_extension'
-
- def __init__(self):
- super(IntraExtensions, self).__init__()
-
- def _get_user_id_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref.get('user')['id']
-
- # IntraExtension functions
- @controller.protected()
- def get_intra_extensions(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.admin_api.get_intra_extensions_dict(user_id)
-
- @controller.protected()
- def add_intra_extension(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_dict = dict()
- intra_extension_dict['name'] = kw.get('intra_extension_name', None)
- intra_extension_dict['model'] = kw.get('intra_extension_model', None)
- intra_extension_dict['genre'] = kw.get('intra_extension_genre', None)
- intra_extension_dict['description'] = kw.get(
- 'intra_extension_description', None)
- intra_extension_dict['subject_categories'] = kw.get(
- 'intra_extension_subject_categories', dict())
- intra_extension_dict['object_categories'] = kw.get(
- 'intra_extension_object_categories', dict())
- intra_extension_dict['action_categories'] = kw.get(
- 'intra_extension_action_categories', dict())
- intra_extension_dict['subjects'] = kw.get(
- 'intra_extension_subjects', dict())
- intra_extension_dict['objects'] = kw.get(
- 'intra_extension_objects', dict())
- intra_extension_dict['actions'] = kw.get(
- 'intra_extension_actions', dict())
- intra_extension_dict['subject_scopes'] = kw.get(
- 'intra_extension_subject_scopes', dict())
- intra_extension_dict['object_scopes'] = kw.get(
- 'intra_extension_object_scopes', dict())
- intra_extension_dict['action_scopes'] = kw.get(
- 'intra_extension_action_scopes', dict())
- intra_extension_dict['subject_assignments'] = kw.get(
- 'intra_extension_subject_assignments', dict())
- intra_extension_dict['object_assignments'] = kw.get(
- 'intra_extension_object_assignments', dict())
- intra_extension_dict['action_assignments'] = kw.get(
- 'intra_extension_action_assignments', dict())
- intra_extension_dict['aggregation_algorithm'] = kw.get(
- 'intra_extension_aggregation_algorithm', dict())
- intra_extension_dict['sub_meta_rules'] = kw.get(
- 'intra_extension_sub_meta_rules', dict())
- intra_extension_dict['rules'] = kw.get('intra_extension_rules', dict())
- ref = self.admin_api.load_intra_extension_dict(
- user_id, intra_extension_dict=intra_extension_dict)
- return self.admin_api.populate_default_data(ref)
-
- @controller.protected()
- def get_intra_extension(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_intra_extension_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def del_intra_extension(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- self.admin_api.del_intra_extension(user_id, intra_extension_id)
-
- @controller.protected()
- def set_intra_extension(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- intra_extension_dict = dict()
- intra_extension_dict['name'] = kw.get('intra_extension_name', None)
- intra_extension_dict['model'] = kw.get('intra_extension_model', None)
- intra_extension_dict['genre'] = kw.get('intra_extension_genre', None)
- intra_extension_dict['description'] = kw.get(
- 'intra_extension_description', None)
- return self.admin_api.set_intra_extension_dict(
- user_id, intra_extension_id, intra_extension_dict)
-
- @controller.protected()
- def load_root_intra_extension(self, context, **kw):
- self.root_api.load_root_intra_extension_dict()
-
- # Metadata functions
- @controller.protected()
- def get_subject_categories(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_subject_categories_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def add_subject_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_dict = dict()
- subject_category_dict['name'] = kw.get('subject_category_name', None)
- subject_category_dict['description'] = kw.get(
- 'subject_category_description', None)
- return self.admin_api.add_subject_category_dict(
- user_id, intra_extension_id, subject_category_dict)
-
- @controller.protected()
- def get_subject_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- return self.admin_api.get_subject_category_dict(
- user_id, intra_extension_id, subject_category_id)
-
- @controller.protected()
- def del_subject_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- self.admin_api.del_subject_category(
- user_id, intra_extension_id, subject_category_id)
-
- @controller.protected()
- def set_subject_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_category_dict = dict()
- subject_category_dict['name'] = kw.get('subject_category_name', None)
- subject_category_dict['description'] = kw.get(
- 'subject_category_description', None)
- return self.admin_api.set_subject_category_dict(
- user_id, intra_extension_id, subject_category_id, subject_category_dict) # noqa
-
- @controller.protected()
- def get_object_categories(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_object_categories_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def add_object_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_dict = dict()
- object_category_dict['name'] = kw.get('object_category_name', None)
- object_category_dict['description'] = kw.get(
- 'object_category_description', None)
- return self.admin_api.add_object_category_dict(
- user_id, intra_extension_id, object_category_dict)
-
- @controller.protected()
- def get_object_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
-        return self.admin_api.get_object_category_dict(
-            user_id, intra_extension_id, object_category_id)
-
- @controller.protected()
- def del_object_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- self.admin_api.del_object_category(
- user_id, intra_extension_id, object_category_id)
-
- @controller.protected()
- def set_object_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_category_dict = dict()
- object_category_dict['name'] = kw.get('object_category_name', None)
- object_category_dict['description'] = kw.get(
- 'object_category_description', None)
- return self.admin_api.set_object_category_dict(
- user_id, intra_extension_id, object_category_id, object_category_dict) # noqa
-
- @controller.protected()
- def get_action_categories(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_action_categories_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def add_action_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_dict = dict()
- action_category_dict['name'] = kw.get('action_category_name', None)
- action_category_dict['description'] = kw.get(
- 'action_category_description', None)
- return self.admin_api.add_action_category_dict(
- user_id, intra_extension_id, action_category_dict)
-
- @controller.protected()
- def get_action_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
-        return self.admin_api.get_action_category_dict(
-            user_id, intra_extension_id, action_category_id)
-
- @controller.protected()
- def del_action_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- self.admin_api.del_action_category(
- user_id, intra_extension_id, action_category_id)
-
- @controller.protected()
- def set_action_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_category_dict = dict()
- action_category_dict['name'] = kw.get('action_category_name', None)
- action_category_dict['description'] = kw.get(
- 'action_category_description', None)
- return self.admin_api.set_action_category_dict(
- user_id, intra_extension_id, action_category_id, action_category_dict) # noqa
-
- # Perimeter functions
- @controller.protected()
- def get_subjects(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_subjects_dict(user_id, intra_extension_id)
-
- @controller.protected()
- def add_subject(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_dict = dict()
- subject_dict['name'] = kw.get('subject_name', None)
- subject_dict['description'] = kw.get('subject_description', None)
- subject_dict['password'] = kw.get('subject_password', None)
- subject_dict['email'] = kw.get('subject_email', None)
- return self.admin_api.add_subject_dict(
- user_id, intra_extension_id, subject_dict)
-
- @controller.protected()
- def get_subject(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- return self.admin_api.get_subject_dict(
- user_id, intra_extension_id, subject_id)
-
- @controller.protected()
- def del_subject(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- self.admin_api.del_subject(user_id, intra_extension_id, subject_id)
-
- @controller.protected()
- def set_subject(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- subject_dict = dict()
- subject_dict['name'] = kw.get('subject_name', None)
- subject_dict['description'] = kw.get('subject_description', None)
- return self.admin_api.set_subject_dict(
- user_id, intra_extension_id, subject_id, subject_dict)
-
- @controller.protected()
- def get_objects(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_objects_dict(user_id, intra_extension_id)
-
- @controller.protected()
- def add_object(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_dict = dict()
- object_dict['name'] = kw.get('object_name', None)
- object_dict['description'] = kw.get('object_description', None)
- return self.admin_api.add_object_dict(
- user_id, intra_extension_id, object_dict)
-
- @controller.protected()
- def get_object(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- return self.admin_api.get_object_dict(
- user_id, intra_extension_id, object_id)
-
- @controller.protected()
- def del_object(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- self.admin_api.del_object(user_id, intra_extension_id, object_id)
-
- @controller.protected()
- def set_object(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- object_dict = dict()
- object_dict['name'] = kw.get('object_name', None)
- object_dict['description'] = kw.get('object_description', None)
- return self.admin_api.set_object_dict(
- user_id, intra_extension_id, object_id, object_dict)
-
- @controller.protected()
- def get_actions(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_actions_dict(user_id, intra_extension_id)
-
- @controller.protected()
- def add_action(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_dict = dict()
- action_dict['name'] = kw.get('action_name', None)
- action_dict['description'] = kw.get('action_description', None)
- return self.admin_api.add_action_dict(
- user_id, intra_extension_id, action_dict)
-
- @controller.protected()
- def get_action(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- return self.admin_api.get_action_dict(
- user_id, intra_extension_id, action_id)
-
- @controller.protected()
- def del_action(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- self.admin_api.del_action(user_id, intra_extension_id, action_id)
-
- @controller.protected()
- def set_action(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- action_dict = dict()
- action_dict['name'] = kw.get('action_name', None)
- action_dict['description'] = kw.get('action_description', None)
- return self.admin_api.set_action_dict(
- user_id, intra_extension_id, action_id, action_dict)
-
- # Scope functions
- @controller.protected()
- def get_subject_scopes(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- return self.admin_api.get_subject_scopes_dict(
- user_id, intra_extension_id, subject_category_id)
-
- @controller.protected()
- def add_subject_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_dict = dict()
- subject_scope_dict['name'] = kw.get('subject_scope_name', None)
- subject_scope_dict['description'] = kw.get(
- 'subject_scope_description', None)
- return self.admin_api.add_subject_scope_dict(
- user_id, intra_extension_id, subject_category_id, subject_scope_dict) # noqa
-
- @controller.protected()
- def get_subject_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- return self.admin_api.get_subject_scope_dict(
- user_id, intra_extension_id, subject_category_id, subject_scope_id)
-
- @controller.protected()
- def del_subject_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- self.admin_api.del_subject_scope(
- user_id,
- intra_extension_id,
- subject_category_id,
- subject_scope_id)
-
- @controller.protected()
- def set_subject_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- subject_scope_dict = dict()
- subject_scope_dict['name'] = kw.get('subject_scope_name', None)
- subject_scope_dict['description'] = kw.get(
- 'subject_scope_description', None)
- return self.admin_api.set_subject_scope_dict(
- user_id, intra_extension_id, subject_category_id, subject_scope_id, subject_scope_dict) # noqa
-
- @controller.protected()
- def get_object_scopes(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- return self.admin_api.get_object_scopes_dict(
- user_id, intra_extension_id, object_category_id)
-
- @controller.protected()
- def add_object_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_dict = dict()
- object_scope_dict['name'] = kw.get('object_scope_name', None)
- object_scope_dict['description'] = kw.get(
- 'object_scope_description', None)
- return self.admin_api.add_object_scope_dict(
- user_id, intra_extension_id, object_category_id, object_scope_dict)
-
- @controller.protected()
- def get_object_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- return self.admin_api.get_object_scope_dict(
- user_id, intra_extension_id, object_category_id, object_scope_id)
-
- @controller.protected()
- def del_object_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- self.admin_api.del_object_scope(
- user_id,
- intra_extension_id,
- object_category_id,
- object_scope_id)
-
- @controller.protected()
- def set_object_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- object_scope_dict = dict()
- object_scope_dict['name'] = kw.get('object_scope_name', None)
- object_scope_dict['description'] = kw.get(
- 'object_scope_description', None)
- return self.admin_api.set_object_scope_dict(
- user_id, intra_extension_id, object_category_id, object_scope_id, object_scope_dict) # noqa
-
- @controller.protected()
- def get_action_scopes(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- return self.admin_api.get_action_scopes_dict(
- user_id, intra_extension_id, action_category_id)
-
- @controller.protected()
- def add_action_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_dict = dict()
- action_scope_dict['name'] = kw.get('action_scope_name', None)
- action_scope_dict['description'] = kw.get(
- 'action_scope_description', None)
- return self.admin_api.add_action_scope_dict(
- user_id, intra_extension_id, action_category_id, action_scope_dict)
-
- @controller.protected()
- def get_action_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- return self.admin_api.get_action_scope_dict(
- user_id, intra_extension_id, action_category_id, action_scope_id)
-
- @controller.protected()
- def del_action_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- self.admin_api.del_action_scope(
- user_id,
- intra_extension_id,
- action_category_id,
- action_scope_id)
-
- @controller.protected()
- def set_action_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- action_scope_dict = dict()
- action_scope_dict['name'] = kw.get('action_scope_name', None)
- action_scope_dict['description'] = kw.get(
- 'action_scope_description', None)
- return self.admin_api.set_action_scope_dict(
- user_id, intra_extension_id, action_category_id, action_scope_id, action_scope_dict) # noqa
-
- # Assignment functions
-
- @controller.protected()
- def add_subject_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- return self.admin_api.add_subject_assignment_list(
- user_id, intra_extension_id, subject_id, subject_category_id, subject_scope_id) # noqa
-
- @controller.protected()
- def get_subject_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- return self.admin_api.get_subject_assignment_list(
- user_id, intra_extension_id, subject_id, subject_category_id)
-
- @controller.protected()
- def del_subject_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- self.admin_api.del_subject_assignment(
- user_id,
- intra_extension_id,
- subject_id,
- subject_category_id,
- subject_scope_id)
-
- @controller.protected()
- def add_object_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- return self.admin_api.add_object_assignment_list(
- user_id, intra_extension_id, object_id, object_category_id, object_scope_id) # noqa
-
- @controller.protected()
- def get_object_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- object_category_id = kw.get('object_category_id', None)
- return self.admin_api.get_object_assignment_list(
- user_id, intra_extension_id, object_id, object_category_id)
-
- @controller.protected()
- def del_object_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- self.admin_api.del_object_assignment(
- user_id,
- intra_extension_id,
- object_id,
- object_category_id,
- object_scope_id)
-
- @controller.protected()
- def add_action_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- return self.admin_api.add_action_assignment_list(
- user_id, intra_extension_id, action_id, action_category_id, action_scope_id) # noqa
-
- @controller.protected()
- def get_action_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- action_category_id = kw.get('action_category_id', None)
- return self.admin_api.get_action_assignment_list(
- user_id, intra_extension_id, action_id, action_category_id)
-
- @controller.protected()
- def del_action_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- self.admin_api.del_action_assignment(
- user_id,
- intra_extension_id,
- action_id,
- action_category_id,
- action_scope_id)
-
- # Metarule functions
-
- @controller.protected()
- def get_aggregation_algorithm(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_aggregation_algorithm_id(
- user_id, intra_extension_id)
-
- @controller.protected()
- def set_aggregation_algorithm(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- aggregation_algorithm_id = kw.get('aggregation_algorithm_id', None)
- return self.admin_api.set_aggregation_algorithm_id(
- user_id, intra_extension_id, aggregation_algorithm_id)
-
- @controller.protected()
- def get_sub_meta_rules(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_sub_meta_rules_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def add_sub_meta_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_dict = dict()
- sub_meta_rule_dict['name'] = kw.get('sub_meta_rule_name', None)
- sub_meta_rule_dict['algorithm'] = kw.get(
- 'sub_meta_rule_algorithm', None)
- sub_meta_rule_dict['subject_categories'] = kw.get(
- 'sub_meta_rule_subject_categories', None)
- sub_meta_rule_dict['object_categories'] = kw.get(
- 'sub_meta_rule_object_categories', None)
- sub_meta_rule_dict['action_categories'] = kw.get(
- 'sub_meta_rule_action_categories', None)
- return self.admin_api.add_sub_meta_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_dict)
-
- @controller.protected()
- def get_sub_meta_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- return self.admin_api.get_sub_meta_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id)
-
- @controller.protected()
- def del_sub_meta_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- self.admin_api.del_sub_meta_rule(
- user_id, intra_extension_id, sub_meta_rule_id)
-
- @controller.protected()
- def set_sub_meta_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- sub_meta_rule_dict = dict()
- sub_meta_rule_dict['name'] = kw.get('sub_meta_rule_name', None)
- sub_meta_rule_dict['algorithm'] = kw.get(
- 'sub_meta_rule_algorithm', None)
- sub_meta_rule_dict['subject_categories'] = kw.get(
- 'sub_meta_rule_subject_categories', None)
- sub_meta_rule_dict['object_categories'] = kw.get(
- 'sub_meta_rule_object_categories', None)
- sub_meta_rule_dict['action_categories'] = kw.get(
- 'sub_meta_rule_action_categories', None)
- return self.admin_api.set_sub_meta_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id, sub_meta_rule_dict)
-
- # Rules functions
- @controller.protected()
- def get_rules(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- return self.admin_api.get_rules_dict(
- user_id, intra_extension_id, sub_meta_rule_id)
-
- @controller.protected()
- def add_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- subject_category_list = kw.get('subject_categories', [])
- object_category_list = kw.get('object_categories', [])
- action_category_list = kw.get('action_categories', [])
- enabled_bool = kw.get('enabled', True)
- rule_list = subject_category_list + action_category_list + \
- object_category_list + [enabled_bool, ]
- return self.admin_api.add_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id, rule_list)
-
- @controller.protected()
- def get_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- rule_id = kw.get('rule_id', None)
- return self.admin_api.get_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id, rule_id)
-
- @controller.protected()
- def del_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- rule_id = kw.get('rule_id', None)
- self.admin_api.del_rule(
- user_id,
- intra_extension_id,
- sub_meta_rule_id,
- rule_id)
-
- @controller.protected()
- def set_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- rule_id = kw.get('rule_id', None)
- rule_list = list()
- subject_category_list = kw.get('subject_categories', [])
- object_category_list = kw.get('object_categories', [])
- action_category_list = kw.get('action_categories', [])
- rule_list = subject_category_list + action_category_list + object_category_list # noqa
- return self.admin_api.set_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id, rule_id, rule_list)
-
-
-@dependency.requires('authz_api') # noqa: F405
-class InterExtensions(controller.V3Controller):
-
- def __init__(self):
- super(InterExtensions, self).__init__()
-
- def _get_user_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref['user']
-
- # @controller.protected()
- # def get_inter_extensions(self, context, **kw):
- # user = self._get_user_from_token(context.get('token_id'))
- # return {
- # 'inter_extensions':
- # self.interextension_api.get_inter_extensions()
- # }
-
- # @controller.protected()
- # def get_inter_extension(self, context, **kw):
- # user = self._get_user_from_token(context.get('token_id'))
- # return {
- # 'inter_extensions':
- # self.interextension_api.get_inter_extension(uuid=kw['inter_extension_id'])
- # }
-
- # @controller.protected()
- # def create_inter_extension(self, context, **kw):
- # user = self._get_user_from_token(context.get('token_id'))
- # return self.interextension_api.create_inter_extension(kw)
-
- # @controller.protected()
- # def delete_inter_extension(self, context, **kw):
- # user = self._get_user_from_token(context.get('token_id'))
- # if 'inter_extension_id' not in kw:
- # raise exception.Error
- # return
- # self.interextension_api.delete_inter_extension(kw['inter_extension_id'])
-
-
-@dependency.requires('moonlog_api', 'authz_api') # noqa: F405
-class Logs(controller.V3Controller):
-
- def __init__(self):
- super(Logs, self).__init__()
-
- def _get_user_id_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref['user']
-
- @controller.protected()
- def get_logs(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- options = kw.get('options', '')
- return self.moonlog_api.get_logs(user_id, options)
-
-
-@dependency.requires('identity_api', "token_provider_api", "resource_api") # noqa: F405
-class MoonAuth(controller.V3Controller):
-
- def __init__(self):
- super(MoonAuth, self).__init__()
-
- def _get_project(self, uuid="", name=""):
- projects = self.resource_api.list_projects()
- for project in projects:
- if uuid and uuid == project['id']:
- return project
- elif name and name == project['name']:
- return project
-
- def get_token(self, context, **kw):
- data_auth = {
- "auth": {
- "identity": {
- "methods": [
- "password"
- ],
- "password": {
- "user": {
- "domain": {
- "id": "Default"
- },
- "name": kw['username'],
- "password": kw['password']
- }
- }
- }
- }
- }
-
- message = {}
- if "project" in kw:
- project = self._get_project(name=kw['project'])
- if project:
- data_auth["auth"]["scope"] = dict()
- data_auth["auth"]["scope"]['project'] = dict()
- data_auth["auth"]["scope"]['project']['id'] = project['id']
- else:
- message = {
- "error": {
- "message": "Unable to find project {}".format(kw['project']), # noqa
- "code": 200,
- "title": "UnScopedToken"
- }}
-
-# req = requests.post("http://localhost:5000/v3/auth/tokens",
-# json=data_auth,
-# headers={"Content-Type": "application/json"}
-# )
- req = requests.post("http://172.16.1.222:5000/v3/auth/tokens",
- json=data_auth,
- headers={"Content-Type": "application/json"}
- )
- if req.status_code not in (200, 201):
- LOG.error(req.text)
- else:
- _token = req.headers['X-Subject-Token']
- _data = req.json()
- _result = {
- "token": _token,
- 'message': message
- }
- try:
- _result["roles"] = map(
- lambda x: x['name'], _data["token"]["roles"])
- except KeyError:
- pass
- return _result
- return {"token": None, 'message': req.json()}
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/deb.conf.bak b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/deb.conf.bak
deleted file mode 100644
index 6e1159a1..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/deb.conf.bak
+++ /dev/null
@@ -1,11 +0,0 @@
-keystone/admin-password: password
-keystone/auth-token: password
-keystone/admin-password-confirm: password
-keystone/admin-email: root@localhost
-keystone/admin-role-name: admin
-keystone/admin-user: admin
-keystone/create-admin-tenant: false
-keystone/region-name: Orange
-keystone/admin-tenant-name: admin
-keystone/register-endpoint: false
-keystone/configure_db: false
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/handlers/main.yml
deleted file mode 100644
index ca4e8088..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/handlers/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart neutron compute service
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
-
-- name: restart nova-compute services
- service: name=nova-compute state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/tasks/main.yml
deleted file mode 100644
index fd3e51d3..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/tasks/main.yml
+++ /dev/null
@@ -1,75 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install compute-related neutron packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: fix openstack neutron plugin config file
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
- systemctl daemon-reload
- when: ansible_os_family == 'RedHat'
-
-- name: fix openstack neutron plugin config file ubuntu
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
- when: ansible_os_family == "Debian"
-
-- name: generate neutron compute service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: config ml2 plugin
- template: src=templates/ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
-
-- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
-
-- name: config neutron
- template: src=templates/neutron.conf
- dest=/etc/neutron/neutron.conf backup=yes
- notify:
- - restart neutron compute service
- - restart nova-compute services
-
-- meta: flush_handlers
-
-- include: ../../neutron-network/tasks/odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/vars/Debian.yml
deleted file mode 100644
index 83d7f323..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/vars/Debian.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-packages:
- - neutron-common
- - neutron-plugin-ml2
- - openvswitch-switch-dpdk
- - openvswitch-switch
- - neutron-plugin-openvswitch-agent
-
-services:
- - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/tasks/main.yml
deleted file mode 100644
index 31f7f17c..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/tasks/main.yml
+++ /dev/null
@@ -1,117 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: assert kernel support for vxlan
- command: modinfo -F version vxlan
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: assert iproute2 support for vxlan
- command: ip link add type vxlan help
- register: iproute_out
- failed_when: iproute_out.rc == 255
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install neutron network related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: generate neutron network service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: fix openstack neutron plugin config file
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
- systemctl daemon-reload
- when: ansible_os_family == 'RedHat'
-
-- name: fix openstack neutron plugin config file ubuntu
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
- when: ansible_os_family == "Debian"
-
-- name: config l3 agent
- template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
- backup=yes
-
-- name: config dhcp agent
- template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
- backup=yes
-
-- name: update dnsmasq-neutron.conf
- template: src=templates/dnsmasq-neutron.conf
- dest=/etc/neutron/dnsmasq-neutron.conf
-
-- name: config metadata agent
- template: src=metadata_agent.ini
- dest=/etc/neutron/metadata_agent.ini backup=yes
-
-- name: config ml2 plugin
- template: src=templates/ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
-
-- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
-
-- name: config neutron
- template: src=templates/neutron.conf
- dest=/etc/neutron/neutron.conf backup=yes
-
-- name: force mtu to 1450 for vxlan
- lineinfile:
- dest: /etc/neutron/dnsmasq-neutron.conf
- regexp: '^dhcp-option-force'
- line: 'dhcp-option-force=26,1450'
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- include: firewall.yml
- when: enable_fwaas == True
-
-- include: vpn.yml
- when: enable_vpnaas == True
-
-- include: odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron network related services
- service: name={{ item }} state=restarted enabled=yes
- with_flattened:
- - services_noarch
- - services
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/vars/Debian.yml
deleted file mode 100644
index 1a78ca8c..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/vars/Debian.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - neutron-plugin-ml2
- - openvswitch-switch-dpdk
- - openvswitch-switch
- - neutron-l3-agent
- - neutron-dhcp-agent
- - neutron-plugin-openvswitch-agent
-
-services:
- - openvswitch-switch
- - neutron-openvswitch-agent
-
-openvswitch_agent: neutron-plugin-openvswitch-agent
-
-xorp_packages:
- - xorp
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/tasks/main.yml
deleted file mode 100644
index 7bb4f347..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/tasks/main.yml
+++ /dev/null
@@ -1,63 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install nova-compute related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: restart virtlogd
- service: name=virtlogd state=started enabled=yes
- when: ansible_os_family == "Debian"
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: update nova-compute conf
- template: src=templates/{{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova.conf
- notify:
- - restart nova-compute services
-
-- name: get number of cpu support virtualization
- shell: egrep -c '(vmx|svm)' /proc/cpuinfo
- register: kvm_cpu_num
-
-- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova-compute.conf
- notify:
- - restart nova-compute services
-
-- name: generate nova compute service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-#'
-- name: remove nova sqlite db
- shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
-
-- meta: flush_handlers
-
-- name: restart nova-compute and libvirt-bin
- shell: >
- service nova-compute restart;
- service libvirt-bin restart;
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/templates/nova-compute.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/templates/nova-compute.conf
deleted file mode 100644
index 305d408b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/templates/nova-compute.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-[DEFAULT]
-compute_driver=libvirt.LibvirtDriver
-force_raw_images = true
-[libvirt]
-{% if kvm_cpu_num.stdout_lines[0]|int == 0 %}
-virt_type=qemu
-{% else %}
-virt_type=kvm
-{% endif %}
-images_type = raw
-mem_stats_period_seconds=0
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-controller/tasks/nova_config.yml
deleted file mode 100644
index f332c97a..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-controller/tasks/nova_config.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: nova api db sync
- shell: su -s /bin/sh -c "nova-manage api_db sync" nova
- ignore_errors: True
- notify:
- - restart nova service
-
-- name: nova db sync
- nova_manage: action=dbsync
- notify:
- - restart nova service
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_00_download_packages.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_00_download_packages.yml
deleted file mode 100644
index efd359db..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_00_download_packages.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-#"
-
-- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-
-#"
-
-- name: download odl package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/odl/{{ odl_pkg_url }}" dest=/opt/{{ odl_pkg_name }}
-
-# "
-
-- name: download odl pip package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/pip/{{ networking_odl_pkg_name }}" dest=/opt/{{ networking_odl_pkg_name }}
-
-#"
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
deleted file mode 100644
index 8d71606f..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: opendaylight system file
- copy:
- src: "{{ service_file.src }}"
- dest: "{{ service_file.dst }}"
- mode: 0755
-
-- name: set l3 fwd enable in custom.properties
- template:
- src: custom.properties
- dest: "{{ odl_home }}/etc/custom.properties"
- owner: odl
- group: odl
- mode: 0775
- when: odl_l3_agent == "Enable"
-
-- name: create karaf config
- template:
- src: org.apache.karaf.features.cfg.Debian
- dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
- owner: odl
- group: odl
- mode: 0775
- when: ansible_os_family == "Debian"
-
-- name: create karaf config
- template:
- src: org.apache.karaf.features.cfg.Redhat
- dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
- owner: odl
- group: odl
- mode: 0775
- when: ansible_os_family == "RedHat"
-
-- name: create tomcat config
- template:
- src: tomcat-server.xml
- dest: "{{ odl_home }}/configuration/tomcat-server.xml"
-
-- name: create tomcat config
- template:
- src: jetty.xml
- dest: "{{ odl_home }}/etc/jetty.xml"
-
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_04_install_pip_packages.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
deleted file mode 100644
index 869d264a..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: patch odl pip package
- shell: |
- cd /opt
- tar xf /opt/{{ networking_odl_pkg_name }}
- rm -rf /opt/{{ networking_odl_pkg_name }}
- sed -i 's/^neutron-lib.*/neutron-lib/' networking-odl-2.0.0/requirements.txt
- tar zcf /opt/{{ networking_odl_pkg_name }} networking-odl-2.0.0
- rm -rf networking-odl-2.0.0
- cd -
-
-- name: odl pip package install
- shell: |
- cd /opt
- pip install {{ networking_odl_pkg_name }}
- rm -rf {{ networking_odl_pkg_name }}
- cd -
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
deleted file mode 100644
index f44b373b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: turn off neutron-server neutron-plugins-openvswitch-agent Daemon on control node
- shell: >
- sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ;
- sed -i '/neutron-server/d' /opt/service;
- sed -i '/keepalived/d' /opt/service;
-
-- name: turn off neutron-server on control node
- service: name=neutron-server state=stopped
-
-- name: turn off keepalived on control node
- service: name=keepalived state=stopped
- when: ansible_os_family == "Debian"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_odl_controller.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_odl_controller.yml
deleted file mode 100644
index d78a76e0..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/01_odl_controller.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: download packages
- include: 01_00_download_packages.yml
-
-- name: create odl user and group
- include: 01_01_create_odl_user_and_group.yml
-
-- name: unarchive odl and jdk
- include: 01_02_unarchive_odl_and_jdk.yml
-
-- name: copy odl configuration files
- include: 01_03_copy_odl_configuration_files.yml
-
-- name: install pip packages
- include: 01_04_install_pip_packages.yml
-
-- name: clean up karaf data
- include: 01_05_clean_up_karaf_data.yml
-
-- name: stop openstack services
- include: 01_06_stop_openstack_services.yml
-
-- name: set opendaylight cluster
- include: 05_set_opendaylight_cluster.yml
- when: groups['odl']|length > 1
-
-- name: install moon
- include: moon-odl.yml
- when: moon == "Enable"
-
-- name: start and check odl
- include: 01_07_start_check_odl.yml
-
-- name: run openvswitch
- include: 03_openvswitch.yml
-
-- name: configure neutron
- include: 01_08_configure_neutron.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
deleted file mode 100644
index 2c5332f9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: set opendaylight as the manager
- command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
-
-- name: restart keepalived to recover external IP before check br-int
- shell: service keepalived restart
- when: inventory_hostname in groups['odl']
- ignore_errors: True
-
-- name: check br-int
- shell: ovs-vsctl list-br | grep br-int; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-int; done
-
-- name: set local ip in openvswitch
- shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '};
-
-#'
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/main.yml
deleted file mode 100644
index 32952c51..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
- tags:
- - test_odl
-
-- name: Provision Common on all nodes
- include: 00_odl_common.yml
- when: groups['odl']|length !=0
-
-- name: Provision ODL on Controller nodes
- include: 01_odl_controller.yml
- when: inventory_hostname in groups['odl']
-
-- name: Provision ODL on Compute nodes
- include: 02_odl_compute.yml
- when: groups['odl']|length !=0 and inventory_hostname not in groups['odl']
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/jetty.xml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/jetty.xml
deleted file mode 100755
index 50ac7c35..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/jetty.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied. See the License for the
- specific language governing permissions and limitations
- under the License.
--->
-<!DOCTYPE Configure PUBLIC "-//Mort Bay Consulting//
-DTD Configure//EN" "http://jetty.mortbay.org/configure.dtd">
-
-<Configure class="org.eclipse.jetty.server.Server">
-
- <!-- =========================================================== -->
- <!-- Set connectors -->
- <!-- =========================================================== -->
- <!-- One of each type! -->
- <!-- =========================================================== -->
-
- <!-- Use this connector for many frequently idle connections and for
- threadless continuations. -->
- <Call name="addConnector">
- <Arg>
- <New class="org.eclipse.jetty.server.nio.SelectChannelConnector">
- <Set name="host">
- <Property name="jetty.host"/>
- </Set>
- <Set name="port">
- <Property name="jetty.port" default="8181" />
- </Set>
- <Set name="maxIdleTime">300000</Set>
- <Set name="Acceptors">2</Set>
- <Set name="statsOn">false</Set>
- <Set name="confidentialPort">8543</Set>
- <Set name="lowResourcesConnections">20000</Set>
- <Set name="lowResourcesMaxIdleTime">5000</Set>
- </New>
- </Arg>
- </Call>
-
- <!-- =========================================================== -->
- <!-- Configure Authentication Realms -->
- <!-- Realms may be configured for the entire server here, or -->
- <!-- they can be configured for a specific web app in a context -->
- <!-- configuration (see $(jetty.home)/contexts/test.xml for an -->
- <!-- example). -->
- <!-- =========================================================== -->
- <Call name="addBean">
- <Arg>
- <New class="org.eclipse.jetty.plus.jaas.JAASLoginService">
- <Set name="name">karaf</Set>
- <Set name="loginModuleName">karaf</Set>
- <Set name="roleClassNames">
- <Array type="java.lang.String">
- <Item>org.apache.karaf.jaas.boot.principal.RolePrincipal
- </Item>
- </Array>
- </Set>
- </New>
- </Arg>
- </Call>
- <Call name="addBean">
- <Arg>
- <New class="org.eclipse.jetty.plus.jaas.JAASLoginService">
- <Set name="name">default</Set>
- <Set name="loginModuleName">karaf</Set>
- <Set name="roleClassNames">
- <Array type="java.lang.String">
- <Item>org.apache.karaf.jaas.boot.principal.RolePrincipal
- </Item>
- </Array>
- </Set>
- </New>
- </Arg>
- </Call>
-
-</Configure>
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/ml2_conf.sh
deleted file mode 100755
index 5e3627bf..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/ml2_conf.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
-[ml2_odl]
-password = admin
-username = admin
-url = http://{{ internal_vip.ip }}:8181/controller/nb/v2/neutron
-EOT
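
Appending the [ml2_odl] section with a heredoc adds a duplicate section every time the script is re-run. Since the role's Debian vars (later in this diff) already install crudini, a hedged, idempotent sketch of the same configuration as an Ansible task, using the same keys and URL as the template above:

- name: set ml2_odl options in ml2_conf.ini
  shell: >
    crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl username admin;
    crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl password admin;
    crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl url http://{{ internal_vip.ip }}:8181/controller/nb/v2/neutron

This mirrors how the ONOS controller tasks elsewhere in this diff drive ml2_conf.ini through crudini.
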
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/tomcat-server.xml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/tomcat-server.xml
deleted file mode 100755
index bc7ab13d..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/tomcat-server.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-<?xml version='1.0' encoding='utf-8'?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<Server>
- <!--APR library loader. Documentation at /docs/apr.html -->
- <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
- <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
- <Listener className="org.apache.catalina.core.JasperListener" />
- <!-- Prevent memory leaks due to use of particular java/javax APIs-->
- <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
- <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
- <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
-
- <Service name="Catalina">
- <Connector port="{{ odl_api_port }}" protocol="HTTP/1.1"
- connectionTimeout="20000"
- redirectPort="8443" />
-
-<!--
- Please remove the comments around the following Connector tag to enable HTTPS Authentication support.
- Remember to add a valid keystore in the configuration folder.
- More info : http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
--->
-
- <!--
- <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
- maxThreads="150" scheme="https" secure="true"
- clientAuth="false" sslProtocol="TLS"
- keystoreFile="configuration/keystore"
- keystorePass="changeit"/>
- -->
-
- <Engine name="Catalina" defaultHost="localhost">
- <Host name="localhost" appBase=""
- unpackWARs="false" autoDeploy="false"
- deployOnStartup="false" createDirs="false">
- <Realm className="org.opendaylight.controller.karafsecurity.ControllerCustomRealm" />
- <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
- <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
- prefix="web_access_log_" suffix=".txt" resolveHosts="false"
- rotatable="true" fileDateFormat="yyyy-MM"
- pattern="%{yyyy-MM-dd HH:mm:ss.SSS z}t - [%a] - %r"/>
- </Host>
- </Engine>
- </Service>
-</Server>
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/Debian.yml
deleted file mode 100755
index 640a264a..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/Debian.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-common_packages:
- - crudini
-
-service_ovs_name: openvswitch-switch
-service_ovs_agent_name: neutron-openvswitch-agent
-
-service_file:
- src: opendaylight.service
- dst: /lib/systemd/system/opendaylight.service
-
-networking_odl_pkg_name: networking-odl-2.0.0.tar.gz
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/main.yml
deleted file mode 100755
index e5f52b42..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-odl_username: admin
-odl_password: admin
-odl_api_port: 8181
-
-#odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.3.0-Lithium/distribution-karaf-0.3.0-Lithium.tar.gz
-odl_pkg_url: karaf.tar.gz
-odl_pkg_name: karaf.tar.gz
-odl_home: "/opt/opendaylight-0.3.0/"
-odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'odl-restconf','odl-l2switch-switch','odl-openflowplugin-all','odl-mdsal-apidocs','odl-dlux-all','odl-adsal-northbound','odl-nsf-all','odl-ovsdb-openstack','odl-ovsdb-northbound','odl-dlux-core']
-odl_extra_features: ['odl-restconf-all','odl-mdsal-clustering','odl-openflowplugin-flow-services','http','jolokia-osgi']
-odl_features: "{{ odl_base_features + odl_extra_features }}"
-
-odl_aaa_moon: odl-aaa-moon.tar.gz
-
-jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
-jdk8_script_name: install_jdk8.tar
-
-common_packages_noarch: []
-
-odl_pip:
- - networking_odl
-
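
The odl_features value above concatenates the base and extra feature lists. The controller task file that consumes it is not part of this hunk, but the usual pattern (and the one the ONOS role below uses for its own featuresBoot line) is a sed over Karaf's features config; a hedged sketch, with the etc path under odl_home assumed:

- name: configure odl boot features (illustrative)
  shell: >
    sed -i '/^featuresBoot=/c\featuresBoot={{ odl_features | join(",") }}'
    {{ odl_home }}/etc/org.apache.karaf.features.cfg
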
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/handlers/main.yml
deleted file mode 100755
index e099fcf4..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/handlers/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart onos service
- service: name=onos state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/main.yml
deleted file mode 100755
index c8ce1155..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/main.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: remove neutron-plugin-openvswitch-agent auto start
- shell: >
- update-rc.d neutron-plugin-openvswitch-agent remove;
- sed -i /neutron-plugin-openvswitch-agent/d /opt/service
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: shut down and disable Neutron's agent services
- service: name=neutron-plugin-openvswitch-agent state=stopped
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: remove neutron-l3-agent auto start
- shell: >
- update-rc.d neutron-l3-agent remove;
- sed -i /neutron-l3-agent/d /opt/service
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: shut down and disable Neutron's l3 agent services
- service: name=neutron-l3-agent state=stopped
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: Stop the Open vSwitch service and clear existing OVSDB
- shell: >
- ovs-vsctl del-br br-int ;
- ovs-vsctl del-br br-tun ;
- ovs-vsctl del-manager ;
- ip link delete onos_port1 type veth peer name onos_port2;
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: Install ONOS Cluster on Controller
- include: onos_controller.yml
- when: inventory_hostname in groups['onos']
-
-- name: Config ONOS Cluster
- include: openvswitch.yml
- when: groups['onos']|length !=0
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/onos_controller.yml
deleted file mode 100755
index d51151a9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/onos_controller.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download onos driver packages
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
-
-- name: download onos sfc driver package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}" dest=/opt/
-
-- name: unarchive onos driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
-
-- name: unarchive onos sfc driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-sfc.tar -C /opt/"
-
-- name: install onos driver
- command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh"
-
-- name: install onos sfc driver
- command: su -s /bin/sh -c "/opt/networking-sfc/install_driver.sh"
-
-- name: install onos required packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages
-
-- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-
-- name: unarchive jdk8 install script
- command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
-
-- name: install jdk8 package
- command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
-
-- name: create JAVA_HOME environment variable
- shell: >
- export J2SDKDIR=/usr/lib/jvm/java-8-oracle;
- export J2REDIR=/usr/lib/jvm/java-8-oracle/jre;
- export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin;
- export JAVA_HOME=/usr/lib/jvm/java-8-oracle;
- export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db;
-
-- name: create onos group
- group: name=onos system=yes state=present
-
-- name: create onos user
- user:
- name: onos
- group: onos
- home: "{{ onos_home }}"
- createhome: "yes"
- system: "yes"
- shell: "/bin/false"
-
-- name: download onos package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }}
-
-- name: create new jar repository
- command: su -s /bin/sh -c "mkdir ~/.m2"
- ignore_errors: True
-
-- name: download jar repository
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
-
-- name: extract jar repository
- command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
-
-- name: extract onos package
- command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos
-
-- name: configure onos service
- shell: >
- echo 'export ONOS_OPTS=debug' > {{ onos_home }}/options;
- echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
- mkdir {{ onos_home }}/var;
- mkdir {{ onos_home }}/config;
- sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-
-- name: configure onos boot feature
- shell: >
- sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
-
-- name: wait for config time
- shell: "sleep 10"
-
-- name: start onos service
- service: name=onos state=started enabled=yes
-
-- name: wait for onos start time
- shell: "sleep 200"
-
-- name: add onos auto start
- shell: >
- echo "onos">>/opt/service
-
-##########################################################################################################
-################################ ONOS connect with OpenStack ################################
-##########################################################################################################
-- name: Configure Neutron1
- shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_sfc.services.sfc.plugin.SfcPlugin, networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin, onos_router;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
-
-- name: Create ML2 Configuration File
- template:
- src: ml2_conf.sh
- dest: "/opt/ml2_conf.sh"
- mode: 0777
-
-- name: Configure Neutron2
- command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-
-- name: Configure Neutron3
- shell: >
- mysql -e "drop database if exists neutron_ml2;";
- mysql -e "create database neutron_ml2 character set utf8;";
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
- su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
-
-- name: Restart neutron-server
- service: name=neutron-server state=restarted
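
The "create JAVA_HOME environment variable" task earlier in this file only exports the variables inside that single shell invocation, so nothing persists for later tasks or for the onos service; the role works anyway because it injects "env JAVA_HOME=..." into onos.conf a few tasks later. A hedged sketch of a persistent alternative (the file name under /etc/profile.d is an assumption):

- name: persist JAVA_HOME system-wide
  copy:
    dest: /etc/profile.d/java8.sh
    content: |
      export JAVA_HOME=/usr/lib/jvm/java-8-oracle
      export PATH=$PATH:$JAVA_HOME/bin
    mode: 0644
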
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/openvswitch.yml
deleted file mode 100755
index aac787ea..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/openvswitch.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: set veth port
- shell: >
- ip link add onos_port1 type veth peer name onos_port2;
- ifconfig onos_port1 up;
- ifconfig onos_port2 up;
- ignore_errors: True
-
-- name: add openflow-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
- when: inventory_hostname in groups['onos']
-
-- name: add openflow feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdatabase feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdb-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'";
- when: inventory_hostname in groups['onos']
-
-- name: add onos driver ovsdb feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdb provider host feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'";
- when: inventory_hostname in groups['onos']
-
-- name: add vtn feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'";
- when: inventory_hostname in groups['onos']
-
-- name: set public eth card start
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n onos_port2'"
- when: inventory_hostname in groups['onos']
-
-- name: Set ONOS as the manager
- command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
-
-- name: delete default gateway
- shell: >
- route delete default;
- when: inventory_hostname not in groups['onos']
- ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/templates/ml2_conf.sh
deleted file mode 100755
index 8af03df4..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/templates/ml2_conf.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
-[onos]
-password = admin
-username = admin
-url_path = http://{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:8181/onos/vtn
-EOT
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/Debian.yml
deleted file mode 100755
index 59a4dbd9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/Debian.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - software-properties-common
- - crudini
-
-services: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/RedHat.yml
deleted file mode 100755
index 59a4dbd9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/RedHat.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - software-properties-common
- - crudini
-
-services: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/main.yml
deleted file mode 100755
index f11f1102..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-onos_pkg_name: onos-1.6.0.tar.gz
-onos_home: /opt/onos/
-karaf_dist: apache-karaf-3.0.5
-jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
-jdk8_script_name: install_jdk8.tar
-onos_driver: networking-onos.tar
-onos_sfc_driver: networking-sfc.tar
-repository: repository.tar
-onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base, onos-openflow, onos-ovsdatabase, onos-ovsdb-base, onos-drivers-ovsdb, onos-ovsdb-provider-host, onos-app-vtn-onosfw
-
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml
deleted file mode 100755
index 836cb78b..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: del ovs bridge
- shell: ovs-vsctl del-br br-int; ovs-vsctl del-br br-tun; ovs-vsctl del-br br-prv;
-
-- name: remove ovs and ovs-plugin daemon
- shell: >
- sed -i '/neutron-openvswitch-agent/d' /opt/service ;
- sed -i '/openvswitch-switch/d' /opt/service ;
-
-- name: stop ovs and ovs-plugin
- shell: service openvswitch-switch stop; service neutron-openvswitch-agent stop;
-
-- name: remove ovs and ovs-plugin files
- shell: >
- update-rc.d -f neutron-openvswitch-agent remove;
- mv /etc/init.d/neutron-openvswitch-agent /home/neutron-openvswitch-agent;
- mv /etc/init/neutron-openvswitch-agent.conf /home/neutron-openvswitch-agent.conf;
- update-rc.d -f openvswitch-switch remove ;
- mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ;
- mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ;
- update-rc.d -f neutron-ovs-cleanup remove ;
- mv /etc/init.d/neutron-ovs-cleanup /home/neutron-ovs-cleanup ;
- mv /etc/init/neutron-ovs-cleanup.conf /home/neutron-ovs-cleanup.conf ;
-
-- name: remove ovs kernel module
- shell: rmmod vport_vxlan; rmmod openvswitch;
- ignore_errors: True
-
-- name: copy recovery script
- copy: src={{ item }} dest=/opt/setup_networks
- with_items:
-# - recover_network_opencontrail.py
- - setup_networks_opencontrail.py
-
-#- name: recover external script
-# shell: python /opt/setup_networks/recover_network_opencontrail.py
-
-- name: modify net-init
- shell: sed -i 's/setup_networks.py/setup_networks_opencontrail.py/g' /etc/init.d/net_init
-
-- name: resolve dual NIC problem
- shell: >
- echo "net.ipv4.conf.all.arp_ignore=1" >> /etc/sysctl.conf ;
- /sbin/sysctl -p ;
- echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore ;
-
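
The last task both appends net.ipv4.conf.all.arp_ignore=1 to /etc/sysctl.conf and pokes /proc directly, which duplicates the line on every run. A hedged, idempotent sketch using Ansible's sysctl module to the same effect:

- name: resolve dual NIC problem
  sysctl:
    name: net.ipv4.conf.all.arp_ignore
    value: 1
    sysctl_set: yes
    state: present
    reload: yes
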
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/neutron.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/neutron.j2
deleted file mode 100644
index e7107660..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/neutron.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-[securitygroup]
-firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-enable_security_group = True
-
-[agent]
-prevent_arp_spoofing = False
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/nova.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/nova.j2
deleted file mode 100644
index 7dbc216a..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/nova.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/vars/Debian.yml
deleted file mode 100644
index 221a3d92..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/vars/Debian.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-configs_templates:
- - src: nova.j2
- dest:
- - /etc/nova/nova.conf
- - src: neutron.j2
- dest:
- - /etc/neutron/plugins/ml2/ml2_conf.ini
- - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
- - /etc/neutron/plugins/ml2/restproxy.ini
-
-controller_services:
- - nova-api
- - nova-cert
- - nova-conductor
- - nova-consoleauth
- - nova-novncproxy
- - nova-scheduler
- - neutron-server
- - neutron-openvswitch-agent
- - neutron-l3-agent
- - neutron-dhcp-agent
- - neutron-metadata-agent
-
-compute_services:
- - nova-compute
- - neutron-openvswitch-agent
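
The configs_templates structure above pairs one template source with a list of destination paths. The task file that consumes it is not included in this hunk, but a structure of this shape is typically fed to the template module through with_subelements; a hedged sketch:

- name: render secgroup config templates (illustrative)
  template: src={{ item.0.src }} dest={{ item.1 }}
  with_subelements:
    - "{{ configs_templates }}"
    - dest
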
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/setup-network/files/setup_networks/net_init b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/setup-network/files/setup_networks/net_init
deleted file mode 100755
index 41ccb988..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/setup-network/files/setup_networks/net_init
+++ /dev/null
@@ -1,24 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: anamon.init
-# Required-Start: $network
-# Required-Stop:
-# Should-Start:
-# Should-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Starts the cobbler anamon boot notification program
-# Description: anamon runs the first time a machine is booted after installation.
-### END INIT INFO
-
-
-
-#
-# anamon.init: Starts the cobbler post-install boot notification program
-#
-# chkconfig: 35 0 6
-#
-# description: anamon runs the first time a machine is booted after
-# installation.
-#
-python /opt/setup_networks/setup_networks.py
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/storage/files/storage b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/storage/files/storage
deleted file mode 100755
index 3acc6115..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/storage/files/storage
+++ /dev/null
@@ -1,10 +0,0 @@
-#! /bin/bash
-### BEGIN INIT INFO
-# Provides: Storage
-# Required-Start: $remote_fs $network
-# Required-Stop: $remote_fs $network
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Description: Storage
-### END INIT INFO
-loop_dev=`sh /opt/setup_storage/losetup.sh`
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift.yml
deleted file mode 100644
index 10a513f0..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift.yml
+++ /dev/null
@@ -1,89 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- include: swift-controller1.yml
- when: inventory_hostname in groups['controller']
-
-- include: swift-compute1.yml
- when: inventory_hostname in groups['compute']
-
-- include: swift-controller2.yml
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: copy swift.conf
- template: src=swift.conf dest=/etc/swift/swift.conf backup=yes
-
-- name: chown /etc/swift
- shell: chown -R root:swift /etc/swift
-
-- name: restart tasks on controller
- service: name={{ item }} state=restarted enabled=yes
- with_items:
- - memcached
- - swift-proxy
- when: inventory_hostname in groups['controller']
-
-- name: restart tasks on compute
- shell: swift-init all start
- when: inventory_hostname in groups['compute']
- ignore_errors: True
-
-- name: restart tasks on controller
- service: name={{ item }} state=restarted enabled=yes
- with_items:
- - rsync
- when: inventory_hostname in groups['compute']
-
-#- name: upload swift lib
-# unarchive: src=swift-lib.tar.gz dest=/tmp/
-
-- name: download swift lib package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/{{ swift_lib }}" dest=/tmp/
-
-- name: unarchive swift lib
- command: su -s /bin/sh -c "tar xvf /tmp/{{ swift_lib }} -C /tmp/"
-
-- name: copy swift lib
- command: su -s /bin/sh -c "cp /tmp/swift-lib/* /usr/lib/"
-
-- name: wait 30 seconds
- shell: sleep 30
-
-- name: create swift task script
- shell: echo {{ item }} >> /opt/swift-service
- with_items:
- - swift-account
- - swift-account-replicator
- - swift-container-replicator
- - swift-object
- - swift-object-updater
- - swift-account-auditor
- - swift-container
- - swift-container-sync
- - swift-object-auditor
- - swift-account-reaper
- - swift-container-auditor
- - swift-container-updater
- - swift-object-replicator
- when: inventory_hostname in groups['compute']
- ignore_errors: True
-
-- name: restart swift task
- shell: >
- for i in `cat /opt/swift-service`; do service $i start; done;
- sleep 10;
- for i in `cat /opt/swift-service`; do service $i restart; done;
- when: inventory_hostname in groups['compute']
- ignore_errors: True
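
The final step shells out and loops over /opt/swift-service. An equivalent, hedged sketch using the service module directly on the same services (abbreviated to three of them; the full list is the one written into /opt/swift-service above, and the original's start/sleep/restart sequence is dropped for brevity):

- name: restart swift storage services (illustrative)
  service: name={{ item }} state=restarted
  with_items:
    - swift-account
    - swift-container
    - swift-object
  when: inventory_hostname in groups['compute']
  ignore_errors: True
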
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml
deleted file mode 100644
index dc009551..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-swift_lib: swift-lib.tar.gz
-
-packages_noarch: []
-
-services_noarch: []
-
-controller_packages_noarch: []
-compute_packages_noarch: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2
deleted file mode 100644
index 4f186b67..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2
+++ /dev/null
@@ -1,426 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = True
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = True
-
-# Where to store Tacker state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/tacker
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-auth_strategy = keystone
-policy_file = /usr/local/etc/tacker/policy.json
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-# log_dir =
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ internal_ip }}
-
-# Port to bind the API server to
-bind_port = 8888
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of tacker.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Tacker core plugin entrypoint to be loaded from the
-# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the tacker source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-# core_plugin =
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the tacker source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-
-service_plugins = vnfm,nfvo
-
-# Paste configuration file
-# api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-# auth_strategy = keystone
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Tacker is
-# being used in conjunction with nova security groups
-# allow_overlapping_ips = False
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = tacker.openstack.common.rpc.impl_kombu
-# Size of RPC thread pool
-# rpc_thread_pool_size = 64
-# Size of RPC connection pool
-# rpc_conn_pool_size = 30
-# Seconds to wait for a response from call or multicall
-# rpc_response_timeout = 60
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-# rpc_cast_timeout = 30
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = tacker
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# IP address of the RabbitMQ installation
-# rabbit_host = localhost
-# Password of the RabbitMQ server
-# rabbit_password = guest
-# Port where RabbitMQ server is running/listening
-# rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-# rabbit_userid = guest
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-
-# QPID
-# rpc_backend=tacker.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=tacker.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = tacker.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = tacker.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = tacker.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-# default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-# notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-# agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# tacker server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to tacker server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-# api_workers = 0
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-# rpc_workers = 0
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== tacker nova interactions ==========
-# Send notification to nova when port status is active.
-# notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-# notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-# nova_url = http://127.0.0.1:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-# nova_region_name =
-
-# Username for connection to nova in admin context
-# nova_admin_username =
-
-# The uuid of the admin nova tenant
-# nova_admin_tenant_id =
-
-# Password for connection to nova in admin context.
-# nova_admin_password =
-
-# Authorization URL for connection to nova in admin context.
-# nova_admin_auth_url =
-
-# CA file for novaclient to verify server certificates
-# nova_ca_certificates_file =
-
-# Boolean to control ignoring SSL errors on the nova url
-# nova_api_insecure = False
-
-# Number of seconds between sending events to nova if there are any events to send
-# send_events_interval = 2
-
-# ======== end of tacker nova interactions ==========
-
-[agent]
-# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-# report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-signing_dir = /var/cache/tacker
-#cafile = /opt/stack/data/ca-bundle.pem
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = tacker
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-identity_uri = http://{{ internal_vip.ip }}:5000/v2.0
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/tacker
-connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main tacker server. (Leave it as is if the database runs on this host.)
-# connection = sqlite://
-# NOTE: In deployment the [database] section and its connection attribute may
-# be set in the corresponding core plugin '.ini' file. However, it is suggested
-# to put the [database] section and its connection attribute in this
-# configuration file.
-
-# Database engine for which script will be generated when using offline
-# migration
-# engine =
-
-# The SQLAlchemy connection string used to connect to the slave database
-# slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-# max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-# retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-# min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-# max_pool_size = 10
-
-# Timeout in seconds before idle sql connections are reaped
-# idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-# max_overflow = 20
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-# connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-# connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-# pool_timeout = 10
-
-[tacker]
-# Specify drivers for hosting device
-# infra_driver = heat,nova,noop
-
-# Specify drivers for mgmt
-# mgmt_driver = noop,openwrt
-
-# Specify drivers for monitoring
-# monitor_driver = ping, http_ping
-
-[nfvo_vim]
-# Supported VIM drivers, resource orchestration controllers such as OpenStack, kvm
-#Default VIM driver is OpenStack
-#vim_drivers = openstack
-#Default VIM placement if vim id is not provided
-default_vim = VIM0
-
-[vim_keys]
-#openstack = /etc/tacker/vim/fernet_keys
-[tacker_nova]
-# parameters for novaclient to talk to nova
-region_name = RegionOne
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = nova
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-
-[tacker_heat]
-heat_uri = http://{{ internal_vip.ip }}:8004/v1
-stack_retries = 60
-stack_retry_wait = 5
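
This template is rendered by the (deleted) tacker role rather than shipped verbatim; the Jinja variables internal_ip and internal_vip.ip are filled in at deploy time. A hedged sketch of how such a template is typically applied, with the destination path and handler name being assumptions:

- name: render tacker configuration (illustrative)
  template:
    src: tacker.j2
    dest: /usr/local/etc/tacker/tacker.conf
  notify: restart tacker service
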
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/templates/neutron.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/templates/neutron.conf
deleted file mode 100644
index 33231ed5..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/templates/neutron.conf
+++ /dev/null
@@ -1,486 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ VERBOSE }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-notify_nova_on_port_status_changes = True
-notify_nova_on_port_data_changes = True
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response.
-# 'infinite' or a negative value means no maximum limit; otherwise the
-# value must be greater than 0. If the number of items requested is
-# greater than pagination_max_limit, the server returns at most
-# pagination_max_limit items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ internal_vip.ip }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-{% if NOVA_ADMIN_TENANT_ID|default('') %}
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-{% endif %}
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on the back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on the back-end, a health monitor is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 30
-use_db_reconnect = True
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
-
-{% if enable_fwaas %}
-[fwaas]
-driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
-enabled = True
-{% endif %}
-
-[nova]
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = nova
-password = {{ NOVA_PASS }}
-
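Note: the template above is plain Jinja2; values such as {{ internal_vip.ip }}, {{ rabbit_host }} and {{ NEUTRON_PASS }} are substituted from the adapter's group/host variables when the neutron role renders the file. A minimal sketch of how such a template is typically rendered and the services reloaded (the destination path is the standard neutron location, but the task and handler names here are illustrative, not taken from this change):

- name: render neutron.conf from the Jinja2 template
  template:
    src: neutron.conf
    dest: /etc/neutron/neutron.conf
    owner: neutron
    group: neutron
    mode: 0640
  notify:
    - restart neutron services   # illustrative handler that restarts the neutron services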
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/templates/nova.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/templates/nova.conf
deleted file mode 100644
index 3a5735cf..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/templates/nova.conf
+++ /dev/null
@@ -1,96 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE }}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=osapi_compute,metadata
-
-default_floating_pool={{ public_net_info.network }}
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-memcached_servers = {{ memcached_servers }}
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[api_database]
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-memcached_servers = {{ memcached_servers }}
-
-[glance]
-host = {{ internal_vip.ip }}
-
-[neutron]
-url = http://{{ internal_vip.ip }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-auth_type = password
-auth_url = http://{{ internal_vip.ip }}:35357
-password = {{ NEUTRON_PASS }}
-username = neutron
-project_domain_name = default
-user_domain_name = default
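Note: the Jinja2 block at the top of this template turns the haproxy_hosts mapping into a comma-separated host:11211 list for memcached_servers. The same expression can be reproduced in a playbook to see what it yields; this is only a sketch, and the two haproxy_hosts entries are placeholder values:

- hosts: localhost
  gather_facts: false
  vars:
    haproxy_hosts:          # assumed shape: hostname -> IP, as the template implies
      host1: 10.1.0.50
      host2: 10.1.0.51
  tasks:
    - name: build the memcached_servers string the same way the template does
      set_fact:
        memcached_servers: "{% set servers = [] %}{% for host in haproxy_hosts.values() %}{% set _ = servers.append('%s:11211' % host) %}{% endfor %}{{ servers | join(',') }}"

    - name: show the result, e.g. 10.1.0.50:11211,10.1.0.51:11211
      debug:
        var: memcached_servers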
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml
deleted file mode 100644
index 236035e0..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml
+++ /dev/null
@@ -1,264 +0,0 @@
----
-- hosts: all
- remote_user: root
- pre_tasks:
- - name: make sure ssh dir exists
- file:
- path: '{{ item.path }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- state: directory
- mode: 0755
- with_items:
- - path: /root/.ssh
- owner: root
- group: root
-
- - name: write ssh config
- copy:
- content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
- dest: '{{ item.dest }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- mode: 0600
- with_items:
- - dest: /root/.ssh/config
- owner: root
- group: root
-
- - name: generate ssh keys
- shell: if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "already gen ssh key!"; fi;
-
- - name: fetch ssh keys
- fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes
-
- - authorized_key:
- user: root
- key: "{{ lookup('file', 'item') }}"
- with_fileglob:
- - /tmp/ssh-keys-*
- max_fail_percentage: 0
- roles:
- - common
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - setup-network
-
-- hosts: ha
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ha
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - memcached
- - apache
- - database
- - mq
- - keystone
- - nova-controller
- - neutron-controller
- - cinder-controller
- - glance
- - neutron-common
- - neutron-network
- - ceilometer_controller
- - dashboard
- - heat
- - aodh
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - storage
-
-- hosts: compute
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - nova-compute
- - neutron-compute
- - cinder-volume
- - ceilometer_compute
-
-#- hosts: all
-# remote_user: root
-# accelerate: true
-# max_fail_percentage: 0
-# roles:
-# - moon
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - secgroup
-
-- hosts: ceph_adm
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles: []
- # - ceph-deploy
-
-- hosts: ceph
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-purge
- - ceph-config
-
-- hosts: ceph_mon
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-mon
-
-- hosts: ceph_osd
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-osd
-
-- hosts: ceph
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-openstack
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - monitor
-
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- tasks:
- - name: set bash to nova
- user:
- name: nova
- shell: /bin/bash
-
- - name: make sure ssh dir exists
- file:
- path: '{{ item.path }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- state: directory
- mode: 0755
- with_items:
- - path: /var/lib/nova/.ssh
- owner: nova
- group: nova
-
- - name: copy ssh keys for nova
- shell: cp -rf /root/.ssh/id_rsa /var/lib/nova/.ssh;
-
- - name: write ssh config
- copy:
- content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
- dest: '{{ item.dest }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- mode: 0600
- with_items:
- - dest: /var/lib/nova/.ssh/config
- owner: nova
- group: nova
-
- - authorized_key:
- user: nova
- key: "{{ lookup('file', 'item') }}"
- with_fileglob:
- - /tmp/ssh-keys-*
-
- - name: chown ssh file
- shell: chown -R nova:nova /var/lib/nova/.ssh;
-
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - odl_cluster
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - onos_cluster
-
-- hosts: all
- remote_user: root
- sudo: True
- max_fail_percentage: 0
- roles:
- - open-contrail
-
-- hosts: all
- remote_user: root
- accelerate: true
- serial: 1
- max_fail_percentage: 0
- roles:
- - odl_cluster_neutron
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - odl_cluster_post
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - ext-network
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - boot-recovery
-
-- hosts: controller
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - controller-recovery
-
-- hosts: compute
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - compute-recovery
-
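Note: the pre_tasks and the later nova tasks in this playbook establish passwordless SSH between nodes by generating a key pair on every host, fetching the public keys to /tmp/ssh-keys-* on the deploy node, and authorizing all of them everywhere. A condensed sketch of that pattern (paths match the playbook; note that lookup('file', item) takes the loop variable unquoted):

- hosts: all
  remote_user: root
  tasks:
    - name: generate a key pair if one does not exist yet
      shell: test -f ~/.ssh/id_rsa.pub || ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""

    - name: collect every host's public key on the deploy node
      fetch:
        src: /root/.ssh/id_rsa.pub
        dest: /tmp/ssh-keys-{{ ansible_hostname }}
        flat: yes

    - name: authorize all collected keys on every host
      authorized_key:
        user: root
        key: "{{ lookup('file', item) }}"    # read each fetched public key file
      with_fileglob:
        - /tmp/ssh-keys-*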
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/handlers/main.yml
deleted file mode 100644
index b3399e0c..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/handlers/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart aodh services
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
-
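Note: the handler above restarts every entry of services | union(services_noarch); services comes from the role's per-distribution vars file (not part of this hunk), while services_noarch is the empty list defined in vars/main.yml further below. A small sketch of how the two variables combine; the aodh service names are illustrative, not taken from this change:

# vars/Debian.yml (illustrative values)
services:
  - aodh-api
  - aodh-evaluator
  - aodh-notifier
  - aodh-listener

# vars/main.yml (as deleted below)
services_noarch: []

# With these values, "services | union(services_noarch)" yields just the four
# aodh services above, and the handler restarts each of them in turn.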
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_config.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_config.yml
deleted file mode 100644
index e60d5338..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_config.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: aodh db sync
- shell: su -s /bin/sh -c "aodh-dbsync" aodh
- notify:
- - restart aodh services
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/main.yml
deleted file mode 100644
index 9b61915f..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include: aodh_install.yml
- tags:
- - install
- - aodh_install
- - aodh
-
-- include: aodh_config.yml
- when: inventory_hostname == groups['controller'][0]
- tags:
- - config
- - aodh_config
- - aodh
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/main.yml
deleted file mode 100644
index b17f6ed0..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-##
-## All rights reserved. This program and the accompanying materials
-## are made available under the terms of the Apache License, Version 2.0
-## which accompanies this distribution, and is available at
-## http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
----
-packages_noarch: []
-
-services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/handlers/main.yml
deleted file mode 100644
index 10b7c683..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/handlers/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart ceilometer service
- service: name={{ item }} state=restarted enabled=yes
- with_items: ceilometer_services
-
-- name: restart nova service
- service: name={{ item }} state=restarted enabled=yes
- with_items: nova_services
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/main.yml
deleted file mode 100644
index 1e3c04d7..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include: ceilometer_install.yml
- tags:
- - install
- - ceilometer_install
- - ceilometer
-
-- include: ceilometer_config.yml
- tags:
- - config
- - ceilometer_config
- - ceilometer
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/Debian.yml
deleted file mode 100644
index 1bf3956f..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/Debian.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - ceilometer-agent-compute
-
-ceilometer_services:
- - ceilometer-agent-compute
-
-nova_services:
- - nova-compute
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/RedHat.yml
deleted file mode 100644
index c5778a49..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/RedHat.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - openstack-ceilometer-compute
- - python-ceilometerclient
- - python-pecan
-
-ceilometer_services:
- - openstack-ceilometer-compute
- - openstack-nova-compute
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/main.yml
deleted file mode 100644
index 209e1e00..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/vars/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-metering_secret: 1c5df72079b31fb47747
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/handlers/main.yml
deleted file mode 100644
index a3bfb85d..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/handlers/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart ceilometer service
- service: name={{ item }} state=restarted enabled=yes
- with_items: ceilometer_services
-
-- name: restart glance_cinder service
- service: name={{ item }} state=restarted enabled=yes
- with_items: glance_cinder_services
-
-- name: reload apache server
- service: name=apache2 state=reloaded
-
-- name: restart apache server
- service: name=apache2 state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/main.yml
deleted file mode 100644
index 1e3c04d7..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include: ceilometer_install.yml
- tags:
- - install
- - ceilometer_install
- - ceilometer
-
-- include: ceilometer_config.yml
- tags:
- - config
- - ceilometer_config
- - ceilometer
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/Debian.yml
deleted file mode 100644
index de860533..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/Debian.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - ceilometer-api
- - ceilometer-collector
- - ceilometer-agent-central
- - ceilometer-agent-notification
- - python-ceilometerclient
-
-ceilometer_services:
- - ceilometer-agent-central
- - ceilometer-agent-notification
- - ceilometer-collector
-
-glance_cinder_services:
- - glance-registry
- - glance-api
- - cinder-api
- - cinder-scheduler
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/RedHat.yml
deleted file mode 100644
index de860533..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/RedHat.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - ceilometer-api
- - ceilometer-collector
- - ceilometer-agent-central
- - ceilometer-agent-notification
- - python-ceilometerclient
-
-ceilometer_services:
- - ceilometer-agent-central
- - ceilometer-agent-notification
- - ceilometer-collector
-
-glance_cinder_services:
- - glance-registry
- - glance-api
- - cinder-api
- - cinder-scheduler
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/main.yml
deleted file mode 100644
index 209e1e00..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-metering_secret: 1c5df72079b31fb47747
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/tasks/install_mon.yml
deleted file mode 100644
index 1d14c2d2..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/tasks/install_mon.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: Create a default data directory
- file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}" state="directory"
-
-- name: Populate the monitor daemon
- shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring"
-
-- name: Change ceph/mon dir owner to ceph
- shell: "chown -R ceph:ceph /var/lib/ceph/mon"
- when: ansible_os_family == "Debian"
-
-- name: copy templates
- template:
- src: ceph-mon.service
- dest: /lib/systemd/system/ceph-mon.service
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: Touch the done and auto start file
- file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch"
- with_items:
- - "done"
- - "{{ ceph_start_type }}"
-
-- name: start mon daemon
- shell: "{{ ceph_start_script }}"
-
-- name: wait for creating osd keyring
- wait_for: path=/var/lib/ceph/bootstrap-osd/ceph.keyring
-
-- name: fetch osd keyring
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
- run_once: True
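Note: the last two tasks wait for the monitor to create the bootstrap-osd keyring and then pull it to the deploy node with fetch ... flat=yes, run_once; the ceph-osd role (deleted further below) pushes the same file back out with copy before preparing the disks. A minimal sketch of that fetch-then-copy hand-off, with generic group names:

- hosts: ceph_mon
  remote_user: root
  tasks:
    - name: pull the bootstrap-osd keyring to the deploy node once
      fetch:
        src: /var/lib/ceph/bootstrap-osd/ceph.keyring
        dest: /tmp/ceph.osd.keyring
        flat: yes
      run_once: true

- hosts: ceph_osd
  remote_user: root
  tasks:
    - name: distribute the keyring to the OSD nodes
      copy:
        src: /tmp/ceph.osd.keyring
        dest: /var/lib/ceph/bootstrap-osd/ceph.keyring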
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/templates/ceph-mon.service b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/templates/ceph-mon.service
deleted file mode 100644
index 5a3cf753..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/templates/ceph-mon.service
+++ /dev/null
@@ -1,22 +0,0 @@
-[Unit]
-Description=Ceph cluster monitor daemon
-Documentation=man:ceph-mon
-
-After=network-online.target local-fs.target ceph-create-keys.service
-Wants=network-online.target local-fs.target ceph-create-keys.service
-
-PartOf=ceph.target
-
-[Service]
-LimitNOFILE=1048576
-LimitNPROC=1048576
-EnvironmentFile=-/etc/default/ceph
-Environment=CLUSTER=ceph
-ExecStart=/usr/bin/ceph-mon -f --cluster ${CLUSTER} --id {{ inventory_hostname }} --setuser ceph --setgroup ceph
-ExecReload=/bin/kill -HUP $MAINPID
-Restart=on-failure
-RestartSec=30
-TasksMax=infinity
-
-[Install]
-WantedBy=multi-user.target
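Note: this unit file is installed by the "copy templates" task in install_mon.yml above, and the monitor is then started through the ceph_start_script variable ("service ceph-mon start" on Debian). As a sketch only, the same installation can also be expressed with the template and systemd modules so that systemd re-reads the unit when it changes (the handler name is illustrative; the systemd module needs Ansible 2.2 or newer):

- name: install the ceph-mon unit file
  template:
    src: ceph-mon.service
    dest: /lib/systemd/system/ceph-mon.service
    mode: 0644
  notify: restart ceph-mon

# handlers/main.yml (illustrative)
- name: restart ceph-mon
  systemd:
    name: ceph-mon
    state: restarted
    daemon_reload: yes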
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/vars/Debian.yml
deleted file mode 100644
index a792acad..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/vars/Debian.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-ceph_start_script: "service ceph-mon start"
-ceph_start_type: "systemd"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml
deleted file mode 100644
index 2097ca57..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-##
-## All rights reserved. This program and the accompanying materials
-## are made available under the terms of the Apache License, Version 2.0
-## which accompanies this distribution, and is available at
-## http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
----
-- name: get mount info
- command: mount
- register: mount_info
-
-- name: try unmount image nfs directory
- shell: |
- umount /var/lib/glance/images
- sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- when: mount_info.stdout.find('images') != -1
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/main.yml
deleted file mode 100644
index 06c3acb6..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- include_vars: "{{ ansible_os_family }}.yml"
- tags:
- - ceph_deploy
- - ceph_openstack_pre
- - ceph_openstack_conf
- - ceph_openstack_post
- - ceph_openstack
-
-- include: ceph_openstack_pre.yml
- tags:
- - ceph_deploy
- - ceph_openstack_pre
- - ceph_openstack
-
-- include: ceph_openstack_conf.yml
- tags:
- - ceph_deploy
- - ceph_openstack_conf
- - ceph_openstack
-
-- include: ceph_openstack_post.yml
- tags:
- - ceph_deploy
- - ceph_openstack_post
- - ceph_openstack
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/vars/Debian.yml
deleted file mode 100755
index db10bd14..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/vars/Debian.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - ceph-deploy
- - python-flask
- - libgoogle-perftools4
- - libleveldb1v5
- - liblttng-ust0
- - libsnappy1v5
- - librbd1
- - librados2
- - python-ceph
- - ceph
- - ceph-mds
- - ceph-common
- - ceph-fs-common
- - gdisk
-
-services: []
-
-cinder_service: cinder-volume
-nova_service: nova-compute
-glance_service: glance-api
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-osd/tasks/install_osd.yml
deleted file mode 100644
index 363e5e6d..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-osd/tasks/install_osd.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: create osd lv and mount it on /var/local/osd
- script: create_osd.sh
-
-- name: fetch osd keyring from ceph_adm
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
- delegate_to: "{{ public_vip.ip }}"
- when: compute_expansion
-
-- name: copy osd keyring
- copy: src="/tmp/ceph.osd.keyring" dest="/var/lib/ceph/bootstrap-osd/ceph.keyring"
-
-- name: prepare osd disk
- shell: ceph-disk prepare --fs-type xfs /var/local/osd
-
-- name: change local/osd dir owner to ceph
- shell: chown -R ceph:ceph /var/local/osd
- when: ansible_os_family == "Debian"
-
-- name: activate osd node
- shell: ceph-disk activate /var/local/osd
-
-- name: enable ceph service
- service: name=ceph enabled=yes
-
-- name: rebuild osd after reboot
- lineinfile: dest=/etc/init/ceph-osd-all-starter.conf insertafter="^task" line="pre-start script\n set -e\n /opt/setup_storage/losetup.sh\n sleep 3\n mount /dev/storage-volumes/ceph0 /var/local/osd\nend script"
- when: ansible_os_family == "Debian"
-
-- name: rebuild osd after reboot for centos
- lineinfile: dest=/etc/init.d/ceph insertafter="^### END INIT INFO" line="\nsleep 1\nmount /dev/storage-volumes/ceph0 /var/local/osd"
- when: ansible_os_family == "RedHat"
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-controller/templates/cinder.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-controller/templates/cinder.conf
deleted file mode 100644
index d428a078..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-controller/templates/cinder.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
-[DEFAULT]
-rootwrap_config = /etc/cinder/rootwrap.conf
-api_paste_config = /etc/cinder/api-paste.ini
-iscsi_helper = tgtadm
-volume_name_template = volume-%s
-volume_group = storage-volumes
-verbose = {{ VERBOSE }}
-debug = {{ DEBUG }}
-auth_strategy = keystone
-state_path = /var/lib/cinder
-lock_path = /var/lock/cinder
-notification_driver = cinder.openstack.common.notifier.rpc_notifier
-volumes_dir = /var/lib/cinder/volumes
-transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
-log_file = /var/log/cinder/cinder.log
-
-control_exchange = cinder
-rpc_backend = rabbit
-my_ip = {{ storage_controller_host }}
-
-glance_host = {{ internal_vip.ip }}
-glance_port = 9292
-api_rate_limit = False
-storage_availability_zone = nova
-
-quota_volumes = 10
-quota_gigabytes = 1000
-quota_driver = cinder.quota.DbQuotaDriver
-
-osapi_volume_listen = {{ storage_controller_host }}
-osapi_volume_listen_port = 8776
-
-db_backend = sqlalchemy
-volume_name_template = volume-%s
-snapshot_name_template = snapshot-%s
-
-max_gigabytes = 10000
-
-volume_clear = zero
-volume_clear_size = 10
-
-iscsi_ip_address = {{ storage_controller_host }}
-iscsi_port = 3260
-iscsi_helper = tgtadm
-
-volumes_dir = /var/lib/cinder/volumes
-volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
-
-[database]
-connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
-idle_timeout = 30
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = cinder
-password = {{ CINDER_PASS }}
-
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = cinder
-admin_password = {{ CINDER_PASS }}
-
-[keymgr]
-encryption_auth_url=http://{{ internal_vip.ip }}:5000/v3
-
-[oslo_messaging_rabbit]
-rabbit_host = {{ rabbit_host }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-[oslo_concurrency]
-lock_path = /var/lib/cinder/tmp
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-volume/templates/cinder.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-volume/templates/cinder.conf
deleted file mode 100644
index e7946b5c..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-volume/templates/cinder.conf
+++ /dev/null
@@ -1,75 +0,0 @@
-[DEFAULT]
-rootwrap_config = /etc/cinder/rootwrap.conf
-api_paste_config = /etc/cinder/api-paste.ini
-iscsi_helper = tgtadm
-volume_name_template = volume-%s
-volume_group = storage-volumes
-verbose = True
-auth_strategy = keystone
-state_path = /var/lib/cinder
-lock_path = /var/lib/cinder/tmp
-notification_driver=cinder.openstack.common.notifier.rpc_notifier
-volumes_dir = /var/lib/cinder/volumes
-transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
-log_file=/var/log/cinder/cinder.log
-
-control_exchange = cinder
-rpc_backend = rabbit
-my_ip = {{ storage_controller_host }}
-
-glance_host = {{ internal_vip.ip }}
-glance_port = 9292
-glance_api_servers = http://{{ internal_vip.ip }}:9292
-api_rate_limit = False
-storage_availability_zone = nova
-
-quota_volumes = 10
-quota_gigabytes = 1000
-quota_driver = cinder.quota.DbQuotaDriver
-
-osapi_volume_listen = {{ storage_controller_host }}
-osapi_volume_listen_port = 8776
-
-db_backend = sqlalchemy
-volume_name_template = volume-%s
-snapshot_name_template = snapshot-%s
-
-max_gigabytes = 10000
-
-volume_clear = zero
-volume_clear_size = 10
-
-iscsi_ip_address = {{ storage_controller_host }}
-iscsi_port=3260
-iscsi_helper=tgtadm
-
-volumes_dir=/var/lib/cinder/volumes
-volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
-
-[database]
-connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
-idle_timeout = 30
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = cinder
-password = {{ CINDER_PASS }}
-
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = cinder
-admin_password = {{ CINDER_PASS }}
-
-[lvm]
-volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
-volume_group = cinder-volumes
-iscsi_protocol = iscsi
-iscsi_helper = tgtadm
-
-[oslo_concurrency]
-lock_path = /var/lib/cinder/tmp
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/common/templates/pip.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/common/templates/pip.conf
deleted file mode 100644
index 59981258..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/common/templates/pip.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-[global]
-find-links = http://{{ COMPASS_SERVER.stdout_lines[0] }}/pip-openstack
-no-index = true
-[install]
-trusted-host={{ COMPASS_SERVER.stdout_lines[0] }}
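Note: this pip.conf points pip at a package mirror served from the Compass node: find-links plus no-index makes pip resolve only against that local repository, and trusted-host allows the plain-HTTP host. A sketch of how the common role could put it in place and then install its pip_packages list against the mirror (the /etc/pip.conf destination and the task wording are assumptions; only the template name comes from the role's vars):

- name: install pip.conf pointing at the local mirror
  template:
    src: pip.conf                  # the pip_conf template shown above
    dest: /etc/pip.conf            # assumed global location read by pip

- name: install python packages from the mirror
  pip:
    name: "{{ item }}"
  with_items: "{{ pip_packages }}"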
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/common/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/common/vars/Debian.yml
deleted file mode 100644
index 46e0374f..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/common/vars/Debian.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - ubuntu-cloud-keyring
- - python-dev
- - openvswitch-switch
- - openvswitch-switch-dpdk
- - python-memcache
- - python-iniparse
- - python-lxml
- - python-crypto
- #- python-d*  # TODO: needs to be removed
-
-pip_packages:
- - crudini
- - python-keyczar
- - yang2tosca
-
-pip_conf: pip.conf
-
-services:
- - ntp
-
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/handlers/main.yml
deleted file mode 100644
index cf535a11..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/handlers/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart congress services
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_db.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_db.yml
deleted file mode 100644
index 1883509b..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_db.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: create congress db
- mysql_db:
- login_unix_socket: /var/run/mysqld/mysqld.sock
- name: "{{ item.db }}"
- state: present
- with_items: "{{ credentials }}"
-
-- name: create congress db user
- mysql_user:
- login_unix_socket: /var/run/mysqld/mysqld.sock
- name: "{{ item[0].user }}"
- password: "{{ item[0].password }}"
- priv: "*.*:ALL,GRANT"
- host: "{{ item[1] }}"
- state: present
- with_nested:
- - "{{ credentials }}"
- - ['%', 'localhost']
-
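Note: the with_nested loop above crosses the credentials list with the two host patterns, so every database user is created for both '%' and 'localhost'. A sketch of the variable shape the tasks expect and of what one loop iteration looks like (the values are placeholders, not the real credentials):

# expected shape of the credentials variable (placeholder values)
credentials:
  - user: congress
    password: CONGRESS_DBPASS
    db: congress

# with_nested then yields pairs such as:
#   item[0] -> {user: congress, password: CONGRESS_DBPASS, db: congress}
#   item[1] -> '%'         (and 'localhost' on the next iteration)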
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/api-paste.ini b/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/api-paste.ini
deleted file mode 100644
index 39be570b..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/api-paste.ini
+++ /dev/null
@@ -1,34 +0,0 @@
-[composite:congress]
-use = egg:Paste#urlmap
-/: congressversions
-/v1: congress_api_v1
-
-[pipeline:congressversions]
-pipeline = cors catch_errors congressversionapp
-
-[app:congressversionapp]
-paste.app_factory = congress.api.versions:Versions.factory
-
-[composite:congress_api_v1]
-use = call:congress.auth:pipeline_factory
-keystone = cors request_id catch_errors authtoken keystonecontext congress_api
-noauth = cors request_id catch_errors congress_api
-
-[app:congress_api]
-paste.app_factory = congress.service:congress_app_factory
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:catch_errors]
-paste.filter_factory = oslo_middleware:CatchErrors.factory
-
-[filter:keystonecontext]
-paste.filter_factory = congress.auth:CongressKeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = congress
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/congress.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/congress.conf
deleted file mode 100644
index 0305b418..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/congress.conf
+++ /dev/null
@@ -1,510 +0,0 @@
-{% set memcached_servers = [] %}
-{% set rabbitmq_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
-[DEFAULT]
-
-#
-# From congress
-#
-# The host IP to bind to (string value)
-bind_host = {{ internal_ip }}
-
-# The port to bind to (port value)
-# Minimum value: 0
-# Maximum value: 65535
-bind_port = 1789
-
-# Thread pool size for eventlet. (integer value)
-#max_simultaneous_requests = 1024
-
-# Set this to true to enable TCP_KEEPALIVE socket option on connections received
-# by the API server. (boolean value)
-#tcp_keepalive = false
-
-# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Only
-# applies if tcp_keepalive is true. Not supported on OS X. (integer value)
-#tcp_keepidle = 600
-
-# The path to the latest policy dump (string value)
-policy_path = /etc/congress/policy.json
-
-# The file containing datasource configuration (string value)
-#datasource_file = <None>
-
-# The absolute path to the congress repo (string value)
-#root_path = <None>
-
-# The number of worker processes to serve the congress API application.
-# (integer value)
-#api_workers = 1
-
-# The API paste config file to use (string value)
-#api_paste_config = api-paste.ini
-
-# The type of authentication to use (string value)
-auth_strategy = keystone
-
-# List of driver class paths to import. (list value)
-drivers = congress.datasources.neutronv2_driver.NeutronV2Driver,congress.datasources.glancev2_driver.GlanceV2Driver,congress.datasources.nova_driver.NovaDriver,congress.datasources.keystone_driver.KeystoneDriver,congress.datasources.ceilometer_driver.CeilometerDriver,congress.datasources.cinder_driver.CinderDriver,congress.datasources.swift_driver.SwiftDriver,congress.datasources.plexxi_driver.PlexxiDriver,congress.datasources.vCenter_driver.VCenterDriver,congress.datasources.cloudfoundryv2_driver.CloudFoundryV2Driver,congress.datasources.murano_driver.MuranoDriver,congress.datasources.ironic_driver.IronicDriver
-
-
-# The number of seconds to wait between synchronizing datasource config from
-# the database (integer value)
-#datasource_sync_period = 0
-
-# Set this flag to False if you do not want Congress to execute actions.
-# (boolean value)
-#enable_execute_action = true
-
-# The flag to use Congress's new distributed architecture. Do not set it to True
-# in the L release, since the new architecture is still under implementation.
-# (boolean value)
-#distributed_architecture = false
-
-# Explicitly specify the temporary working directory (string value)
-#tempdir = <None>
-
-# Make exception message format errors fatal (boolean value)
-#fatal_exception_format_errors = false
-
-#
-# From oslo.log
-#
-
-# If set to true, the logging level will be set to DEBUG instead of the default
-# INFO level. (boolean value)
-# Note: This option can be changed without restarting.
-debug = True
-
-# DEPRECATED: If set to false, the logging level will be set to WARNING instead
-# of the default INFO level. (boolean value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#verbose = true
-
-# The name of a logging configuration file. This file is appended to any
-# existing logging configuration files. For details about logging configuration
-# files, see the Python logging module documentation. Note that when logging
-# configuration files are used then all logging configuration is set in the
-# configuration file and other logging configuration options are ignored (for
-# example, logging_context_format_string). (string value)
-# Note: This option can be changed without restarting.
-# Deprecated group/name - [DEFAULT]/log_config
-#log_config_append = <None>
-
-# Defines the format string for %%(asctime)s in log records. Default:
-# %(default)s . This option is ignored if log_config_append is set. (string
-# value)
-#log_date_format = %Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to send logging output to. If no default is set,
-# logging will go to stderr as defined by use_stderr. This option is ignored if
-# log_config_append is set. (string value)
-# Deprecated group/name - [DEFAULT]/logfile
-log_file = congress.log
-
-# (Optional) The base directory used for relative log_file paths. This option
-# is ignored if log_config_append is set. (string value)
-# Deprecated group/name - [DEFAULT]/logdir
-log_dir = /var/log/congress
-
-# Uses logging handler designed to watch file system. When log file is moved or
-# removed this handler will open a new log file with specified path
-# instantaneously. It makes sense only if log_file option is specified and
-# Linux platform is used. This option is ignored if log_config_append is set.
-# (boolean value)
-#watch_log_file = false
-
-# Use syslog for logging. Existing syslog format is DEPRECATED and will be
-# changed later to honor RFC5424. This option is ignored if log_config_append
-# is set. (boolean value)
-#use_syslog = false
-
-# Syslog facility to receive log lines. This option is ignored if
-# log_config_append is set. (string value)
-#syslog_log_facility = LOG_USER
-
-# Log output to standard error. This option is ignored if log_config_append is
-# set. (boolean value)
-#use_stderr = true
-
-# Format string to use for log messages with context. (string value)
-#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
-
-# Format string to use for log messages when context is undefined. (string
-# value)
-#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-
-# Additional data to append to log message when logging level for the message
-# is DEBUG. (string value)
-#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
-
-# Prefix each line of exception output with this format. (string value)
-#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
-
-# Defines the format string for %(user_identity)s that is used in
-# logging_context_format_string. (string value)
-#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
-
-# List of package logging levels in logger=LEVEL pairs. This option is ignored
-# if log_config_append is set. (list value)
-#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
-
-# Enables or disables publication of error events. (boolean value)
-#publish_errors = false
-
-# The format for an instance that is passed with the log message. (string
-# value)
-#instance_format = "[instance: %(uuid)s] "
-
-# The format for an instance UUID that is passed with the log message. (string
-# value)
-#instance_uuid_format = "[instance: %(uuid)s] "
-
-# Enables or disables fatal status of deprecations. (boolean value)
-#fatal_deprecations = false
-
-
-[cors]
-
-#
-# From oslo.middleware.cors
-#
-
-# Indicate whether this resource may be shared with the domain received in the
-# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
-# slash. Example: https://horizon.example.com (list value)
-#allowed_origin = <None>
-
-# Indicate that the actual request can include user credentials (boolean value)
-#allow_credentials = true
-
-# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
-# Headers. (list value)
-#expose_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Subject-Token,X-Service-Token
-
-# Maximum cache age of CORS preflight requests. (integer value)
-#max_age = 3600
-
-# Indicate which methods can be used during the actual request. (list value)
-#allow_methods = GET,PUT,POST,DELETE,PATCH
-
-# Indicate which header field names may be used during the actual request.
-# (list value)
-#allow_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id
-
-
-[cors.subdomain]
-
-#
-# From oslo.middleware.cors
-#
-
-# Indicate whether this resource may be shared with the domain received in the
-# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
-# slash. Example: https://horizon.example.com (list value)
-#allowed_origin = <None>
-
-# Indicate that the actual request can include user credentials (boolean value)
-#allow_credentials = true
-
-# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
-# Headers. (list value)
-#expose_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Subject-Token,X-Service-Token
-
-# Maximum cache age of CORS preflight requests. (integer value)
-#max_age = 3600
-
-# Indicate which methods can be used during the actual request. (list value)
-#allow_methods = GET,PUT,POST,DELETE,PATCH
-
-# Indicate which header field names may be used during the actual request.
-# (list value)
-#allow_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id
-
-
-[database]
-
-#
-# From oslo.db
-#
-
-# DEPRECATED: The file name to use with SQLite. (string value)
-# Deprecated group/name - [DEFAULT]/sqlite_db
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-# Reason: Should use config option connection or slave_connection to connect
-# the database.
-#sqlite_db = oslo.sqlite
-
-# If True, SQLite uses synchronous mode. (boolean value)
-# Deprecated group/name - [DEFAULT]/sqlite_synchronous
-#sqlite_synchronous = true
-
-# The back end to use for the database. (string value)
-# Deprecated group/name - [DEFAULT]/db_backend
-#backend = sqlalchemy
-
-# The SQLAlchemy connection string to use to connect to the database. (string
-# value)
-# Deprecated group/name - [DEFAULT]/sql_connection
-# Deprecated group/name - [DATABASE]/sql_connection
-# Deprecated group/name - [sql]/connection
-connection = mysql+pymysql://congress:{{ CONGRESS_DBPASS }}@{{ db_host }}/congress
-
-# The SQLAlchemy connection string to use to connect to the slave database.
-# (string value)
-#slave_connection = <None>
-
-# The SQL mode to be used for MySQL sessions. This option, including the
-# default, overrides any server-set SQL mode. To use whatever SQL mode is set
-# by the server configuration, set this to no value. Example: mysql_sql_mode=
-# (string value)
-#mysql_sql_mode = TRADITIONAL
-
-# Timeout before idle SQL connections are reaped. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_idle_timeout
-# Deprecated group/name - [DATABASE]/sql_idle_timeout
-# Deprecated group/name - [sql]/idle_timeout
-#idle_timeout = 3600
-
-# Minimum number of SQL connections to keep open in a pool. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_min_pool_size
-# Deprecated group/name - [DATABASE]/sql_min_pool_size
-#min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool. Setting a value of
-# 0 indicates no limit. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_pool_size
-# Deprecated group/name - [DATABASE]/sql_max_pool_size
-#max_pool_size = 5
-
-# Maximum number of database connection retries during startup. Set to -1 to
-# specify an infinite retry count. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_retries
-# Deprecated group/name - [DATABASE]/sql_max_retries
-#max_retries = 10
-
-# Interval between retries of opening a SQL connection. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_retry_interval
-# Deprecated group/name - [DATABASE]/reconnect_interval
-#retry_interval = 10
-
-# If set, use this value for max_overflow with SQLAlchemy. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_overflow
-# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
-#max_overflow = 50
-
-# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
-# value)
-# Minimum value: 0
-# Maximum value: 100
-# Deprecated group/name - [DEFAULT]/sql_connection_debug
-#connection_debug = 0
-
-# Add Python stack traces to SQL as comment strings. (boolean value)
-# Deprecated group/name - [DEFAULT]/sql_connection_trace
-#connection_trace = false
-
-# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
-# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
-#pool_timeout = <None>
-
-# Enable the experimental use of database reconnect on connection lost.
-# (boolean value)
-#use_db_reconnect = false
-
-# Seconds between retries of a database transaction. (integer value)
-#db_retry_interval = 1
-
-# If True, increases the interval between retries of a database operation up to
-# db_max_retry_interval. (boolean value)
-#db_inc_retry_interval = true
-
-# If db_inc_retry_interval is set, the maximum seconds between retries of a
-# database operation. (integer value)
-#db_max_retry_interval = 10
-
-# Maximum retries in case of connection error or deadlock error before error is
-# raised. Set to -1 to specify an infinite retry count. (integer value)
-#db_max_retries = 20
-
-
-[keystone_authtoken]
-
-#
-# From keystonemiddleware.auth_token
-#
-
-# Complete "public" Identity API endpoint. This endpoint should not be an
-# "admin" endpoint, as it should be accessible by all end users.
-# Unauthenticated clients are redirected to this endpoint to authenticate.
-# Although this endpoint should ideally be unversioned, client support in the
-# wild varies. If you're using a versioned v2 endpoint here, then this should
-# *not* be the same endpoint the service user utilizes for validating tokens,
-# because normal end users may not be able to reach that endpoint. (string
-# value)
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-project_name = service
-password = {{ CONGRESS_PASS }}
-username = congress
-auth_type = password
-# API version of the admin Identity API endpoint. (string value)
-
-# Do not handle authorization requests within the middleware, but delegate the
-# authorization decision to downstream WSGI components. (boolean value)
-#delay_auth_decision = false
-
-# Request timeout value for communicating with Identity API server. (integer
-# value)
-#http_connect_timeout = <None>
-
-# How many times are we trying to reconnect when communicating with Identity
-# API Server. (integer value)
-#http_request_max_retries = 3
-
-# Request environment key where the Swift cache object is stored. When
-# auth_token middleware is deployed with a Swift cache, use this option to have
-# the middleware share a caching backend with swift. Otherwise, use the
-# ``memcached_servers`` option instead. (string value)
-#cache = <None>
-
-# Required if identity server requires client certificate (string value)
-#certfile = <None>
-
-# Required if identity server requires client certificate (string value)
-#keyfile = <None>
-
-# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
-# Defaults to system CAs. (string value)
-#cafile = <None>
-
-# Verify HTTPS connections. (boolean value)
-#insecure = false
-
-# The region in which the identity server can be found. (string value)
-#region_name = <None>
-
-# Directory used to cache files related to PKI tokens. (string value)
-#signing_dir = <None>
-
-# Optionally specify a list of memcached server(s) to use for caching. If left
-# undefined, tokens will instead be cached in-process. (list value)
-# Deprecated group/name - [keystone_authtoken]/memcache_servers
-#memcached_servers = <None>
-
-# In order to prevent excessive effort spent validating tokens, the middleware
-# caches previously-seen tokens for a configurable duration (in seconds). Set
-# to -1 to disable caching completely. (integer value)
-#token_cache_time = 300
-
-# Determines the frequency at which the list of revoked tokens is retrieved
-# from the Identity service (in seconds). A high number of revocation events
-# combined with a low cache duration may significantly reduce performance. Only
-# valid for PKI tokens. (integer value)
-#revocation_cache_time = 10
-
-# (Optional) If defined, indicate whether token data should be authenticated or
-# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
-# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
-# cache. If the value is not one of these options or empty, auth_token will
-# raise an exception on initialization. (string value)
-# Allowed values: None, MAC, ENCRYPT
-#memcache_security_strategy = None
-
-# (Optional, mandatory if memcache_security_strategy is defined) This string is
-# used for key derivation. (string value)
-#memcache_secret_key = <None>
-
-# (Optional) Number of seconds memcached server is considered dead before it is
-# tried again. (integer value)
-#memcache_pool_dead_retry = 300
-
-# (Optional) Maximum total number of open connections to every memcached
-# server. (integer value)
-#memcache_pool_maxsize = 10
-
-# (Optional) Socket timeout in seconds for communicating with a memcached
-# server. (integer value)
-#memcache_pool_socket_timeout = 3
-
-# (Optional) Number of seconds a connection to memcached is held unused in the
-# pool before it is closed. (integer value)
-#memcache_pool_unused_timeout = 60
-
-# (Optional) Number of seconds that an operation will wait to get a memcached
-# client connection from the pool. (integer value)
-#memcache_pool_conn_get_timeout = 10
-
-# (Optional) Use the advanced (eventlet safe) memcached client pool. The
-# advanced pool will only work under python 2.x. (boolean value)
-#memcache_use_advanced_pool = false
-
-# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
-# middleware will not ask for service catalog on token validation and will not
-# set the X-Service-Catalog header. (boolean value)
-#include_service_catalog = true
-
-# Used to control the use and type of token binding. Can be set to: "disabled"
-# to not check token binding. "permissive" (default) to validate binding
-# information if the bind type is of a form known to the server and ignore it
-# if not. "strict" like "permissive" but if the bind type is unknown the token
-# will be rejected. "required" any form of token binding is needed to be
-# allowed. Finally the name of a binding method that must be present in tokens.
-# (string value)
-#enforce_token_bind = permissive
-
-# If true, the revocation list will be checked for cached tokens. This requires
-# that PKI tokens are configured on the identity server. (boolean value)
-#check_revocations_for_cached = false
-
-# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
-# or multiple. The algorithms are those supported by Python standard
-# hashlib.new(). The hashes will be tried in the order given, so put the
-# preferred one first for performance. The result of the first hash will be
-# stored in the cache. This will typically be set to multiple values only while
-# migrating from a less secure algorithm to a more secure one. Once all the old
-# tokens are expired this option should be set to a single value for better
-# performance. (list value)
-#hash_algorithms = md5
-
-# Authentication type to load (string value)
-# Deprecated group/name - [keystone_authtoken]/auth_plugin
-#auth_type = <None>
-
-# Config Section from which to load plugin specific options (string value)
-#auth_section = <None>
-
-
-[oslo_policy]
-
-#
-# From oslo.policy
-#
-
-# The JSON file that defines policies. (string value)
-# Deprecated group/name - [DEFAULT]/policy_file
-#policy_file = policy.json
-
-# Default rule. Enforced when a requested rule is not found. (string value)
-# Deprecated group/name - [DEFAULT]/policy_default_rule
-#policy_default_rule = default
-
-# Directories where policy configuration files are stored. They can be relative
-# to any directory in the search path defined by the config_dir option, or
-# absolute paths. The file defined by policy_file must exist for these
-# directories to be searched. Missing or empty directories are ignored. (multi
-# valued)
-# Deprecated group/name - [DEFAULT]/policy_dirs
-#policy_dirs = policy.d
-
-[oslo_messaging_rabbit]
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_hosts = {{ rabbitmq_servers }}
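The deleted congress.conf template builds its memcached_servers and rabbit_hosts strings by looping over haproxy_hosts in Jinja2 before the [DEFAULT] section. The following is a minimal sketch of that rendering using the jinja2 library; the host names and IPs are made-up sample values, and the final join is condensed into a filter rather than the template's separate set statements.

# Sketch only: renders the list-building header of the removed template.
from jinja2 import Template

template_src = """\
{% set memcached_servers = [] %}
{% set rabbitmq_servers = [] %}
{% for host in haproxy_hosts.values() %}
{% set _ = memcached_servers.append('%s:11211' % host) %}
{% set _ = rabbitmq_servers.append('%s:5672' % host) %}
{% endfor %}
memcached_servers = {{ memcached_servers | join(',') }}
rabbit_hosts = {{ rabbitmq_servers | join(',') }}
"""

# Sample inventory values, not taken from any real deployment.
print(Template(template_src, trim_blocks=True, lstrip_blocks=True).render(
    haproxy_hosts={"host1": "10.1.0.50", "host2": "10.1.0.51"}))
# memcached_servers = 10.1.0.50:11211,10.1.0.51:11211
# rabbit_hosts = 10.1.0.50:5672,10.1.0.51:5672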
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/policy.json b/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/policy.json
deleted file mode 100644
index 4476051d..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/templates/policy.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "context_is_admin": "role:admin",
- "admin_only": "rule:context_is_admin",
- "regular_user": "",
- "default": "rule:admin_only"
-}
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/vars/Debian.yml
deleted file mode 100644
index 1cc4645e..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/vars/Debian.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - congress
- - python-congressclient
- - python-cloudfoundryclient
-
-service:
- - congress
-
-credentials:
- - user: congress
- db: congress
- password: "{{ CONGRESS_DBPASS }}"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/vars/main.yml
deleted file mode 100644
index f6fef749..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/vars/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-
-services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/tasks/main.yml
deleted file mode 100644
index 9be6fd6c..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/tasks/main.yml
+++ /dev/null
@@ -1,106 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install dashboard packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: remove ubuntu theme
- action: "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent"
- when: ansible_os_family == 'Debian' and not enable_ubuntu_theme
- notify:
- - restart dashboard services
-
-- name: remove default apache2 config
- file:
- path: '{{ item }}'
- state: absent
- when: ansible_os_family == 'Debian'
- with_items:
- - '{{ apache_config_dir }}/conf-available/openstack-dashboard.conf'
- - '{{ apache_config_dir }}/conf-enabled/openstack-dashboard.conf'
- - '{{ apache_config_dir }}/sites-available/000-default.conf'
- - '{{ apache_config_dir }}/sites-enabled/000-default.conf'
- notify:
- - restart dashboard services
-
-- name: update apache2 configs
- template:
- src: openstack-dashboard.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/openstack-dashboard.conf'
- when: ansible_os_family == 'Debian'
- notify:
- - restart dashboard services
-
-- name: update apache2 configs redhat
- template:
- src: openstack-dashboard-redhat.conf.j2
- dest: '{{ apache_config_dir }}/conf.d/openstack-dashboard.conf'
- when: ansible_os_family == 'RedHat'
- notify:
- - restart dashboard services
-
-- name: enable dashboard
- file:
- src: "/etc/apache2/sites-available/openstack-dashboard.conf"
- dest: "/etc/apache2/sites-enabled/openstack-dashboard.conf"
- state: "link"
- when: ansible_os_family == 'Debian'
- notify:
- - restart dashboard services
-
-- name: update ubuntu horizon settings
- template:
- src: local_settings.py.j2
- dest: "/etc/openstack-dashboard/local_settings.py"
- when: ansible_os_family == 'Debian'
- notify:
- - restart dashboard services
-
-- name: precompile horizon css
- shell: /usr/bin/python /usr/share/openstack-dashboard/manage.py compress --force
- ignore_errors: True
- when: ansible_os_family == 'Debian'
- notify:
- - restart dashboard services
-
-- name: update redhat version horizon settings
- lineinfile:
- dest: /etc/openstack-dashboard/local_settings
- regexp: '{{ item.regexp }}'
- line: '{{ item.line }}'
- with_items:
- - regexp: '^WEBROOT[ \t]*=.*'
- line: 'WEBROOT = "/horizon"'
- - regexp: '^COMPRESS_OFFLINE[ \t]*=.*'
- line: 'COMPRESS_OFFLINE=False'
- - regexp: '^ALLOWED_HOSTS[ \t]*=.*'
- line: 'ALLOWED_HOSTS = ["*"]'
- - regexp: '^OPENSTACK_HOST[ \t]*=.*'
- line: 'OPENSTACK_HOST = "{{ internal_ip }}"'
- when: ansible_os_family == 'RedHat'
- notify:
- - restart dashboard services
-
-- meta: flush_handlers
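The removed RedHat branch of this task rewrites individual settings in /etc/openstack-dashboard/local_settings through regexp-anchored line replacement (Ansible lineinfile). A rough sketch of that replace-or-append behaviour follows; it is simplified (it replaces every matching line, whereas lineinfile replaces only the last match), and the file contents shown are illustrative.

# Simplified sketch of lineinfile's replace-or-append behaviour.
import re

def line_in_file(lines, regexp, line):
    pattern = re.compile(regexp)
    if any(pattern.search(l) for l in lines):
        # Simplification: replace every matching line, not just the last one.
        return [line if pattern.search(l) else l for l in lines]
    return lines + [line]

settings = ['OPENSTACK_HOST = "127.0.0.1"', 'DEBUG = False']
settings = line_in_file(settings, r'^OPENSTACK_HOST[ \t]*=.*',
                        'OPENSTACK_HOST = "10.1.0.50"')
print(settings)  # OPENSTACK_HOST line replaced, DEBUG line untouched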
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/openstack-dashboard.conf.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/openstack-dashboard.conf.j2
deleted file mode 100755
index 664af687..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/openstack-dashboard.conf.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
-{% if work_threads > 10 %}
-{% set work_threads = 10 %}
-{% endif %}
-
-<VirtualHost {{ internal_ip }}:80>
- WSGIScriptAlias /horizon {{ horizon_dir }}/wsgi/django.wsgi
- WSGIDaemonProcess horizon user=horizon group=horizon processes=4 threads={{ work_threads }}
- WSGIProcessGroup horizon
- Alias /static {{ horizon_dir }}/static/
- Alias /horizon/static {{ horizon_dir }}/static/
- <Directory {{ horizon_dir }}/wsgi>
- Order allow,deny
- Allow from all
- </Directory>
-</VirtualHost>
-
-
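The removed openstack-dashboard.conf.j2 sizes the WSGI daemon's threads as half the vCPU count, rounded up, capped at 10. A tiny sketch of that rule:

# Thread-count rule from the removed template: ceil(vcpus / 2), capped at 10.
def work_threads(vcpus: int) -> int:
    return min((vcpus + 1) // 2, 10)

assert work_threads(1) == 1
assert work_threads(8) == 4
assert work_threads(48) == 10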
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/vars/Debian.yml
deleted file mode 100644
index aaeb8cdb..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/vars/Debian.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages: []
-
-services:
- - memcached
- - apache2
-
-apache_config_dir: /etc/apache2
-horizon_dir: /usr/share/openstack-dashboard/openstack_dashboard
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_cluster_debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_cluster_debian.yml
deleted file mode 100644
index 442cd18b..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_cluster_debian.yml
+++ /dev/null
@@ -1,69 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: get cluster status
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
- register: cluster_status
- when:
- - inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: start first node to create new cluster
- shell: >
- service mysql bootstrap;
- service mysql start;
- when: |
- inventory_hostname == haproxy_hosts.keys()[0]
- and not cluster_status.stdout | search("OPERATIONAL")
-
-- name: wait for cluster ready
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
- register: cluster_status
- until: cluster_status|success
- failed_when: not cluster_status.stdout | search("OPERATIONAL")
- retries: 10
- delay: 3
- when: |
- inventory_hostname == haproxy_hosts.keys()[0]
- and not cluster_status.stdout | search("OPERATIONAL")
-
-- name: check whether this node is already in the cluster
- shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_incoming_addresses"'|awk '{print $2}'
- register: cluster_nodes
- changed_when: false
-
-- name: restart other nodes and join the cluster (first attempt)
- shell: service mysql restart;
- when: |
- inventory_hostname != haproxy_hosts.keys()[0]
- and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
- ignore_errors: True
-
-- name: delay 60 seconds
- shell: sleep 60
-
-- name: restart other nodes and join the cluster (second attempt)
- shell: service mysql restart;
- when: |
- inventory_hostname != haproxy_hosts.keys()[0]
- and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
-
-- name: chmod directory
- shell: >
- chmod 755 -R /var/lib/mysql/ ;
- chmod 755 -R /var/log/mysql/ ;
- chmod 755 -R /etc/mysql/conf.d/;
-
-- name: restart the first node
- shell: service mysql restart
- when: |
- (inventory_hostname == haproxy_hosts.keys()[0]
- and haproxy_hosts|length > 1
- and not cluster_nodes.stdout | search( '{{ internal_ip }}' ))
-
-
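The removed mariadb_cluster_debian.yml bootstraps Galera on the first haproxy host only when wsrep_evs_state is not OPERATIONAL, and restarts the remaining nodes whose own address is missing from wsrep_incoming_addresses. A small sketch of that decision logic follows; the inputs mirror the two "SHOW STATUS" queries the play runs, and the sample values are illustrative only.

# Sketch of the cluster-join decisions in the removed play (pure functions,
# no actual mysql calls).
def should_bootstrap(is_first_node: bool, wsrep_evs_state: str) -> bool:
    # The first haproxy host bootstraps a new cluster only when it is not
    # already part of an operational one.
    return is_first_node and "OPERATIONAL" not in wsrep_evs_state

def should_join(is_first_node: bool, incoming_addresses: str, internal_ip: str) -> bool:
    # Every other node restarts mysql to join, unless its own address already
    # appears in wsrep_incoming_addresses.
    return (not is_first_node) and internal_ip not in incoming_addresses

assert should_bootstrap(True, "")                         # fresh first node
assert not should_bootstrap(True, "OPERATIONAL")          # cluster already up
assert should_join(False, "10.1.0.50:3306", "10.1.0.51")  # node not yet joined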
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_install.yml
deleted file mode 100644
index 1b08172d..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_install.yml
+++ /dev/null
@@ -1,70 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: change open file limit
- copy:
- content: "* - nofile 65536 }}"
- dest: "/etc/security/limits.conf"
- mode: 0755
-
-- name: install mariadb packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: maridb_packages | union(packages_noarch)
-
-- name: create conf dir for wsrep
- file: path=/etc/my.cnf.d state=directory mode=0755
- when: ansible_os_family == "RedHat"
-
-- name: update mariadb config file
- template:
- src: '{{ item.src }}'
- dest: '{{ item.dest }}'
- backup: yes
- mode: 0644
- with_items: mysql_config
-
-- name: bugfix for rsync version 3.1
- lineinfile:
- dest: /usr/bin/wsrep_sst_rsync
- state: absent
- regexp: '{{ item }}'
- with_items:
- - "\\s*uid = \\$MYUID$"
- - "\\s*gid = \\$MYGID$"
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: set owner
- file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory mode=0755
-
-- name: get logfile stat
- stat: path='{{ mysql_data_dir }}/ib_logfile0'
- register: logfile_stat
-
-- debug: msg='{{ logfile_stat.stat.exists }}'
-- debug: msg='{{ logfile_stat.stat.size }}'
- when: logfile_stat.stat.exists
-
-- name: remove logfile if it exists and its size mismatches
- shell: 'rm -rf {{ mysql_data_dir }}/ib_logfile*'
- when: |
- logfile_stat.stat.exists
- and logfile_stat.stat.size != 1073741824
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/templates/data.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/templates/data.j2
deleted file mode 100644
index 66c2fead..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/templates/data.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/sh
-mysql -uroot -Dmysql <<EOF
-drop database if exists keystone;
-drop database if exists glance;
-drop database if exists neutron;
-drop database if exists nova;
-drop database if exists cinder;
-drop database if exists heat;
-drop database if exists aodh;
-
-CREATE DATABASE keystone;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON keystone.* TO 'keystone'@'{{ host }}' IDENTIFIED BY '{{ KEYSTONE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE glance;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON glance.* TO 'glance'@'{{ host }}' IDENTIFIED BY '{{ GLANCE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE neutron;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON neutron.* TO 'neutron'@'{{ host }}' IDENTIFIED BY '{{ NEUTRON_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE nova;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON nova.* TO 'nova'@'{{ host }}' IDENTIFIED BY '{{ NOVA_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE cinder;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON cinder.* TO 'cinder'@'{{ host }}' IDENTIFIED BY '{{ CINDER_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE heat;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON heat.* TO 'heat'@'{{ host }}' IDENTIFIED BY '{{ HEAT_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE aodh;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON aodh.* TO 'aodh'@'{{ host }}' IDENTIFIED BY '{{ AODH_DBPASS }}';
-{% endfor %}
-
-{% if WSREP_SST_USER is defined %}
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
-{% endfor %}
-{% endif %}
-EOF
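The removed data.j2 expands each database into GRANT statements for '%', 'localhost', and the node's own hostname. The sketch below shows that expansion for a single database; the hostname and password are placeholders, not values from the deployment.

# Sketch of what one data.j2 loop iteration expands to.
def grants(db, user, password, inventory_hostname):
    hosts = ["%", "localhost", inventory_hostname]
    return [
        "GRANT ALL ON %s.* TO '%s'@'%s' IDENTIFIED BY '%s';" % (db, user, h, password)
        for h in hosts
    ]

for line in grants("keystone", "keystone", "KEYSTONE_DBPASS_PLACEHOLDER", "host1"):
    print(line)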
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/Debian.yml
deleted file mode 100644
index 1021524d..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/Debian.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-mongodb_packages:
- - mongodb-server
- - mongodb-clients
- - python-pymongo
-
-mysql_packages:
- - python-mysqldb
- - mysql-server
-
-maridb_packages:
- - apt-transport-https
- - debconf-utils
- - libaio1
- - libc6
- - libdbd-mysql-perl
- - libgcc1
- - libgcrypt20
- - libstdc++6
- - python-software-properties
- - mariadb-client
- - galera-3
- - rsync
- - socat
- - mariadb-galera-server-10.0
- - python-mysqldb
-
-pip_packages: []
-
-services: []
-
-mongodb_service: mongodb
-mysql_config:
- - dest: /etc/mysql/my.cnf
- src: my.cnf
- - dest: /etc/mysql/conf.d/wsrep.cnf
- src: wsrep.cnf
-
-mysql_config_dir: /etc/mysql/conf.d
-mysql_data_dir: /var/lib/mysql
-
-mongodb_config:
- dest: /etc/mongodb.conf
- src: mongodb.conf
- journal: /var/lib/mongodb/journal/*
-
-wsrep_provider_file: "/usr/lib/galera/libgalera_smm.so"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/main.yml
deleted file mode 100644
index a32897f0..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-
-services_noarch:
- - mysql
-
-credentials:
- - user: keystone
- db: keystone
- password: "{{ KEYSTONE_DBPASS }}"
- - user: neutron
- db: neutron
- password: "{{ NEUTRON_DBPASS }}"
- - user: glance
- db: glance
- password: "{{ GLANCE_DBPASS }}"
- - user: nova
- db: nova_api
- password: "{{ NOVA_DBPASS }}"
- - user: nova
- db: nova
- password: "{{ NOVA_DBPASS }}"
- - user: cinder
- db: cinder
- password: "{{ CINDER_DBPASS }}"
- - user: heat
- db: heat
- password: "{{ HEAT_DBPASS }}"
- - user: aodh
- db: aodh
- password: "{{ AODH_DBPASS }}"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/handlers/main.yml
deleted file mode 100644
index 36e39072..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/handlers/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart neutron-plugin-openvswitch-agent
- service: name=neutron-openvswitch-agent state=restarted enabled=yes
- when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron-l3-agent
- service: name=neutron-l3-agent state=restarted enabled=yes
-
-- name: kill dnsmasq
- command: killall dnsmasq
- ignore_errors: True
-
-- name: restart neutron-dhcp-agent
- service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
- service: name=neutron-metadata-agent state=restarted enabled=yes
-
-- name: restart xorp
- service: name=xorp state=restarted enabled=yes sleep=10
- ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml
deleted file mode 100644
index f68105f1..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-# FIXME: temporary workaround for openstack api access random failure
-- name: restart api server
- service: name={{ item }} state=restarted enabled=yes
- with_items: api_services | union(api_services_noarch)
- ignore_errors: True
-
-- name: restart neutron server
- service: name=neutron-server state=restarted enabled=yes
-
-- name: wait for neutron ready
- wait_for: port=9696 delay=10 timeout=60 host={{ internal_ip }}
-
-- name: create external net
- shell:
- . /opt/admin-openrc.sh;
- neutron net-create \
- {{ public_net_info.network }} \
- --provider:network_type {{ public_net_info.type }} \
- --provider:physical_network {{ public_net_info.provider_network }} \
- --router:external True
- when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
-
-- name: create external subnet
- shell:
- . /opt/admin-openrc.sh;
- neutron subnet-create \
- --name {{ public_net_info.subnet }} \
- --gateway {{ public_net_info.external_gw }} \
- --allocation-pool \
- start={{ public_net_info.floating_ip_start }},end={{ public_net_info.floating_ip_end }} \
- {{ public_net_info.network }} {{ public_net_info.floating_ip_cidr }}
- when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
-
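The removed "create external net" task assembles a neutron net-create command from public_net_info. The sketch below shows the rendered command for a flat provider network; the network name, type, and physical network are illustrative values, not taken from any real config.

# Sketch of the rendered external-network command (sample values only).
public_net_info = {
    "network": "ext-net",
    "type": "flat",
    "provider_network": "physnet",
}

cmd = (
    "neutron net-create {network} "
    "--provider:network_type {type} "
    "--provider:physical_network {provider_network} "
    "--router:external True".format(**public_net_info)
)
print(cmd)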
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/Debian.yml
deleted file mode 100644
index 0b5c78b6..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/Debian.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-api_services:
- - nova-api
- - glance-api
- - ceilometer-api
- - heat-api
- - heat-api-cfn
- - aodh-api
- - cinder-api
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/RedHat.yml
deleted file mode 100644
index 886401fd..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/RedHat.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-api_services:
- - openstack-nova-api
- - openstack-glance-api
- - openstack-ceilometer-api
- - openstack-heat-api
- - openstack-heat-api-cfn
- - openstack-cinder-api
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/main.yml
deleted file mode 100644
index b19b6ebf..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-api_services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/tasks/nfs.yml
deleted file mode 100644
index 39a49dc1..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/tasks/nfs.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: install nfs packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: nfs_packages
-
-- name: install nfs
- local_action: yum name={{ item }} state=present
- with_items:
- - rpcbind
- - nfs-utils
- run_once: True
-
-- name: create image directory
- local_action: file path=/opt/images state=directory mode=0777
- run_once: True
-
-- name: remove nfs config item if it exists
- local_action: lineinfile dest=/etc/exports state=absent
- regexp="^/opt/images"
- run_once: True
-
-- name: update nfs config
- local_action: lineinfile dest=/etc/exports state=present
- line="/opt/images *(rw,insecure,sync,all_squash)"
- run_once: True
-
-- name: restart compass nfs service
- local_action: service name={{ item }} state=restarted enabled=yes
- with_items:
- - rpcbind
- - nfs-server
- run_once: True
-
-- name: get mount info
- command: mount
- register: mount_info
- tags:
- - recovery
-
-- name: get nfs server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: ip_info
- tags:
- - recovery
-
-- name: restart host nfs service
- service: name={{ item }} state=restarted enabled=yes
- with_items: '{{ nfs_services }}'
-
-- name: mount image directory
- shell: |
- mkdir -p /var/lib/glance/images
- mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images
- sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
- when: mount_info.stdout.find('images') == -1
- retries: 5
- delay: 3
- tags:
- - recovery
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-api.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-api.conf
deleted file mode 100644
index 241f04ce..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-api.conf
+++ /dev/null
@@ -1,93 +0,0 @@
-{% set workers = ansible_processor_vcpus // 2 %}
-{% set workers = workers if workers else 1 %}
-{% set memcached_servers = [] %}
-{% set rabbitmq_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
-
-[DEFAULT]
-verbose = {{ VERBOSE }}
-debug = {{ DEBUG }}
-log_file = /var/log/glance/api.log
-bind_host = {{ image_host }}
-bind_port = 9292
-backlog = 4096
-workers = {{ workers }}
-registry_host = {{ internal_ip }}
-registry_port = 9191
-registry_client_protocol = http
-cinder_catalog_info = volume:cinder:internalURL
-
-enable_v1_api = True
-enable_v1_registry = True
-enable_v2_api = True
-enable_v2_registry = True
-
-notification_driver = messagingv2
-rpc_backend = rabbit
-
-delayed_delete = False
-scrubber_datadir = /var/lib/glance/scrubber
-scrub_time = 43200
-image_cache_dir = /var/lib/glance/image-cache/
-show_image_direct_url = True
-
-[database]
-backend = sqlalchemy
-connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance?charset=utf8
-idle_timeout = 30
-sqlite_db = /var/lib/glance/glance.sqlite
-
-[task]
-task_executor = taskflow
-
-[glance_store]
-default_store = file
-stores = file,http,cinder,rbd
-filesystem_store_datadir = /var/lib/glance/images/
-
-[image_format]
-disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,root-tar
-
-[profiler]
-enabled = True
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = glance
-password = {{ GLANCE_PASS }}
-token_cache_time = 300
-revocation_cache_time = 60
-
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = glance
-admin_password = {{ GLANCE_PASS }}
-
-[paste_deploy]
-flavor= keystone
-
-[oslo_messaging_amqp]
-idle_timeout = 7200
-
-[oslo_messaging_rabbit]
-rabbit_hosts = {{ rabbitmq_servers }}
-rabbit_use_ssl = false
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_virtual_host = /
-default_notification_exchange = glance
-
-rabbit_notification_exchange = glance
-rabbit_notification_topic = notifications
-rabbit_durable_queues = False
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-registry.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-registry.conf
deleted file mode 100644
index ccd8f1bb..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-registry.conf
+++ /dev/null
@@ -1,64 +0,0 @@
-{% set workers = ansible_processor_vcpus // 2 %}
-{% set workers = workers if workers else 1 %}
-{% set memcached_servers = [] %}
-{% set rabbitmq_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
-
-[DEFAULT]
-verbose = {{ VERBOSE }}
-debug = {{ DEBUG }}
-log_file = /var/log/glance/registry.log
-bind_host = {{ image_host }}
-bind_port = 9191
-backlog = 4096
-workers = {{ workers }}
-
-notification_driver = messagingv2
-rpc_backend = rabbit
-
-[database]
-backend = sqlalchemy
-connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance?charset=utf8
-idle_timeout = 30
-
-[profiler]
-enabled = True
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = glance
-password = {{ GLANCE_PASS }}
-
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = glance
-admin_password = {{ GLANCE_PASS }}
-token_cache_time = 300
-revocation_cache_time = 60
-
-[paste_deploy]
-flavor= keystone
-
-[oslo_messaging_amqp]
-idle_timeout = 7200
-
-[oslo_messaging_rabbit]
-rabbit_hosts = {{ rabbitmq_servers }}
-rabbit_use_ssl = false
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_virtual_host = /
-rabbit_notification_exchange = glance
-rabbit_notification_topic = notifications
-rabbit_durable_queues = False
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/Debian.yml
deleted file mode 100644
index d1825012..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/Debian.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - glance
- - nfs-common
-
-nfs_packages:
- - nfs-common
-
-nfs_services: []
-
-services:
- - glance-registry
- - glance-api
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/RedHat.yml
deleted file mode 100644
index 2987d0c4..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/RedHat.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - openstack-glance
- - rpcbind
-
-nfs_packages:
- - nfs-utils
- - rpcbind
-
-nfs_services:
- - rpcbind
-
-services:
- - openstack-glance-api
- - openstack-glance-registry
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/openstack_newton_xenial/roles/ha/templates/haproxy.cfg
deleted file mode 100644
index 5fbcc9d9..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ha/templates/haproxy.cfg
+++ /dev/null
@@ -1,227 +0,0 @@
-
-global
- #chroot /var/run/haproxy
- daemon
- user haproxy
- group haproxy
- maxconn 4000
- pidfile /var/run/haproxy/haproxy.pid
- #log 127.0.0.1 local0
- tune.bufsize 1000000
- stats socket /var/run/haproxy.sock
- stats timeout 2m
-
-defaults
- log global
- maxconn 8000
- option redispatch
- option dontlognull
- option splice-auto
- timeout http-request 10s
- timeout queue 1m
- timeout connect 10s
- timeout client 50s
- timeout server 50s
- timeout check 10s
- retries 3
-
-listen proxy-mysql
- bind {{ internal_vip.ip }}:3306
- option tcpka
- option tcplog
- balance source
-{% for host, ip in haproxy_hosts.items() %}
-{% if loop.index == 1 %}
- server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5
-{% else %}
- server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5 backup
-{% endif %}
-{% endfor %}
-
-listen proxy-rabbit
- bind {{ internal_vip.ip }}:5672
- bind {{ public_vip.ip }}:5672
-
- option tcpka
- option tcplog
- timeout client 3h
- timeout server 3h
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:5672 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-glance_registry_cluster
- bind {{ internal_vip.ip }}:9191
- bind {{ public_vip.ip }}:9191
- option tcpka
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:9191 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-glance_api_cluster
- bind {{ internal_vip.ip }}:9292
- bind {{ public_vip.ip }}:9292
- option tcpka
- option tcplog
- option httpchk
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:9292 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova-novncproxy
- bind {{ internal_vip.ip }}:6080
- bind {{ public_vip.ip }}:6080
- option tcpka
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:6080 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-network
- bind {{ internal_vip.ip }}:9696
- bind {{ public_vip.ip }}:9696
- option tcpka
- option tcplog
- balance source
- option httpchk
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:9696 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-volume
- bind {{ internal_vip.ip }}:8776
- bind {{ public_vip.ip }}:8776
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-keystone_admin_cluster
- bind {{ internal_vip.ip }}:35357
- bind {{ public_vip.ip }}:35357
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:35357 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-keystone_public_internal_cluster
- bind {{ internal_vip.ip }}:5000
- bind {{ public_vip.ip }}:5000
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:5000 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova_compute_api_cluster
- bind {{ internal_vip.ip }}:8774
- bind {{ public_vip.ip }}:8774
- mode tcp
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8774 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova_metadata_api_cluster
- bind {{ internal_vip.ip }}:8775
- bind {{ public_vip.ip }}:8775
- option tcpka
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8775 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-cinder_api_cluster
- bind {{ internal_vip.ip }}:8776
- bind {{ public_vip.ip }}:8776
- mode tcp
- option httpchk
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-#listen proxy-swift-proxy
-# bind {{ internal_vip.ip }}:8080
-# bind {{ public_vip.ip }}:8080
-# balance source
-# option tcpka
-# option tcplog
-#{% for host,ip in haproxy_hosts.items() %}
-# server {{ host }} {{ ip }}:8080 weight 1 check inter 2000 rise 2 fall 5
-#{% endfor %}
-
-listen proxy-ceilometer_api_cluster
- bind {{ internal_vip.ip }}:8777
- bind {{ public_vip.ip }}:8777
- mode tcp
- option tcp-check
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8777 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-aodh_api_cluster
- bind {{ internal_vip.ip }}:8042
- bind {{ public_vip.ip }}:8042
- mode tcp
- option tcp-check
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:8042 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-congress_api_cluster
- bind {{ internal_vip.ip }}:1789
- bind {{ public_vip.ip }}:1789
- mode tcp
- option tcp-check
- option tcplog
- balance source
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:1789 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-dashboard
- bind {{ public_vip.ip }}:80
- mode http
- balance source
- capture cookie vgnvisitor= len 32
- cookie SERVERID insert indirect nocache
- option forwardfor
- option httpchk
- option httpclose
- rspidel ^Set-cookie:\ IP=
-{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:80 cookie {{ host }} weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen stats
- mode http
- bind 0.0.0.0:9999
- stats enable
- stats refresh 30s
- stats uri /
- stats realm Global\ statistics
- stats auth admin:admin
-
-
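For reference, a minimal rendering sketch of the Galera stanza above (ordinary Python using the jinja2 package; the "proxy-db" listen name, the VIP and the host IPs below are made up): only the first entry in haproxy_hosts becomes an active MySQL backend, every later host is rendered with the backup keyword so writes stay on a single Galera node.

# Rendering sketch for the database loop above. Assumes the jinja2 package
# is installed; the listen name, VIP and controller IPs are hypothetical.
from jinja2 import Template

snippet = """listen proxy-db
    bind {{ internal_vip.ip }}:3306
{%- for host, ip in haproxy_hosts.items() %}
{%- if loop.index == 1 %}
    server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5
{%- else %}
    server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5 backup
{%- endif %}
{%- endfor %}
"""

print(Template(snippet).render(
    internal_vip={"ip": "10.1.0.50"},                 # hypothetical VIP
    haproxy_hosts={"host1": "10.1.0.11",              # hypothetical controllers
                   "host2": "10.1.0.12",
                   "host3": "10.1.0.13"},
))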
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml
deleted file mode 100644
index 6a0f1c73..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install heat related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: generate heat service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-# '
-
-- name: create heat user domain
- shell: >
- . /opt/admin-openrc.sh;
- openstack domain create --description "Stack projects and users" heat;
- openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
- openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
- openstack role create heat_stack_owner;
- openstack role add --project demo --user demo heat_stack_owner;
- when: inventory_hostname == groups['controller'][0]
-
-- name: update heat conf
- template: src=heat.j2
- dest=/etc/heat/heat.conf
- backup=yes
- notify:
- - restart heat service
- - remove heat-sqlite-db
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2
deleted file mode 100644
index 72d4b61e..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2
+++ /dev/null
@@ -1,54 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
-[DEFAULT]
-heat_metadata_server_url = http://{{ internal_vip.ip }}:8000
-heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition
-rpc_backend = rabbit
-log_dir = /var/log/heat
-stack_domain_admin = heat_domain_admin
-stack_domain_admin_password = {{ HEAT_PASS }}
-stack_user_domain_name = heat
-
-[database]
-connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[ec2authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-
-[clients_keystone]
-auth_uri = http://{{ internal_vip.ip }}:35357
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = heat
-password = {{ HEAT_PASS }}
-
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = heat
-admin_password = {{ HEAT_PASS }}
-
-[oslo_messaging_rabbit]
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-[trustee]
-auth_type = password
-auth_url = http://{{ internal_vip.ip }}:35357
-username = heat
-password = {{ HEAT_PASS }}
-user_domain_name = default
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_config.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_config.yml
deleted file mode 100644
index ea211470..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_config.yml
+++ /dev/null
@@ -1,96 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: keystone-manage db-sync
- shell: su -s /bin/sh -c 'keystone-manage db_sync' keystone
-
-- name: Check if fernet keys already exist
- stat:
- path: "/etc/keystone/fernet-keys/0"
- register: fernet_keys_0
-
-- name: Create fernet keys for Keystone
- command:
- keystone-manage fernet_setup
- --keystone-user keystone
- --keystone-group keystone
- when: not fernet_keys_0.stat.exists
- notify:
- - restart keystone services
-
-- name: Rotate fernet keys for Keystone
- command:
- keystone-manage fernet_rotate
- --keystone-user keystone
- --keystone-group keystone
- when: fernet_keys_0.stat.exists
- notify:
- - restart keystone services
-
-- name: Distribute the fernet key repository
-  shell: >
-    rsync -e 'ssh -o StrictHostKeyChecking=no'
-    -avz
-    --delete
-    /etc/keystone/fernet-keys
-    root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
- with_items: groups['controller'][1:]
- notify:
- - restart keystone services
-
-- name: Check if credential keys already exist
- stat:
- path: "/etc/keystone/credential-keys/0"
- register: credential_keys_0
-
-- name: Create credential keys for Keystone
- command:
- keystone-manage credential_setup
- --keystone-user keystone
- --keystone-group keystone
- when: not credential_keys_0.stat.exists
- notify:
- - restart keystone services
-
-- name: Rotate credential keys for Keystone
- command:
- keystone-manage credential_rotate
- --keystone-user keystone
- --keystone-group keystone
- when: credential_keys_0.stat.exists
- notify:
- - restart keystone services
-
-- name: Distribute the credential key repository
-  shell: >
-    rsync -e 'ssh -o StrictHostKeyChecking=no'
-    -avz
-    --delete
-    /etc/keystone/credential-keys
-    root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
- with_items: groups['controller'][1:]
- notify:
- - restart keystone services
-
-- name: Bootstrap the Identity service
-  shell: >
-    keystone-manage bootstrap
-    --bootstrap-password {{ ADMIN_PASS }}
-    --bootstrap-admin-url http://{{ internal_ip }}:35357/v3/
-    --bootstrap-internal-url http://{{ internal_ip }}:35357/v3/
-    --bootstrap-public-url http://{{ internal_ip }}:5000/v3/
-    --bootstrap-region-id RegionOne
- notify:
- - restart keystone services
-
-- meta: flush_handlers
-
-- name: wait for keystone ready
- wait_for: port=35357 delay=15 timeout=60 host={{ internal_ip }}
-
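A hedged verification sketch (plain Python; the controller IPs are made up, and it relies on the same passwordless root SSH the rsync tasks above already assume) to confirm the distributed fernet key repositories are identical on every controller:

# Compare fernet key repositories across controllers after distribution.
# Controller IPs are hypothetical; adjust to the actual inventory.
import subprocess

CONTROLLERS = ["10.1.0.11", "10.1.0.12", "10.1.0.13"]

def fernet_fingerprint(host):
    # Hash the concatenated key files (glob order is stable) on the remote host.
    cmd = ["ssh", "-o", "StrictHostKeyChecking=no", "root@" + host,
           "cat /etc/keystone/fernet-keys/* | sha256sum"]
    return subprocess.check_output(cmd, text=True).split()[0]

digests = {host: fernet_fingerprint(host) for host in CONTROLLERS}
for host, digest in digests.items():
    print(host, digest)
assert len(set(digests.values())) == 1, "fernet keys differ between controllers"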
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml
deleted file mode 100644
index 757349c5..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml
+++ /dev/null
@@ -1,98 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install keystone packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: disable boot auto start
- file:
- path={{ item }}
- state=absent
- with_items:
- - /etc/init.d/keystone
- - /etc/init/keystone.conf
- - /lib/systemd/system/keystone.service
- when: ansible_os_family == "Debian"
-
-- name: generate keystone service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: delete sqlite database
- file:
- path: /var/lib/keystone/keystone.db
- state: absent
-
-- name: update keystone conf
- template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
- notify:
- - restart keystone services
-
-- name: assure listen port exist
- lineinfile:
- dest: '{{ apache_config_dir }}/ports.conf'
- regexp: '{{ item.regexp }}'
- line: '{{ item.line}}'
- with_items:
- - regexp: "^Listen {{ internal_ip }}:5000"
- line: "Listen {{ internal_ip }}:5000"
- - regexp: "^Listen {{ internal_ip }}:35357"
- line: "Listen {{ internal_ip }}:35357"
- notify:
- - restart keystone services
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/keystone.conf'
- when: ansible_os_family == 'Debian'
- notify:
- - restart keystone services
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/keystone.conf'
- when: ansible_os_family == 'RedHat'
- notify:
- - restart keystone services
-
-- name: enable keystone server
- file:
- src: "{{ apache_config_dir }}/sites-available/keystone.conf"
- dest: "{{ apache_config_dir }}/sites-enabled/keystone.conf"
- state: "link"
- when: ansible_os_family == 'Debian'
- notify:
- - restart keystone services
-
-- name: keystone source files
- template: src={{ item }} dest=/opt/{{ item }}
- with_items:
- - admin-openrc.sh
- - admin-openrc-v2.sh
- - demo-openrc.sh
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/main.yml
deleted file mode 100644
index ad619d40..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include: keystone_install.yml
- tags:
- - install
- - keystone_install
- - keystone
-
-- include: keystone_config.yml
- when: inventory_hostname == groups['controller'][0]
- tags:
- - config
- - keystone_config
- - keystone
-
-- include: keystone_create.yml
- when: inventory_hostname == groups['controller'][0]
- tags:
- - config
- - keystone_create
- - keystone
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc.sh
deleted file mode 100644
index 94d5850f..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# Verify the Identity Service installation
-export OS_PROJECT_DOMAIN_NAME=default
-export OS_USER_DOMAIN_NAME=default
-export OS_TENANT_NAME=admin
-export OS_PROJECT_NAME=admin
-export OS_USERNAME=admin
-export OS_PASSWORD={{ ADMIN_PASS }}
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v3
-export OS_IDENTITY_API_VERSION=3
-export OS_IMAGE_API_VERSION=2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/demo-openrc.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/demo-openrc.sh
deleted file mode 100644
index 920f42ed..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/demo-openrc.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-export OS_PROJECT_DOMAIN_NAME=default
-export OS_USER_DOMAIN_NAME=default
-export OS_TENANT_NAME=demo
-export OS_PROJECT_NAME=demo
-export OS_USERNAME=demo
-export OS_PASSWORD={{ DEMO_PASS }}
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:5000/v3
-export OS_IDENTITY_API_VERSION=3
-export OS_IMAGE_API_VERSION=2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/keystone.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/keystone.conf
deleted file mode 100644
index 919be344..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/keystone.conf
+++ /dev/null
@@ -1,60 +0,0 @@
-{% set memcached_servers = [] %}
-{% set rabbitmq_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
-[DEFAULT]
-debug={{ DEBUG }}
-log_dir = /var/log/keystone
-
-[cache]
-backend = keystone.cache.memcache_pool
-memcache_servers = {{ memcached_servers}}
-enabled=true
-
-[revoke]
-driver = sql
-expiration_buffer = 3600
-caching = true
-
-[database]
-connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8
-idle_timeout = 30
-min_pool_size = 5
-max_pool_size = 120
-pool_timeout = 30
-
-[fernet_tokens]
-key_repository = /etc/keystone/fernet-keys/
-
-[identity]
-default_domain_id = default
-driver = sql
-
-[assignment]
-driver = sql
-
-[resource]
-driver = sql
-caching = true
-cache_time = 3600
-
-[token]
-enforce_token_bind = permissive
-expiration = 43200
-provider = fernet
-driver = sql
-caching = true
-cache_time = 3600
-
-[eventlet_server]
-public_bind_host = {{ identity_host }}
-admin_bind_host = {{ identity_host }}
-
-[oslo_messaging_rabbit]
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_hosts = {{ rabbitmq_servers }}
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/wsgi-keystone.conf.j2
deleted file mode 100644
index 55c89839..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/wsgi-keystone.conf.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
-{% if work_threads > 10 %}
-{% set work_threads = 10 %}
-{% endif %}
-
-<VirtualHost {{ internal_ip }}:5000>
- WSGIDaemonProcess keystone-public processes=4 threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-public
- WSGIScriptAlias / /usr/bin/keystone-wsgi-public
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/{{ http_service_name }}/keystone.log
- CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-
-<VirtualHost {{ internal_ip }}:35357>
- WSGIDaemonProcess keystone-admin processes=4 threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/{{ http_service_name }}/keystone.log
- CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
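The worker thread count set in the Jinja2 header of this template is simple integer arithmetic; a quick standalone illustration (plain Python, no Ansible facts needed) of the values it yields:

# (vCPUs + 1) // 2, capped at 10 -- the same arithmetic as the template
# header above, shown for a few example vCPU counts.
for vcpus in (1, 2, 4, 8, 16, 48):
    work_threads = min((vcpus + 1) // 2, 10)
    print(f"{vcpus:2d} vCPUs -> {work_threads} WSGI threads per process")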
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/Debian.yml
deleted file mode 100644
index 89bfbe0a..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/Debian.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-cron_path: "/var/spool/cron/crontabs"
-
-packages:
- - apache2
- - libapache2-mod-wsgi
- - python-keystone
- - python-openstackclient
- - keystone
-
-services:
- - apache2
-
-apache_config_dir: /etc/apache2
-http_service_name: apache2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml
deleted file mode 100644
index baaf89e1..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml
+++ /dev/null
@@ -1,194 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch:
- - python-keystoneclient
-
-services_noarch: []
-os_services:
- - name: keystone
- type: identity
- region: RegionOne
- description: "OpenStack Identity"
- publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
-
- - name: glance
- type: image
- region: RegionOne
- description: "OpenStack Image Service"
- publicurl: "http://{{ public_vip.ip }}:9292"
- internalurl: "http://{{ internal_vip.ip }}:9292"
- adminurl: "http://{{ internal_vip.ip }}:9292"
-
- - name: nova
- type: compute
- region: RegionOne
- description: "OpenStack Compute"
- publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
-
- - name: neutron
- type: network
- region: RegionOne
- description: "OpenStack Networking"
- publicurl: "http://{{ public_vip.ip }}:9696"
- internalurl: "http://{{ internal_vip.ip }}:9696"
- adminurl: "http://{{ internal_vip.ip }}:9696"
-
- - name: ceilometer
- type: metering
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8777"
- internalurl: "http://{{ internal_vip.ip }}:8777"
- adminurl: "http://{{ internal_vip.ip }}:8777"
-
- - name: aodh
- type: alarming
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8042"
- internalurl: "http://{{ internal_vip.ip }}:8042"
- adminurl: "http://{{ internal_vip.ip }}:8042"
-
- - name: cinder
- type: volume
- region: RegionOne
- description: "OpenStack Block Storage"
- publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-
- - name: cinderv2
- type: volumev2
- region: RegionOne
- description: "OpenStack Block Storage v2"
- publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-
- - name: heat
- type: orchestration
- region: RegionOne
- description: "OpenStack Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
-
- - name: heat-cfn
- type: cloudformation
- region: RegionOne
- description: "OpenStack CloudFormation Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8000/v1"
- internalurl: "http://{{ internal_vip.ip }}:8000/v1"
- adminurl: "http://{{ internal_vip.ip }}:8000/v1"
-
- - name: congress
- type: policy
- region: RegionOne
- description: "OpenStack Policy Service"
- publicurl: "http://{{ public_vip.ip }}:1789"
- internalurl: "http://{{ internal_vip.ip }}:1789"
- adminurl: "http://{{ internal_vip.ip }}:1789"
-
-# - name: swift
-# type: object-store
-# region: RegionOne
-# description: "OpenStack Object Storage"
-# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-
-os_users:
- - user: admin
- password: "{{ ADMIN_PASS }}"
- email: admin@admin.com
- role: admin
- tenant: admin
- tenant_description: "Admin Tenant"
-
- - user: glance
- password: "{{ GLANCE_PASS }}"
- email: glance@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: nova
- password: "{{ NOVA_PASS }}"
- email: nova@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: keystone
- password: "{{ KEYSTONE_PASS }}"
- email: keystone@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: neutron
- password: "{{ NEUTRON_PASS }}"
- email: neutron@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: ceilometer
- password: "{{ CEILOMETER_PASS }}"
- email: ceilometer@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: cinder
- password: "{{ CINDER_PASS }}"
- email: cinder@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: aodh
- password: "{{ AODH_PASS }}"
- email: aodh@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: heat
- password: "{{ HEAT_PASS }}"
- email: heat@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: congress
- password: "{{ CONGRESS_PASS }}"
- email: congress@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: demo
- password: ""
- email: heat@demo.com
- role: heat_stack_user
- tenant: demo
- tenant_description: "Demo Tenant"
-
-# - user: swift
-# password: "{{ CINDER_PASS }}"
-# email: swift@admin.com
-# role: admin
-# tenant: service
-# tenant_description: "Service Tenant"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf
deleted file mode 100644
index 6e1159a1..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-keystone/admin-password: password
-keystone/auth-token: password
-keystone/admin-password-confirm: password
-keystone/admin-email: root@localhost
-keystone/admin-role-name: admin
-keystone/admin-user: admin
-keystone/create-admin-tenant: false
-keystone/region-name: Orange
-keystone/admin-tenant-name: admin
-keystone/register-endpoint: false
-keystone/configure_db: false
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf.bak b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf.bak
deleted file mode 100644
index 6e1159a1..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf.bak
+++ /dev/null
@@ -1,11 +0,0 @@
-keystone/admin-password: password
-keystone/auth-token: password
-keystone/admin-password-confirm: password
-keystone/admin-email: root@localhost
-keystone/admin-role-name: admin
-keystone/admin-user: admin
-keystone/create-admin-tenant: false
-keystone/region-name: Orange
-keystone/admin-tenant-name: admin
-keystone/register-endpoint: false
-keystone/configure_db: false
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/get_deb_depends.py b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/get_deb_depends.py
deleted file mode 100644
index d510bcf4..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/get_deb_depends.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python3
-
-import sys
-import subprocess
-
-pkts = []
-
-for arg in sys.argv[1:]:
- proc = subprocess.Popen(["dpkg-deb",
- "--info",
- arg],
- stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out = proc.stdout.read()
- err = proc.stderr.read()
- if err:
- print("An error occurred with {} ({})".format(arg, err))
- continue
- for line in out.splitlines():
- line = line.decode('utf-8')
- if " Depends:" in line:
- line = line.replace(" Depends:", "")
- for _dep in line.split(','):
- pkts.append(_dep.split()[0])
-
-print(" ".join(pkts))
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/handlers/main.yml
deleted file mode 100755
index 608a8a09..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/handlers/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart keystone services
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/main.yml
deleted file mode 100644
index a3511de7..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include: moon.yml
- when: moon == "Enable"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-compute.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-compute.yml
deleted file mode 100644
index e4142b5f..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-compute.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: update api-paste.ini
- template: src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes
-
-- name: restart nova task
- service: name={{ item }} state=restarted enabled=yes
- with_items:
- - nova-compute
-
-#- name: restart swift task
-# shell: swift-init all start
-# ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-controller.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-controller.yml
deleted file mode 100644
index 95dd2e89..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-controller.yml
+++ /dev/null
@@ -1,238 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-# install all packages
-- name: install keystone packages
- shell: apt-get install -y python-pip unzip
-
-# download master.zip
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download keystone-moon packages
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/master.zip" dest=/tmp/master.zip mode=0444
-
-- name: extract keystone-moon packages
- unarchive: src=/tmp/master.zip dest=/tmp copy=no
-
-# install all dependencies
-- name: copy scripts
- copy: src=get_deb_depends.py dest=/tmp/get_deb_depends.py
-
-- name: install keystone-moon dependencies
- shell: "apt-get install `python /tmp/get_deb_depends.py /tmp/moon-bin-master/*.deb`"
- when: ansible_os_family == "Debian"
-
-- name: delete configuration file
- shell: >
- rm -f {{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf;
- rm -f {{ apache_config_dir }}/sites-available/wsgi-keystone.conf;
-
-# install keystone moon
-- name: copy scripts
- copy: src=deb.conf dest=/tmp/deb.conf
-
-- name: install keystone moon
- shell: >
- export DEBIAN_FRONTEND="noninteractive";
- sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
-
-#- name: install keystone moon
-# shell: >
-# export DEBIAN_FRONTEND="noninteractive";
-# sudo -E debconf-set-selections python-keystone < /tmp/deb.conf;
-# sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
-
-- name: stop keystone task
- shell: >
- service keystone stop;
- mv /etc/init.d/keystone /home/;
- mv /etc/init/keystone.conf /home/;
- mv /lib/systemd/system/keystone.service /home/;
-
-# config keystone and apache2
-- name: delete sqlite database
- file:
- path: /var/lib/keystone/keystone.db
- state: absent
-
-#- name: update keystone conf
-# template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
-
-
-#- name: assure listen port exist
-# lineinfile:
-# dest: '{{ apache_config_dir }}/ports.conf'
-# regexp: '{{ item.regexp }}'
-# line: '{{ item.line}}'
-# with_items:
-# - regexp: "^Listen {{ internal_ip }}:5000"
-# line: "Listen {{ internal_ip }}:5000"
-# - regexp: "^Listen {{ internal_ip }}:35357"
-# line: "Listen {{ internal_ip }}:35357"
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
- when: ansible_os_family == 'Debian'
-
-- name: enable keystone server
- file:
- src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
- dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
- state: "link"
- when: ansible_os_family == 'Debian'
-
-#- name: keystone source files
-# template: src={{ item }} dest=/opt/{{ item }}
-# with_items:
-# - admin-openrc.sh
-# - demo-openrc.sh
-
-# keystone paste ini
-- name: keystone paste ini 1
- shell: sudo cp /etc/keystone/keystone-paste.ini /etc/keystone/keystone-paste.ini.bak;
-
-- name: keystone paste ini 2
- shell: sudo sed "3i[pipeline:moon_pipeline]\npipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service\n\n[app:moon_service]\nuse = egg:keystone#moon_service\n" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini;
-
-- name: keystone paste ini 3
- shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini;
-
-- name: keystone paste ini 4
- shell: sudo sed "s/use = egg:Paste#urlmap/use = egg:Paste#urlmap\n\/moon = moon_pipeline/" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini;
-
-- name: keystone paste ini 5
- shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini;
-
-# moon log
-- name: moon log
- shell: >
- sudo mkdir /var/log/moon/;
- sudo chown keystone /var/log/moon/;
- sudo addgroup moonlog;
- sudo chgrp moonlog /var/log/moon/;
- sudo touch /var/log/moon/keystonemiddleware.log;
- sudo touch /var/log/moon/system.log;
- sudo chgrp moonlog /var/log/moon/keystonemiddleware.log;
- sudo chgrp moonlog /var/log/moon/system.log;
- sudo chmod g+rw /var/log/moon;
- sudo chmod g+rw /var/log/moon/keystonemiddleware.log;
- sudo chmod g+rw /var/log/moon/system.log;
- sudo adduser keystone moonlog;
-    sudo adduser nova moonlog;
-    # sudo adduser swift moonlog;
-
-
-# keystone db sync
-- name: keystone db sync
- shell: >
- sudo /usr/bin/keystone-manage db_sync;
- sudo /usr/bin/keystone-manage db_sync --extension moon;
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-
-#############################################
-- name: wait for keystone ready
- wait_for: port=35357 delay=3 timeout=10 host={{ internal_ip }}
-
-#- name: cron job to purge expired tokens hourly
-# cron:
-# name: 'purge expired tokens'
-# special_time: hourly
-# job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1'
-
-#############################################
-# moon workaround
-- name: copy scripts
- copy: src=controllers.py dest=/usr/lib/python2.7/dist-packages/keystone/contrib/moon/controllers.py
-
-# apache2 restart
-- name: restart apache2
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
-
-# install moonclient
-- name: install moon client
- shell: sudo pip install /tmp/moon-bin-master/python-moonclient-0.1.tar.gz
-
-###################################################
-
-
-#- name: add tenants
-# keystone_user:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# tenant: "{{ item.tenant }}"
-# tenant_description: "{{ item.tenant_description }}"
-# with_items: "{{ os_users }}"
-# when: inventory_hostname == groups['controller'][0]
-#
-#- name: add users
-# keystone_user:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# user: "{{ item.user }}"
-# tenant: "{{ item.tenant }}"
-# password: "{{ item.password }}"
-# email: "{{ item.email }}"
-# with_items: "{{ os_users }}"
-# when: inventory_hostname == groups['controller'][0]
-#
-#- name: grant roles
-# keystone_user:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# user: "{{ item.user }}"
-# role: "{{ item.role }}"
-# tenant: "{{ item.tenant }}"
-# with_items: "{{ os_users }}"
-# when: inventory_hostname == groups['controller'][0]
-#
-#- name: add endpoints
-# keystone_service:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# name: "{{ item.name }}"
-# type: "{{ item.type }}"
-# region: "{{ item.region}}"
-# description: "{{ item.description }}"
-# publicurl: "{{ item.publicurl }}"
-# internalurl: "{{ item.internalurl }}"
-# adminurl: "{{ item.adminurl }}"
-# with_items: "{{ os_services }}"
-# when: inventory_hostname == groups['controller'][0]
-
-
-###################################################
-
-- name: update api-paste.ini
- template: src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes
-
-#- name: update proxy-server conf
-# template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
-
-# restart nova
-- name: restart nova
- service: name={{ item }} state=restarted enabled=yes
- with_items:
- - nova-api
- - nova-cert
- - nova-conductor
- - nova-consoleauth
- - nova-scheduler
-
-# restart swift
-#- name: restart swift
-# service: name={{ item }} state=restarted enabled=yes
-# with_items:
-# - swift-proxy
-# - memcached
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon.yml
deleted file mode 100644
index 40e1c98c..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- include: moon-controller.yml
- when: inventory_hostname in groups['controller']
-
-- include: moon-compute.yml
- when: inventory_hostname in groups['compute']
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/admin-openrc.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/admin-openrc.sh
deleted file mode 100644
index 6ba620ff..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/admin-openrc.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# Verify the Identity Service installation
-export OS_PASSWORD={{ ADMIN_PASS }}
-export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
-export OS_USERNAME=admin
-export OS_VOLUME_API_VERSION=2
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/api-paste.ini b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/api-paste.ini
deleted file mode 100644
index f99689b7..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/api-paste.ini
+++ /dev/null
@@ -1,106 +0,0 @@
-############
-# Metadata #
-############
-[composite:metadata]
-use = egg:Paste#urlmap
-/: meta
-
-[pipeline:meta]
-pipeline = cors metaapp
-
-[app:metaapp]
-paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
-
-#############
-# OpenStack #
-#############
-
-[composite:osapi_compute]
-use = call:nova.api.openstack.urlmap:urlmap_factory
-/: oscomputeversions
-# starting in Liberty the v21 implementation replaces the v2
-# implementation and it is suggested that you use it as the default. If
-# this causes issues with your clients you can rollback to the
-# *frozen* v2 api by commenting out the above stanza and using the
-# following instead::
-# /v2: openstack_compute_api_legacy_v2
-# if rolling back to v2 fixes your issue please file a critical bug
-# at - https://bugs.launchpad.net/nova/+bugs
-#
-# v21 is an exact feature match for v2, except it has more stringent
-# input validation on the wsgi surface (prevents fuzzing early on the
-# API). It also provides new features via API microversions which
-# clients can opt into. Unaware clients will receive the same frozen
-# v2 API feature set, but with some relaxed validation
-/v2: openstack_compute_api_v21_legacy_v2_compatible
-/v2.1: openstack_compute_api_v21
-
-# NOTE: this is deprecated in favor of openstack_compute_api_v21_legacy_v2_compatible
-[composite:openstack_compute_api_legacy_v2]
-use = call:nova.api.auth:pipeline_factory
-noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_ratelimit osapi_compute_app_legacy_v2
-keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext moon legacy_ratelimit osapi_compute_app_legacy_v2
-keystone_nolimit = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_legacy_v2
-
-[composite:openstack_compute_api_v21]
-use = call:nova.api.auth:pipeline_factory_v21
-noauth2 = cors compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
-keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
-
-[composite:openstack_compute_api_v21_legacy_v2_compatible]
-use = call:nova.api.auth:pipeline_factory_v21
-noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
-keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:compute_req_id]
-paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
-
-[filter:faultwrap]
-paste.filter_factory = nova.api.openstack:FaultWrapper.factory
-
-[filter:noauth2]
-paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
-
-[filter:legacy_ratelimit]
-paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
-
-[filter:sizelimit]
-paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory
-
-[filter:legacy_v2_compatible]
-paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory
-
-[app:osapi_compute_app_legacy_v2]
-paste.app_factory = nova.api.openstack.compute:APIRouter.factory
-
-[app:osapi_compute_app_v21]
-paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
-
-[pipeline:oscomputeversions]
-pipeline = faultwrap oscomputeversionapp
-
-[app:oscomputeversionapp]
-paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
-
-##########
-# Shared #
-##########
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = nova
-
-[filter:keystonecontext]
-paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:moon]
-paste.filter_factory = keystonemiddleware.moon_agent:filter_factory
-authz_login=admin
-authz_password=password
-logfile=/var/log/moon/keystonemiddleware.log
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/demo-openrc.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/demo-openrc.sh
deleted file mode 100644
index 5807e868..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/demo-openrc.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-export OS_USERNAME=demo
-export OS_PASSWORD={{ DEMO_PASS }}
-export OS_TENANT_NAME=demo
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone-paste.ini b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone-paste.ini
deleted file mode 100644
index cd9ebede..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone-paste.ini
+++ /dev/null
@@ -1,96 +0,0 @@
-# Keystone PasteDeploy configuration file.
-
-[pipeline:moon_pipeline]
-pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service
-
-[app:moon_service]
-use = egg:keystone#moon_service
-
-[filter:debug]
-use = egg:oslo.middleware#debug
-
-[filter:request_id]
-use = egg:oslo.middleware#request_id
-
-[filter:build_auth_context]
-use = egg:keystone#build_auth_context
-
-[filter:token_auth]
-use = egg:keystone#token_auth
-
-[filter:admin_token_auth]
-# This is deprecated in the M release and will be removed in the O release.
-# Use `keystone-manage bootstrap` and remove this from the pipelines below.
-use = egg:keystone#admin_token_auth
-
-[filter:json_body]
-use = egg:keystone#json_body
-
-[filter:cors]
-use = egg:oslo.middleware#cors
-oslo_config_project = keystone
-
-[filter:ec2_extension]
-use = egg:keystone#ec2_extension
-
-[filter:ec2_extension_v3]
-use = egg:keystone#ec2_extension_v3
-
-[filter:s3_extension]
-use = egg:keystone#s3_extension
-
-[filter:url_normalize]
-use = egg:keystone#url_normalize
-
-[filter:sizelimit]
-use = egg:oslo.middleware#sizelimit
-
-[app:public_service]
-use = egg:keystone#public_service
-
-[app:service_v3]
-use = egg:keystone#service_v3
-
-[app:admin_service]
-use = egg:keystone#admin_service
-
-[pipeline:public_api]
-# The last item in this pipeline must be public_service or an equivalent
-# application. It cannot be a filter.
-pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension public_service
-
-[pipeline:admin_api]
-# The last item in this pipeline must be admin_service or an equivalent
-# application. It cannot be a filter.
-pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension s3_extension admin_service
-
-[pipeline:api_v3]
-# The last item in this pipeline must be service_v3 or an equivalent
-# application. It cannot be a filter.
-pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3
-
-[app:public_version_service]
-use = egg:keystone#public_version_service
-
-[app:admin_version_service]
-use = egg:keystone#admin_version_service
-
-[pipeline:public_version_api]
-pipeline = cors sizelimit url_normalize public_version_service
-
-[pipeline:admin_version_api]
-pipeline = cors sizelimit url_normalize admin_version_service
-
-[composite:main]
-use = egg:Paste#urlmap
-/moon = moon_pipeline
-/v2.0 = public_api
-/v3 = api_v3
-/ = public_version_api
-
-[composite:admin]
-use = egg:Paste#urlmap
-/moon = moon_pipeline
-/v2.0 = admin_api
-/v3 = api_v3
-/ = admin_version_api
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone.conf
deleted file mode 100644
index 649fc32c..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone.conf
+++ /dev/null
@@ -1,59 +0,0 @@
-{% set memcached_servers = [] %}
-{% set rabbitmq_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
-[DEFAULT]
-admin_token={{ ADMIN_TOKEN }}
-debug={{ DEBUG }}
-log_dir = /var/log/keystone
-
-[cache]
-backend=keystone.cache.memcache_pool
-memcache_servers={{ memcached_servers}}
-enabled=true
-
-[revoke]
-driver=sql
-expiration_buffer=3600
-caching=true
-
-[database]
-connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8
-idle_timeout=30
-min_pool_size=5
-max_pool_size=120
-pool_timeout=30
-
-
-[identity]
-default_domain_id=default
-driver=sql
-
-[assignment]
-driver=sql
-
-[resource]
-driver=sql
-caching=true
-cache_time=3600
-
-[token]
-enforce_token_bind=permissive
-expiration=43200
-provider=uuid
-driver=sql
-caching=true
-cache_time=3600
-
-[eventlet_server]
-public_bind_host= {{ identity_host }}
-admin_bind_host= {{ identity_host }}
-
-[oslo_messaging_rabbit]
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_hosts = {{ rabbitmq_servers }}
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/proxy-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/proxy-server.conf
deleted file mode 100644
index 9bea7a8e..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/proxy-server.conf
+++ /dev/null
@@ -1,775 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-[DEFAULT]
-bind_ip = {{ internal_ip }}
-bind_port = 8080
-# bind_timeout = 30
-# backlog = 4096
-swift_dir = /etc/swift
-user = swift
-
-# Enables exposing configuration settings via HTTP GET /info.
-# expose_info = true
-
-# Key to use for admin calls that are HMAC signed. Default is empty,
-# which will disable admin calls to /info.
-# admin_key = secret_admin_key
-#
-# Allows the ability to withhold sections from showing up in the public calls
-# to /info. You can withhold subsections by separating the dict level with a
-# ".". The following would cause the sections 'container_quotas' and 'tempurl'
-# to not be listed, and the key max_failed_deletes would be removed from
-# bulk_delete. Default value is 'swift.valid_api_versions' which allows all
-# registered features to be listed via HTTP GET /info except
-# swift.valid_api_versions information
-# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl
-
-# Use an integer to override the number of pre-forked processes that will
-# accept connections. Should default to the number of effective cpu
-# cores in the system. It's worth noting that individual workers will
-# use many eventlet co-routines to service multiple concurrent requests.
-# workers = auto
-#
-# Maximum concurrent requests per worker
-# max_clients = 1024
-#
-# Set the following two lines to enable SSL. This is for testing only.
-# cert_file = /etc/swift/proxy.crt
-# key_file = /etc/swift/proxy.key
-#
-# expiring_objects_container_divisor = 86400
-# expiring_objects_account_name = expiring_objects
-#
-# You can specify default log routing here if you want:
-# log_name = swift
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_headers = false
-# log_address = /dev/log
-# The following caps the length of log lines to the value given; no limit if
-# set to 0, the default.
-# log_max_line_length = 0
-#
-# This optional suffix (default is empty) is appended to the swift transaction
-# id so one can easily figure out which cluster an X-Trans-Id belongs to.
-# This is very useful when one is managing more than one swift cluster.
-# trans_id_suffix =
-#
-# comma separated list of functions to call to setup custom log handlers.
-# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
-# adapted_logger
-# log_custom_handlers =
-#
-# If set, log_udp_host will override log_address
-# log_udp_host =
-# log_udp_port = 514
-#
-# You can enable StatsD logging here:
-# log_statsd_host =
-# log_statsd_port = 8125
-# log_statsd_default_sample_rate = 1.0
-# log_statsd_sample_rate_factor = 1.0
-# log_statsd_metric_prefix =
-#
-# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
-# cors_allow_origin =
-# strict_cors_mode = True
-#
-# client_timeout = 60
-# eventlet_debug = false
-
-[pipeline:main]
-# This sample pipeline uses tempauth and is used for SAIO dev work and
-# testing. See below for a pipeline using keystone.
-#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
-pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging moon proxy-server
-
-# The following pipeline shows keystone integration. Comment out the one
-# above and uncomment this one. Additional steps for integrating keystone are
-# covered further below in the filter sections for authtoken and keystoneauth.
-#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
-
-[app:proxy-server]
-use = egg:swift#proxy
-account_autocreate = True
-# You can override the default log routing for this app here:
-# set log_name = proxy-server
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_address = /dev/log
-#
-# log_handoffs = true
-# recheck_account_existence = 60
-# recheck_container_existence = 60
-# object_chunk_size = 65536
-# client_chunk_size = 65536
-#
-# How long the proxy server will wait on responses from the a/c/o servers.
-# node_timeout = 10
-#
-# How long the proxy server will wait for an initial response and to read a
-# chunk of data from the object servers while serving GET / HEAD requests.
-# Timeouts from these requests can be recovered from so setting this to
-# something lower than node_timeout would provide quicker error recovery
-# while allowing for a longer timeout for non-recoverable requests (PUTs).
-# Defaults to node_timeout, should be overridden if node_timeout is set to a
-# high number to prevent client timeouts from firing before the proxy server
-# has a chance to retry.
-# recoverable_node_timeout = node_timeout
-#
-# conn_timeout = 0.5
-#
-# How long to wait for requests to finish after a quorum has been established.
-# post_quorum_timeout = 0.5
-#
-# How long without an error before a node's error count is reset. This will
-# also be how long before a node is reenabled after suppression is triggered.
-# error_suppression_interval = 60
-#
-# How many errors can accumulate before a node is temporarily ignored.
-# error_suppression_limit = 10
-#
-# If set to 'true' any authorized user may create and delete accounts; if
-# 'false' no one, even authorized, can.
-# allow_account_management = false
-#
-# Set object_post_as_copy = false to turn on fast posts where only the metadata
-# changes are stored anew and the original data file is kept in place. This
-# makes for quicker posts.
-# object_post_as_copy = true
-#
-# If set to 'true' authorized accounts that do not yet exist within the Swift
-# cluster will be automatically created.
-# account_autocreate = false
-#
-# If set to a positive value, trying to create a container when the account
-# already has at least this maximum containers will result in a 403 Forbidden.
-# Note: This is a soft limit, meaning a user might exceed the cap for
-# recheck_account_existence before the 403s kick in.
-# max_containers_per_account = 0
-#
-# This is a comma separated list of account hashes that ignore the
-# max_containers_per_account cap.
-# max_containers_whitelist =
-#
-# Comma separated list of Host headers to which the proxy will deny requests.
-# deny_host_headers =
-#
-# Prefix used when automatically creating accounts.
-# auto_create_account_prefix = .
-#
-# Depth of the proxy put queue.
-# put_queue_depth = 10
-#
-# Storage nodes can be chosen at random (shuffle), by using timing
-# measurements (timing), or by using an explicit match (affinity).
-# Using timing measurements may allow for lower overall latency, while
-# using affinity allows for finer control. In both the timing and
-# affinity cases, equally-sorting nodes are still randomly chosen to
-# spread load.
-# The valid values for sorting_method are "affinity", "shuffle", or "timing".
-# sorting_method = shuffle
-#
-# If the "timing" sorting_method is used, the timings will only be valid for
-# the number of seconds configured by timing_expiry.
-# timing_expiry = 300
-#
-# By default on a GET/HEAD swift will connect to a storage node one at a time
-# in a single thread. There is logic in the order in which they are tried, however. If you
-# turn on concurrent_gets below, then replica count threads will be used.
-# With the addition of the concurrency_timeout option this will allow swift to send
-# out GET/HEAD requests to the storage nodes concurrently and answer with the
-# first to respond. With an EC policy the parameter only affects HEAD requests.
-# concurrent_gets = off
-#
-# This parameter controls how long to wait before firing off the next
-# concurrent_get thread. A value of 0 would be fully concurrent, any other
-# number will stagger the firing of the threads. This number should be
-# between 0 and node_timeout. The default is whatever you set for the
-# conn_timeout parameter.
-# concurrency_timeout = 0.5
-#
-# Set to the number of nodes to contact for a normal request. You can use
-# '* replicas' at the end to have it use the number given times the number of
-# replicas for the ring being used for the request.
-# request_node_count = 2 * replicas
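As a side note on the '* replicas' form above, here is a minimal Python sketch (function name hypothetical, not Swift's own parser) of how such a value could be resolved against the ring's replica count:

    def resolve_node_count(value, replicas):
        # Accepts either a plain integer or the "<n> * replicas" form described above.
        value = value.strip()
        if value.endswith('replicas'):
            return int(value.split('*')[0]) * replicas
        return int(value)

    print(resolve_node_count('2 * replicas', 3))  # -> 6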
-#
-# Which backend servers to prefer on reads. Format is r<N> for region
-# N or r<N>z<M> for region N, zone M. The value after the equals is
-# the priority; lower numbers are higher priority.
-#
-# Example: first read from region 1 zone 1, then region 1 zone 2, then
-# anything in region 2, then everything else:
-# read_affinity = r1z1=100, r1z2=200, r2=300
-# Default is empty, meaning no preference.
-# read_affinity =
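A minimal Python sketch (names hypothetical, not the proxy's actual affinity code) showing how a read_affinity string like the example above could be parsed and used to order candidate nodes:

    def parse_affinity(spec):
        # 'r1z1=100, r1z2=200, r2=300' -> {(1, 1): 100, (1, 2): 200, (2, None): 300}
        prios = {}
        for item in spec.split(','):
            loc, prio = item.strip().split('=')
            region, _, zone = loc.lstrip('r').partition('z')
            prios[(int(region), int(zone) if zone else None)] = int(prio)
        return prios

    def priority(node, prios, default=10**6):
        # Exact region+zone match wins, then region-only, then the default (lowest preference).
        return prios.get((node['region'], node['zone']),
                         prios.get((node['region'], None), default))

    prios = parse_affinity('r1z1=100, r1z2=200, r2=300')
    nodes = [{'region': 2, 'zone': 1}, {'region': 1, 'zone': 2}, {'region': 1, 'zone': 1}]
    print(sorted(nodes, key=lambda n: priority(n, prios)))
    # -> region 1 zone 1 first, then region 1 zone 2, then region 2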
-#
-# Which backend servers to prefer on writes. Format is r<N> for region
-# N or r<N>z<M> for region N, zone M. If this is set, then when
-# handling an object PUT request, some number (see setting
-# write_affinity_node_count) of local backend servers will be tried
-# before any nonlocal ones.
-#
-# Example: try to write to regions 1 and 2 before writing to any other
-# nodes:
-# write_affinity = r1, r2
-# Default is empty, meaning no preference.
-# write_affinity =
-#
-# The number of local (as governed by the write_affinity setting)
-# nodes to attempt to contact first, before any non-local ones. You
-# can use '* replicas' at the end to have it use the number given
-# times the number of replicas for the ring being used for the
-# request.
-# write_affinity_node_count = 2 * replicas
-#
-# These are the headers whose values will only be shown to swift_owners. The
-# exact definition of a swift_owner is up to the auth system in use, but
-# usually indicates administrative responsibilities.
-# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control
-
-[filter:tempauth]
-use = egg:swift#tempauth
-# You can override the default log routing for this filter here:
-# set log_name = tempauth
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# The reseller prefix will verify a token begins with this prefix before even
-# attempting to validate it. Also, with authorization, only Swift storage
-# accounts with this prefix will be authorized by this middleware. Useful if
-# multiple auth systems are in use for one Swift cluster.
-# The reseller_prefix may contain a comma separated list of items. The first
-# item is used for the token as mentioned above. If second and subsequent
-# items exist, the middleware will handle authorization for an account with
-# that prefix. For example, for prefixes "AUTH, SERVICE", a path of
-# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty
-# (blank) reseller prefix is required, it must be first in the list. Two
-# single quote characters indicate an empty (blank) reseller prefix.
-# reseller_prefix = AUTH
-
-#
-# The require_group parameter names a group that must be presented by
-# either X-Auth-Token or X-Service-Token. Usually this parameter is
-# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah).
-# By default, no group is needed. Do not use .admin.
-# require_group =
-
-# The auth prefix will cause requests beginning with this prefix to be routed
-# to the auth subsystem, for granting tokens, etc.
-# auth_prefix = /auth/
-# token_life = 86400
-#
-# This allows middleware higher in the WSGI pipeline to override auth
-# processing, useful for middleware such as tempurl and formpost. If you know
-# you're not going to use such middleware and you want a bit of extra security,
-# you can set this to false.
-# allow_overrides = true
-#
-# This specifies what scheme to return with storage urls:
-# http, https, or default (chooses based on what the server is running as)
-# This can be useful with an SSL load balancer in front of a non-SSL server.
-# storage_url_scheme = default
-#
-# Lastly, you need to list all the accounts/users you want here. The format is:
-# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
-# or if you want underscores in <account> or <user>, you can base64 encode them
-# (with no equal signs) and use this format:
-# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
-# There are special groups of:
-# .reseller_admin = can do anything to any account for this auth
-# .admin = can do anything within the account
-# If neither of these groups is specified, the user can only access containers
-# that have been explicitly allowed for them by a .admin or .reseller_admin.
-# The trailing optional storage_url allows you to specify an alternate url to
-# hand back to the user upon authentication. If not specified, this defaults to
-# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
-# to what the requester would need to use to reach this host.
-# Here are example entries, required for running the tests:
-user_admin_admin = admin .admin .reseller_admin
-user_test_tester = testing .admin
-user_test2_tester2 = testing2 .admin
-user_test_tester3 = testing3
-user_test5_tester5 = testing5 service
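A small Python example of the user64_ form mentioned above for names containing underscores (account, user and key values are made up); the base64 padding ('=') is stripped as the comment requires:

    import base64

    def b64_noequals(s):
        # base64 with the '=' padding removed, as described above
        return base64.b64encode(s.encode()).decode().rstrip('=')

    account, user = 'my_account', 'my_user'
    print('user64_%s_%s = secretkey .admin' % (b64_noequals(account), b64_noequals(user)))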
-
-# To enable Keystone authentication you need to have the auth token
-# middleware first to be configured. Here is an example below, please
-# refer to the keystone's documentation for details about the
-# different settings.
-#
-# You'll also need to have the keystoneauth middleware enabled and have it in
-# your main pipeline, as show in the sample pipeline at the top of this file.
-#
-# The following parameters are known to work with keystonemiddleware v2.3.0
-# (and above v2.0.0), but checking the latest information in the documentation page[1]
-# is recommended.
-# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration
-#
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-identity_uri = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-#auth_plugin = password
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = swift
-password = {{ CINDER_PASS }}
-delay_auth_decision = True
-admin_user=admin
-admin_password={{ ADMIN_PASS }}
-admin_token={{ ADMIN_TOKEN }}
-#
-# delay_auth_decision defaults to False, but leaving it as false will
-# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from
-# working. This value must be explicitly set to True.
-# delay_auth_decision = False
-#
-# cache = swift.cache
-# include_service_catalog = False
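For readers unfamiliar with the paste.filter_factory line above, here is a rough, purely hypothetical sketch of the factory contract it relies on (this is not keystonemiddleware's implementation): the factory receives the [filter:authtoken] options and returns a callable that wraps the next WSGI app in the pipeline.

    def filter_factory(global_conf, **local_conf):
        auth_url = local_conf.get('auth_url')  # e.g. the value templated above

        def auth_filter(app):
            def middleware(environ, start_response):
                # A real filter would validate the X-Auth-Token against auth_url here
                # and either reject the request or annotate environ for keystoneauth.
                environ.setdefault('HTTP_X_IDENTITY_STATUS', 'Confirmed')
                return app(environ, start_response)
            return middleware

        return auth_filter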
-#
-[filter:keystoneauth]
-use = egg:swift#keystoneauth
-operator_roles = admin,user
-# The reseller_prefix option lists account namespaces that this middleware is
-# responsible for. The prefix is placed before the Keystone project id.
-# For example, for project 12345678, and prefix AUTH, the account is
-# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...).
-# Several prefixes are allowed by specifying a comma-separated list
-# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a
-# single blank/empty prefix. If an empty prefix is required in a list of
-# prefixes, a value of '' (two single quote characters) indicates a
-# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_')
-# character is appended to the value unless already present.
-# reseller_prefix = AUTH
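A tiny Python illustration of the account-naming rule described above (the project id is made up); note the underscore is appended only if not already present:

    def account_name(project_id, reseller_prefix='AUTH'):
        if reseller_prefix and not reseller_prefix.endswith('_'):
            reseller_prefix += '_'
        return reseller_prefix + project_id

    print('/v1/' + account_name('12345678'))  # -> /v1/AUTH_12345678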
-#
-# The user must have at least one role named by operator_roles on a
-# project in order to create, delete and modify containers and objects
-# and to set and read privileged headers such as ACLs.
-# If there are several reseller prefix items, you can prefix the
-# parameter so it applies only to those accounts (for example
-# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project>
-# path). If you omit the prefix, the option applies to all reseller
-# prefix items. For the blank/empty prefix, prefix with '' (do not put
-# underscore after the two single quote characters).
-# operator_roles = admin, swiftoperator
-#
-# The reseller admin role has the ability to create and delete accounts
-# reseller_admin_role = ResellerAdmin
-#
-# This allows middleware higher in the WSGI pipeline to override auth
-# processing, useful for middleware such as tempurl and formpost. If you know
-# you're not going to use such middleware and you want a bit of extra security,
-# you can set this to false.
-# allow_overrides = true
-#
-# If the service_roles parameter is present, an X-Service-Token must be
-# present in the request that when validated, grants at least one role listed
-# in the parameter. The X-Service-Token may be scoped to any project.
-# If there are several reseller prefix items, you can prefix the
-# parameter so it applies only to those accounts (for example
-# the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project>
-# path). If you omit the prefix, the option applies to all reseller
-# prefix items. For the blank/empty prefix, prefix with '' (do not put
-# underscore after the two single quote characters).
-# By default, no service_roles are required.
-# service_roles =
-#
-# For backwards compatibility, keystoneauth will match names in cross-tenant
-# access control lists (ACLs) when both the requesting user and the tenant
-# are in the default domain, i.e. the domain to which existing tenants are
-# migrated. The default_domain_id value configured here should be the same as
-# the value used during migration of tenants to keystone domains.
-# default_domain_id = default
-#
-# For a new installation, or an installation in which keystone projects may
-# move between domains, you should disable backwards compatible name matching
-# in ACLs by setting allow_names_in_acls to false:
-# allow_names_in_acls = true
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-# An optional filesystem path which, if present, will cause the healthcheck
-# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
-# This facility may be used to temporarily remove a Swift node from a load
-# balancer pool during maintenance or upgrade (remove the file to allow the
-# node back into the load balancer pool).
-# disable_path =
-
-[filter:cache]
-use = egg:swift#memcache
-memcache_servers = {{ memcached_servers }}
-# You can override the default log routing for this filter here:
-# set log_name = cache
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# If not set here, the value for memcache_servers will be read from
-# memcache.conf (see memcache.conf-sample) or lacking that file, it will
-# default to the value below. You can specify multiple servers separated with
-# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
-# follow rfc3986 section-3.2.2, i.e. [::1]:11211)
-# memcache_servers = 127.0.0.1:11211
-#
-# Sets how memcache values are serialized and deserialized:
-# 0 = older, insecure pickle serialization
-# 1 = json serialization but pickles can still be read (still insecure)
-# 2 = json serialization only (secure and the default)
-# If not set here, the value for memcache_serialization_support will be read
-# from /etc/swift/memcache.conf (see memcache.conf-sample).
-# To avoid an instant full cache flush, existing installations should
-# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
-# set to 2 and reload.
-# In the future, the ability to use pickle serialization will be removed.
-# memcache_serialization_support = 2
-#
-# Sets the maximum number of connections to each memcached server per worker
-# memcache_max_connections = 2
-#
-# More options documented in memcache.conf-sample
-
-[filter:ratelimit]
-use = egg:swift#ratelimit
-# You can override the default log routing for this filter here:
-# set log_name = ratelimit
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# clock_accuracy should represent how accurate the proxy servers' system clocks
-# are with each other. 1000 means that all the proxies' clocks are accurate to
-# each other within 1 millisecond. No ratelimit should be higher than the
-# clock accuracy.
-# clock_accuracy = 1000
-#
-# max_sleep_time_seconds = 60
-#
-# log_sleep_time_seconds of 0 means disabled
-# log_sleep_time_seconds = 0
-#
-# allows slow rates (e.g. running up to 5 seconds behind) to catch up.
-# rate_buffer_seconds = 5
-#
-# account_ratelimit of 0 means disabled
-# account_ratelimit = 0
-
-# DEPRECATED- these will continue to work but will be replaced
-# by the X-Account-Sysmeta-Global-Write-Ratelimit flag.
-# Please see ratelimiting docs for details.
-# these are comma separated lists of account names
-# account_whitelist = a,b
-# account_blacklist = c,d
-
-# With container_ratelimit_x = r, write requests per second for containers of
-# size x are limited to r. The container
-# rate will be linearly interpolated from the values given. With the values
-# below, a container of size 5 will get a rate of 75.
-# container_ratelimit_0 = 100
-# container_ratelimit_10 = 50
-# container_ratelimit_50 = 20
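A short Python sketch (function name hypothetical) of the linear interpolation described above, reproducing the "size 5 gets a rate of 75" example:

    def container_ratelimit(size, points):
        # points maps container size -> write requests per second, e.g. {0: 100, 10: 50, 50: 20}
        pts = sorted(points.items())
        for (lo, lo_rate), (hi, hi_rate) in zip(pts, pts[1:]):
            if lo <= size <= hi:
                return lo_rate + (size - lo) / (hi - lo) * (hi_rate - lo_rate)
        return pts[-1][1]  # at or beyond the largest configured size

    print(container_ratelimit(5, {0: 100, 10: 50, 50: 20}))  # -> 75.0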
-
-# Similarly to the above container-level write limits, the following will limit
-# container GET (listing) requests.
-# container_listing_ratelimit_0 = 100
-# container_listing_ratelimit_10 = 50
-# container_listing_ratelimit_50 = 20
-
-[filter:domain_remap]
-use = egg:swift#domain_remap
-# You can override the default log routing for this filter here:
-# set log_name = domain_remap
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# storage_domain = example.com
-# path_root = v1
-
-# Browsers can convert a host header to lowercase, so check that reseller
-# prefix on the account is the correct case. This is done by comparing the
-# items in the reseller_prefixes config option to the found prefix. If they
-# match except for case, the item from reseller_prefixes will be used
-# instead of the found reseller prefix. When none match, the default reseller
-# prefix is used. When no default reseller prefix is configured, any request
-# with an account prefix not in that list will be ignored by this middleware.
-# reseller_prefixes = AUTH
-# default_reseller_prefix =
-
-[filter:catch_errors]
-use = egg:swift#catch_errors
-# You can override the default log routing for this filter here:
-# set log_name = catch_errors
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-[filter:cname_lookup]
-# Note: this middleware requires python-dnspython
-use = egg:swift#cname_lookup
-# You can override the default log routing for this filter here:
-# set log_name = cname_lookup
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# Specify the storage_domain that matches your cloud; multiple domains
-# can be specified, separated by commas
-# storage_domain = example.com
-#
-# lookup_depth = 1
-
-# Note: Put staticweb just after your auth filter(s) in the pipeline
-[filter:staticweb]
-use = egg:swift#staticweb
-# You can override the default log routing for this filter here:
-# set log_name = staticweb
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
-[filter:tempurl]
-use = egg:swift#tempurl
-# The methods allowed with Temp URLs.
-# methods = GET HEAD PUT POST DELETE
-#
-# The headers to remove from incoming requests. Simply a whitespace delimited
-# list of header names and names can optionally end with '*' to indicate a
-# prefix match. incoming_allow_headers is a list of exceptions to these
-# removals.
-# incoming_remove_headers = x-timestamp
-#
-# The headers allowed as exceptions to incoming_remove_headers. Simply a
-# whitespace delimited list of header names and names can optionally end with
-# '*' to indicate a prefix match.
-# incoming_allow_headers =
-#
-# The headers to remove from outgoing responses. Simply a whitespace delimited
-# list of header names and names can optionally end with '*' to indicate a
-# prefix match. outgoing_allow_headers is a list of exceptions to these
-# removals.
-# outgoing_remove_headers = x-object-meta-*
-#
-# The headers allowed as exceptions to outgoing_remove_headers. Simply a
-# whitespace delimited list of header names and names can optionally end with
-# '*' to indicate a prefix match.
-# outgoing_allow_headers = x-object-meta-public-*
-
-# Note: Put formpost just before your auth filter(s) in the pipeline
-[filter:formpost]
-use = egg:swift#formpost
-
-# Note: Just needs to be placed before the proxy-server in the pipeline.
-[filter:name_check]
-use = egg:swift#name_check
-# forbidden_chars = '"`<>
-# maximum_length = 255
-# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
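A quick Python check (the paths are made up) showing what the default forbidden_regexp above rejects:

    import re

    forbidden = re.compile(r'/\./|/\.\./|/\.$|/\.\.$')
    for path in ('/v1/AUTH_test/c/ok.txt', '/v1/AUTH_test/c/../secret'):
        print(path, '->', 'rejected' if forbidden.search(path) else 'allowed')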
-
-[filter:list-endpoints]
-use = egg:swift#list_endpoints
-# list_endpoints_path = /endpoints/
-
-[filter:proxy-logging]
-use = egg:swift#proxy_logging
-# If not set, logging directives from [DEFAULT] without "access_" will be used
-# access_log_name = swift
-# access_log_facility = LOG_LOCAL0
-# access_log_level = INFO
-# access_log_address = /dev/log
-#
-# If set, access_log_udp_host will override access_log_address
-# access_log_udp_host =
-# access_log_udp_port = 514
-#
-# You can use log_statsd_* from [DEFAULT] or override them here:
-# access_log_statsd_host =
-# access_log_statsd_port = 8125
-# access_log_statsd_default_sample_rate = 1.0
-# access_log_statsd_sample_rate_factor = 1.0
-# access_log_statsd_metric_prefix =
-# access_log_headers = false
-#
-# If access_log_headers is True and access_log_headers_only is set, only
-# these headers are logged. Multiple headers can be defined as comma separated
-# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
-# access_log_headers_only =
-#
-# By default, the X-Auth-Token is logged. To obscure the value,
-# set reveal_sensitive_prefix to the number of characters to log.
-# For example, if set to 12, only the first 12 characters of the
-# token appear in the log. An unauthorized access of the log file
-# won't allow unauthorized usage of the token. However, the first
-# 12 or so characters are unique enough that you can trace/debug
-# token usage. Set to 0 to suppress the token completely (replaced
-# by '...' in the log).
-# Note: reveal_sensitive_prefix will not affect the value
-# logged with access_log_headers=True.
-# reveal_sensitive_prefix = 16
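A minimal Python sketch (not the middleware's actual code) of the truncation behaviour described above:

    def obscure(token, reveal_sensitive_prefix=16):
        # 0 suppresses the token entirely; otherwise keep only the leading characters.
        if reveal_sensitive_prefix <= 0:
            return '...'
        return token[:reveal_sensitive_prefix] + '...'

    print(obscure('AUTH_tk0123456789abcdef0123456789abcdef', 12))  # -> AUTH_tk01234...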
-#
-# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
-# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
-# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
-#
-# Note: The double proxy-logging in the pipeline is not a mistake. The
-# left-most proxy-logging is there to log requests that were handled in
-# middleware and never made it through to the right-most middleware (and
-# proxy server). Double logging is prevented for normal requests. See
-# proxy-logging docs.
-
-# Note: Put before both ratelimit and auth in the pipeline.
-[filter:bulk]
-use = egg:swift#bulk
-# max_containers_per_extraction = 10000
-# max_failed_extractions = 1000
-# max_deletes_per_request = 10000
-# max_failed_deletes = 1000
-
-# In order to keep a connection active during a potentially long bulk request,
-# Swift may return whitespace prepended to the actual response body. This
-# whitespace will be yielded no more than every yield_frequency seconds.
-# yield_frequency = 10
-
-# Note: The following parameter is used during a bulk delete of objects and
-# their container. Such a delete would frequently fail because it is very likely
-# that not all replicated objects have been deleted by the time the middleware
-# got a successful response. The number of retries can be configured, and the
-# number of seconds to wait between each retry will be 1.5**retry
-
-# delete_container_retry_count = 0
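A worked example of the 1.5**retry back-off mentioned above, with a hypothetical retry count of 3:

    for retry in range(3):
        print('retry %d: wait %.2f s' % (retry, 1.5 ** retry))  # 1.00, 1.50, 2.25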
-
-# Note: Put after auth and staticweb in the pipeline.
-[filter:slo]
-use = egg:swift#slo
-# max_manifest_segments = 1000
-# max_manifest_size = 2097152
-#
-# Rate limiting applies only to segments smaller than this size (bytes).
-# rate_limit_under_size = 1048576
-#
-# Start rate-limiting SLO segment serving after the Nth small segment of a
-# segmented object.
-# rate_limit_after_segment = 10
-#
-# Once segment rate-limiting kicks in for an object, limit segments served
-# to N per second. 0 means no rate-limiting.
-# rate_limit_segments_per_sec = 1
-#
-# Time limit on GET requests (seconds)
-# max_get_time = 86400
-
-# Note: Put after auth and staticweb in the pipeline.
-# If you don't put it in the pipeline, it will be inserted for you.
-[filter:dlo]
-use = egg:swift#dlo
-# Start rate-limiting DLO segment serving after the Nth segment of a
-# segmented object.
-# rate_limit_after_segment = 10
-#
-# Once segment rate-limiting kicks in for an object, limit segments served
-# to N per second. 0 means no rate-limiting.
-# rate_limit_segments_per_sec = 1
-#
-# Time limit on GET requests (seconds)
-# max_get_time = 86400
-
-# Note: Put after auth in the pipeline.
-[filter:container-quotas]
-use = egg:swift#container_quotas
-
-# Note: Put after auth in the pipeline.
-[filter:account-quotas]
-use = egg:swift#account_quotas
-
-[filter:gatekeeper]
-use = egg:swift#gatekeeper
-# Set this to false if you want to allow clients to set arbitrary X-Timestamps
-# on uploaded objects. This may be used to preserve timestamps when migrating
-# from a previous storage system, but risks allowing users to upload
-# difficult-to-delete data.
-# shunt_inbound_x_timestamp = true
-#
-# You can override the default log routing for this filter here:
-# set log_name = gatekeeper
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-[filter:container_sync]
-use = egg:swift#container_sync
-# Set this to false if you want to disallow any full url values to be set for
-# any new X-Container-Sync-To headers. This will keep any new full urls from
-# coming in, but won't change any existing values already in the cluster.
-# Updating those will have to be done manually, as the true realm
-# endpoint cannot always be guessed.
-# allow_full_urls = true
-# Set this to specify this clusters //realm/cluster as "current" in /info
-# current = //REALM/CLUSTER
-
-# Note: Put it at the beginning of the pipeline to profile all middleware. But
-# it is safer to put this after catch_errors, gatekeeper and healthcheck.
-[filter:xprofile]
-use = egg:swift#xprofile
-# This option enables you to switch profilers, which should inherit from the
-# python standard profiler. Currently the supported values are 'cProfile',
-# 'eventlet.green.profile', etc.
-# profile_module = eventlet.green.profile
-#
-# This prefix will be used to combine process ID and timestamp to name the
-# profile data file. Make sure the executing user has permission to write
-# into this path (missing path segments will be created, if necessary).
-# If you enable profiling in more than one type of daemon, you must override
-# it with a unique value like: /var/log/swift/profile/proxy.profile
-# log_filename_prefix = /tmp/log/swift/profile/default.profile
-#
-# The profile data will be dumped to local disk based on the above naming rule
-# at this interval.
-# dump_interval = 5.0
-#
-# Be careful: this option will make the profiler dump data into timestamped
-# files, which means lots of files will pile up in the directory.
-# dump_timestamp = false
-#
-# This is the path of the URL to access the mini web UI.
-# path = /__profile__
-#
-# Clear the data when the wsgi server shuts down.
-# flush_at_shutdown = false
-#
-# unwind the iterator of applications
-# unwind = false
-
-# Note: Put after slo, dlo in the pipeline.
-# If you don't put it in the pipeline, it will be inserted automatically.
-[filter:versioned_writes]
-use = egg:swift#versioned_writes
-# Enables using versioned writes middleware and exposing configuration
-# settings via HTTP GET /info.
-# WARNING: Setting this option bypasses the "allow_versions" option
-# in the container configuration file, which will eventually be
-# deprecated. See the documentation for more details.
-# allow_versioned_writes = false
-
-
-[filter:moon]
-paste.filter_factory = keystonemiddleware.moon_agent:filter_factory
-authz_login=admin
-authz_password={{ ADMIN_PASS }}
-auth_host = {{ internal_vip.ip }}
-logfile=/var/log/moon/keystonemiddleware.log
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/wsgi-keystone.conf.j2
deleted file mode 100644
index 64d864af..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/wsgi-keystone.conf.j2
+++ /dev/null
@@ -1,46 +0,0 @@
- {% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
-<VirtualHost {{ internal_ip }}:5000>
- WSGIDaemonProcess keystone-public processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-public
- WSGIScriptAlias / /usr/bin/keystone-wsgi-public
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/{{ http_service_name }}/keystone.log
- CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-
-<VirtualHost {{ internal_ip }}:35357>
- WSGIDaemonProcess keystone-admin processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/{{ http_service_name }}/keystone.log
- CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
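The deleted template above derives its WSGI process/thread count from the host's vCPU count via the work_threads expression at the top; a quick worked example of that expression (vCPU values are arbitrary):

    for vcpus in (1, 2, 4, 8):
        work_threads = (vcpus + 1) // 2
        print(vcpus, '->', work_threads)  # 1, 1, 2, 4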
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/Debian.yml
deleted file mode 100644
index 0da81179..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/Debian.yml
+++ /dev/null
@@ -1,168 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-packages:
- - adduser
- - dbconfig-common
- - init-system-helpers
- - python-keystone
- - q-text-as-data
- - sqlite3
- - ssl-cert
- - debconf
- - lsb-base
- - python:any
- - libjs-sphinxdoc
- - python-pip
- - unzip
- - apache2
- - libapache2-mod-wsgi
-
-dependency_packages:
- - python-cryptography
- - python-dateutil
- - python-dogpile.cache
- - python-eventlet
- - python-greenlet
- - python-jsonschema
- - python-keystoneclient
- - python-keystonemiddleware
- - python-ldap
- - python-ldappool
- - python-lxml
- - python-memcache
- - python-migrate
- - python-msgpack
- - python-mysqldb
- - python-oauthlib
- - python-openstackclient
- - python-oslo.cache
- - python-oslo.concurrency
- - python-oslo.config
- - python-oslo.context
- - python-oslo.db
- - python-oslo.i18n
- - python-oslo.log
- - python-oslo.messaging
- - python-oslo.middleware
- - python-oslo.policy
- - python-oslo.serialization
- - python-oslo.service
- - python-oslo.utils
- - python-pam
- - python-passlib
- - python-paste
- - python-pastedeploy
- - python-pbr
- - python-pycadf
- - python-pymysql
- - python-pysaml2
- - python-pysqlite2
- - python-routes
- - python-six
- - python-sqlalchemy
- - python-stevedore
- - python-webob
- - unzip
- - python3-keystoneauth1
- - python3-keystoneclient
- - python3-oslo.config
- - python3-oslo.context
- - python3-oslo.i18n
- - python3-oslo.serialization
- - python-oslo.service
- - python-oslo.utils
- - python-pam
- - python-passlib
- - python-paste
- - python-pastedeploy
- - python-pbr
- - python-pycadf
- - python-pymysql
- - python-pysaml2
- - python-pysqlite2
- - python-routes
- - python-six
- - python-sqlalchemy
- - python-stevedore
- - python-webob
- - unzip
- - python3-keystoneauth1
- - python3-keystoneclient
- - python3-oslo.config
- - python3-oslo.context
- - python3-oslo.i18n
- - python3-oslo.serialization
- - python3-oslo.utils
- - apache2
- - libapache2-mod-wsgi
- - python3-cryptography
- - python3-dateutil
- - python3-dogpile.cache
- - python3-eventlet
- - python3-greenlet
- - python3-jsonschema
- - python3-keystoneclient
- - python3-keystonemiddleware
- - python3-lxml
- - python3-memcache
- - python3-migrate
- - python3-msgpack
- - python3-mysqldb
- - python3-oauthlib
- - python3-openstackclient
- - python3-oslo.cache
- - python3-oslo.concurrency
- - python3-oslo.config
- - python3-oslo.context
- - python3-oslo.db
- - python3-oslo.i18n
- - python3-oslo.log
- - python3-oslo.messaging
- - python3-oslo.middleware
- - python3-oslo.policy
- - python3-oslo.serialization
- - python3-oslo.service
- - python3-oslo.utils
- - python3-pam
- - python3-passlib
- - python3-paste
- - python3-pastedeploy
- - python3-pbr
- - python3-pycadf
- - python3-pymysql
- - python3-pysaml2
- - python3-routes
- - python3-six
- - python3-sqlalchemy
- - python3-stevedore
- - python3-webob
- - python3-oslo.service
- - python3-oslo.utils
- - python3-pam
- - python3-passlib
- - python3-paste
- - python3-pastedeploy
- - python3-pbr
- - python3-pycadf
- - python3-pymysql
- - python3-pysaml2
- - python3-routes
- - python3-six
- - python3-sqlalchemy
- - python3-stevedore
- - python3-webob
-
-services:
- - apache2
-
-
-apache_config_dir: /etc/apache2
-http_service_name: apache2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/main.yml
deleted file mode 100644
index cff8c7c2..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/main.yml
+++ /dev/null
@@ -1,172 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-
-services_noarch: []
-
-os_services:
- - name: keystone
- type: identity
- region: RegionOne
- description: "OpenStack Identity"
- publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
-
- - name: glance
- type: image
- region: RegionOne
- description: "OpenStack Image Service"
- publicurl: "http://{{ public_vip.ip }}:9292"
- internalurl: "http://{{ internal_vip.ip }}:9292"
- adminurl: "http://{{ internal_vip.ip }}:9292"
-
- - name: nova
- type: compute
- region: RegionOne
- description: "OpenStack Compute"
- publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
-
- - name: neutron
- type: network
- region: RegionOne
- description: "OpenStack Networking"
- publicurl: "http://{{ public_vip.ip }}:9696"
- internalurl: "http://{{ internal_vip.ip }}:9696"
- adminurl: "http://{{ internal_vip.ip }}:9696"
-
- - name: ceilometer
- type: metering
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8777"
- internalurl: "http://{{ internal_vip.ip }}:8777"
- adminurl: "http://{{ internal_vip.ip }}:8777"
-
- - name: aodh
- type: alarming
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8042"
- internalurl: "http://{{ internal_vip.ip }}:8042"
- adminurl: "http://{{ internal_vip.ip }}:8042"
-
-# - name: cinder
-# type: volume
-# region: RegionOne
-# description: "OpenStack Block Storage"
-# publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-#
-# - name: cinderv2
-# type: volumev2
-# region: RegionOne
-# description: "OpenStack Block Storage v2"
-# publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-
- - name: heat
- type: orchestration
- region: RegionOne
- description: "OpenStack Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
-
- - name: heat-cfn
- type: cloudformation
- region: RegionOne
- description: "OpenStack CloudFormation Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8000/v1"
- internalurl: "http://{{ internal_vip.ip }}:8000/v1"
- adminurl: "http://{{ internal_vip.ip }}:8000/v1"
-
-# - name: swift
-# type: object-store
-# region: RegionOne
-# description: "OpenStack Object Storage"
-# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-
-os_users:
- - user: admin
- password: "{{ ADMIN_PASS }}"
- email: admin@admin.com
- role: admin
- tenant: admin
- tenant_description: "Admin Tenant"
-
- - user: glance
- password: "{{ GLANCE_PASS }}"
- email: glance@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: nova
- password: "{{ NOVA_PASS }}"
- email: nova@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: keystone
- password: "{{ KEYSTONE_PASS }}"
- email: keystone@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: neutron
- password: "{{ NEUTRON_PASS }}"
- email: neutron@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: ceilometer
- password: "{{ CEILOMETER_PASS }}"
- email: ceilometer@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: cinder
- password: "{{ CINDER_PASS }}"
- email: cinder@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: heat
- password: "{{ HEAT_PASS }}"
- email: heat@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: demo
- password: ""
- email: heat@demo.com
- role: heat_stack_user
- tenant: demo
- tenant_description: "Demo Tenant"
-
-# - user: swift
-# password: "{{ CINDER_PASS }}"
-# email: swift@admin.com
-# role: admin
-# tenant: service
-# tenant_description: "Service Tenant"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/handlers/main.yml
deleted file mode 100644
index ca4e8088..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/handlers/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart neutron compute service
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
-
-- name: restart nova-compute services
- service: name=nova-compute state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/tasks/main.yml
deleted file mode 100644
index 375e325d..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/tasks/main.yml
+++ /dev/null
@@ -1,75 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install compute-related neutron packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: fix openstack neutron plugin config file
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
- systemctl daemon-reload
- when: ansible_os_family == 'RedHat'
-
-- name: fix openstack neutron plugin config file ubuntu
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
- when: ansible_os_family == "Debian"
-
-- name: generate neutron compute service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: config ml2 plugin
- template: src=templates/ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
-
-- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
-
-- name: config neutron
- template: src=neutron.conf
- dest=/etc/neutron/neutron.conf backup=yes
- notify:
- - restart neutron compute service
- - restart nova-compute services
-
-- meta: flush_handlers
-
-- include: ../../neutron-network/tasks/odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/vars/Debian.yml
deleted file mode 100644
index 83d7f323..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/vars/Debian.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-packages:
- - neutron-common
- - neutron-plugin-ml2
- - openvswitch-switch-dpdk
- - openvswitch-switch
- - neutron-plugin-openvswitch-agent
-
-services:
- - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-controller/tasks/neutron_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-controller/tasks/neutron_install.yml
deleted file mode 100644
index 917a8356..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-controller/tasks/neutron_install.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install controller-related neutron packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: generate neutron control service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: get tenant id to fill neutron.conf
- shell:
- . /opt/admin-openrc.sh;
- openstack project show service | grep id | sed -n "2,1p" | awk '{print $4}'
- register: NOVA_ADMIN_TENANT_ID
-
-- name: update neutron conf
- template: src=templates/neutron.conf dest=/etc/neutron/neutron.conf backup=yes
-
-- name: update ml2 plugin conf
- template: src=templates/ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
-
-- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/tasks/main.yml
deleted file mode 100644
index 31f7f17c..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/tasks/main.yml
+++ /dev/null
@@ -1,117 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: assert kernel support for vxlan
- command: modinfo -F version vxlan
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: assert iproute2 support for vxlan
- command: ip link add type vxlan help
- register: iproute_out
- failed_when: iproute_out.rc == 255
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install neutron network related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: generate neutron network service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: fix openstack neutron plugin config file
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
- systemctl daemon-reload
- when: ansible_os_family == 'RedHat'
-
-- name: fix openstack neutron plugin config file ubuntu
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
- when: ansible_os_family == "Debian"
-
-- name: config l3 agent
- template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
- backup=yes
-
-- name: config dhcp agent
- template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
- backup=yes
-
-- name: update dnsmasq-neutron.conf
- template: src=templates/dnsmasq-neutron.conf
- dest=/etc/neutron/dnsmasq-neutron.conf
-
-- name: config metadata agent
- template: src=metadata_agent.ini
- dest=/etc/neutron/metadata_agent.ini backup=yes
-
-- name: config ml2 plugin
- template: src=templates/ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
-
-- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
-
-- name: config neutron
- template: src=templates/neutron.conf
- dest=/etc/neutron/neutron.conf backup=yes
-
-- name: force mtu to 1450 for vxlan
- lineinfile:
- dest: /etc/neutron/dnsmasq-neutron.conf
- regexp: '^dhcp-option-force'
- line: 'dhcp-option-force=26,1450'
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- include: firewall.yml
- when: enable_fwaas == True
-
-- include: vpn.yml
- when: enable_vpnaas == True
-
-- include: odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron network relation service
- service: name={{ item }} state=restarted enabled=yes
- with_flattened:
- - services_noarch
- - services
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/vars/Debian.yml
deleted file mode 100644
index 1a78ca8c..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/vars/Debian.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - neutron-plugin-ml2
- - openvswitch-switch-dpdk
- - openvswitch-switch
- - neutron-l3-agent
- - neutron-dhcp-agent
- - neutron-plugin-openvswitch-agent
-
-services:
- - openvswitch-switch
- - neutron-openvswitch-agent
-
-openvswitch_agent: neutron-plugin-openvswitch-agent
-
-xorp_packages:
- - xorp
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/tasks/main.yml
deleted file mode 100644
index 16315b36..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/tasks/main.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install nova-compute related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: restart virtlogd
- service: name=virtlogd state=started enabled=yes
- when: ansible_os_family == "Debian"
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: get number of cpu support virtualization
- shell: egrep -c '(vmx|svm)' /proc/cpuinfo
- register: kvm_cpu_num
-
-- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova.conf
- - nova-compute.conf
- notify:
- - restart nova-compute services
-
-- name: generate neutron control service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-#'
-- name: remove nova sqlite db
- shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
-
-- meta: flush_handlers
-
-- name: restart nova-compute and libvirt-bin
- shell: >
- service nova-compute restart;
- service libvirt-bin restart;
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova-compute.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova-compute.conf
deleted file mode 100644
index 305d408b..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova-compute.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-[DEFAULT]
-compute_driver=libvirt.LibvirtDriver
-force_raw_images = true
-[libvirt]
-{% if kvm_cpu_num.stdout_lines[0]|int == 0 %}
-virt_type=qemu
-{% else %}
-virt_type=kvm
-{% endif %}
-images_type = raw
-mem_stats_period_seconds=0
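The template above picks qemu or kvm based on the kvm_cpu_num fact gathered earlier in the role (egrep -c '(vmx|svm)' /proc/cpuinfo); a rough Python equivalent of that host check, for illustration only:

    import re

    with open('/proc/cpuinfo') as f:
        kvm_cpu_num = sum(1 for line in f if re.search(r'vmx|svm', line))

    print('virt_type =', 'qemu' if kvm_cpu_num == 0 else 'kvm')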
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova.conf
deleted file mode 100644
index 8d7e9a5f..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova.conf
+++ /dev/null
@@ -1,104 +0,0 @@
-[DEFAULT]
-transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
-auth_strategy = keystone
-my_ip = {{ internal_ip }}
-use_neutron = True
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-log-dir=/var/log/nova
-state_path=/var/lib/nova
-force_dhcp_release=True
-verbose={{ VERBOSE }}
-ec2_private_dns_show_ip=True
-enabled_apis=osapi_compute,metadata
-default_floating_pool={{ public_net_info.network }}
-metadata_listen={{ internal_ip }}
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-iscsi_helper=tgtadm
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-debug={{ DEBUG }}
-volumes_path=/var/lib/nova/volumes
-rpc_backend = rabbit
-osapi_compute_listen={{ internal_ip }}
-network_api_class = nova.network.neutronv2.api.API
-security_group_api = neutron
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[api_database]
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
-idle_timeout = 30
-pool_timeout = 10
-use_db_reconnect = True
-
-[database]
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-idle_timeout = 30
-pool_timeout = 10
-use_db_reconnect = True
-
-[glance]
-api_servers = http://{{ internal_vip.ip }}:9292
-host = {{ internal_vip.ip }}
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = nova
-password = {{ NOVA_PASS }}
-
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[libvirt]
-use_virtio_for_bridges=True
-
-[neutron]
-url = http://{{ internal_vip.ip }}:9696
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-region_name = RegionOne
-project_name = service
-username = neutron
-password = {{ NEUTRON_PASS }}
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
-
-[oslo_concurrency]
-lock_path=/var/lib/nova/tmp
-
-[oslo_messaging_rabbit]
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-[vnc]
-enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-[wsgi]
-api_paste_config=/etc/nova/api-paste.ini
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-controller/tasks/nova_config.yml
deleted file mode 100644
index f332c97a..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-controller/tasks/nova_config.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: nova api db sync
- shell: su -s /bin/sh -c "nova-manage api_db sync" nova
- ignore_errors: True
- notify:
- - restart nova service
-
-- name: nova db sync
- nova_manage: action=dbsync
- notify:
- - restart nova service
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/files/opendaylight.service b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/files/opendaylight.service
deleted file mode 100644
index 6c9e4c44..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/files/opendaylight.service
+++ /dev/null
@@ -1,21 +0,0 @@
-[Unit]
-Description=OpenDaylight
-After=
-
-
-[Service]
-User=root
-Group=root
-Type=simple
-EnvironmentFile=-/opt/moon-environment
-WorkingDirectory=/opt/opendaylight-0.3.0
-PermissionsStartOnly=true
-ExecStartPre=
-ExecStart=/usr/lib/jvm/java-8-oracle/bin/java -Djava.security.properties=/opt/opendaylight-0.3.0/etc/odl.java.security -server -Xms128M -Xmx2048m -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass -XX:MaxPermSize=512m -Dcom.sun.management.jmxremote -Djava.endorsed.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/endorsed:/usr/lib/jvm/java-8-oracle/lib/endorsed:/opt/opendaylight-0.3.0/lib/endorsed -Djava.ext.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/ext:/usr/lib/jvm/java-8-oracle/lib/ext:/opt/opendaylight-0.3.0/lib/ext -Dkaraf.instances=/opt/opendaylight-0.3.0/instances -Dkaraf.home=/opt/opendaylight-0.3.0 -Dkaraf.base=/opt/opendaylight-0.3.0 -Dkaraf.data=/opt/opendaylight-0.3.0/data -Dkaraf.etc=/opt/opendaylight-0.3.0/etc -Djava.io.tmpdir=/opt/opendaylight-0.3.0/data/tmp -Djava.util.logging.config.file=/opt/opendaylight-0.3.0/etc/java.util.logging.properties -Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true -classpath /opt/opendaylight-0.3.0/lib/karaf-jaas-boot.jar:/opt/opendaylight-0.3.0/lib/karaf-jmx-boot.jar:/opt/opendaylight-0.3.0/lib/karaf-org.osgi.core.jar:/opt/opendaylight-0.3.0/lib/karaf.branding-1.2.2-Beryllium-SR2.jar:/opt/opendaylight-0.3.0/lib/karaf.jar org.apache.karaf.main.Main
-Restart=on-failure
-LimitNOFILE=65535
-TimeoutStopSec=15
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_00_download_packages.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_00_download_packages.yml
deleted file mode 100644
index efd359db..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_00_download_packages.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-#"
-
-- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-
-#"
-
-- name: download odl package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/odl/{{ odl_pkg_url }}" dest=/opt/{{ odl_pkg_name }}
-
-# "
-
-- name: download odl pip package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/pip/{{ networking_odl_pkg_name }}" dest=/opt/{{ networking_odl_pkg_name }}
-
-#"
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
deleted file mode 100644
index 8d71606f..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: opendaylight system file
- copy:
- src: "{{ service_file.src }}"
- dest: "{{ service_file.dst }}"
- mode: 0755
-
-- name: set l3 fwd enable in custom.properties
- template:
- src: custom.properties
- dest: "{{ odl_home }}/etc/custom.properties"
- owner: odl
- group: odl
- mode: 0775
- when: odl_l3_agent == "Enable"
-
-- name: create karaf config
- template:
- src: org.apache.karaf.features.cfg.Debian
- dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
- owner: odl
- group: odl
- mode: 0775
- when: ansible_os_family == "Debian"
-
-- name: create karaf config
- template:
- src: org.apache.karaf.features.cfg.Redhat
- dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
- owner: odl
- group: odl
- mode: 0775
- when: ansible_os_family == "RedHat"
-
-- name: create tomcat config
- template:
- src: tomcat-server.xml
- dest: "{{ odl_home }}/configuration/tomcat-server.xml"
-
-- name: create jetty config
- template:
- src: jetty.xml
- dest: "{{ odl_home }}/etc/jetty.xml"
-
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_04_install_pip_packages.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
deleted file mode 100644
index 869d264a..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: patch odl pip package
- shell: |
- cd /opt
- tar xf /opt/{{ networking_odl_pkg_name }}
- rm -rf /opt/{{ networking_odl_pkg_name }}
- sed -i 's/^neutron-lib.*/neutron-lib/' networking-odl-2.0.0/requirements.txt
- tar zcf /opt/{{ networking_odl_pkg_name }} networking-odl-2.0.0
- rm -rf networking-odl-2.0.0
- cd -
-
-- name: odl pip package install
- shell: |
- cd /opt
- pip install {{ networking_odl_pkg_name }}
- rm -rf {{ networking_odl_pkg_name }}
- cd -
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
deleted file mode 100644
index f44b373b..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: turn off neutron-server and neutron-openvswitch-agent daemons on control node
- shell: >
- sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ;
- sed -i '/neutron-server/d' /opt/service;
- sed -i '/keepalived/d' /opt/service;
-
-- name: turn off neutron-server on control node
- service: name=neutron-server state=stopped
-
-- name: turn off keepalived on control node
- service: name=keepalived state=stopped
- when: ansible_os_family == "Debian"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_odl_controller.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_odl_controller.yml
deleted file mode 100644
index d78a76e0..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/01_odl_controller.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: download packages
- include: 01_00_download_packages.yml
-
-- name: create odl user and group
- include: 01_01_create_odl_user_and_group.yml
-
-- name: unarchive odl and jdk
- include: 01_02_unarchive_odl_and_jdk.yml
-
-- name: copy odl configuration files
- include: 01_03_copy_odl_configuration_files.yml
-
-- name: install pip packages
- include: 01_04_install_pip_packages.yml
-
-- name: clean up karaf data
- include: 01_05_clean_up_karaf_data.yml
-
-- name: stop openstack services
- include: 01_06_stop_openstack_services.yml
-
-- name: set opendaylight cluster
- include: 05_set_opendaylight_cluster.yml
- when: groups['odl']|length > 1
-
-- name: install moon
- include: moon-odl.yml
- when: moon == "Enable"
-
-- name: start and check odl
- include: 01_07_start_check_odl.yml
-
-- name: run openvswitch
- include: 03_openvswitch.yml
-
-- name: configure neutron
- include: 01_08_configure_neutron.yml
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
deleted file mode 100644
index 04f0ec61..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: restart keepalived to recover external IP before check br-int
- shell: service keepalived restart
- when: inventory_hostname in groups['odl']
- ignore_errors: True
-
-- name: restart opendaylight (for newton, opendaylight doesn't listen on port 6640, so a restart is needed)
- shell: service opendaylight restart; sleep 60
- when: inventory_hostname in groups['odl']
- ignore_errors: True
-
-- name: set opendaylight as the manager
- command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
-
-- name: check br-int
- shell: ovs-vsctl list-br | grep br-int; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-int; done
-
-- name: set local ip in openvswitch
- shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '};
-
-#'
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
deleted file mode 100644
index 7eddf7fa..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: combine odl controller
- shell: rm -f /opt/cluster; touch /opt/cluster;
-
-- name: combine odl controller
- shell: echo "{{ ip_settings[item.1]['mgmt']['ip'] }} \c" >> /opt/cluster; >> /opt/cluster;
- with_indexed_items: groups['odl']
-
-- name: combine odl controller
- shell: cat /opt/cluster
- register: cluster
-
-#- debug: msg="{{ cluster.stdout_lines[0] }}"
-
-- name: get odl controller node number
- shell: uname -n | cut -b 5,5
- register: number
-
-#- debug: msg="{{ number.stdout_lines[0] }}"
-
-- debug: msg="{{ odl_home }}/bin/configure_cluster.sh {{ number.stdout_lines[0] }} {{ cluster.stdout_lines[0] }}"
-
-- name: configure odl controller in cluster
- shell: "{{ odl_home }}/bin/configure_cluster.sh {{ number.stdout_lines[0] }} {{ cluster.stdout_lines[0] }}"
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/main.yml
deleted file mode 100644
index 32952c51..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
- tags:
- - test_odl
-
-- name: Provision Common on all nodes
- include: 00_odl_common.yml
- when: groups['odl']|length !=0
-
-- name: Provision ODL on Controller nodes
- include: 01_odl_controller.yml
- when: inventory_hostname in groups['odl']
-
-- name: Provision ODL on Compute nodes
- include: 02_odl_compute.yml
- when: groups['odl']|length !=0 and inventory_hostname not in groups['odl']
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/moon-odl.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/moon-odl.yml
deleted file mode 100644
index b89b2823..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/moon-odl.yml
+++ /dev/null
@@ -1,61 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: delete data journal snapshots
- shell: rm -rf {{ odl_home }}/{{ item }}
- with_items:
- - journal
- - data
- - snapshots
-
-- name: remove aaa feature
- shell: rm -rf {{ odl_home }}/system/org/opendaylight/aaa/
-
-- name: download apache maven package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/apache-maven-3.3.9-bin.tar.gz" dest=/opt/apache-maven-3.3.9-bin.tar.gz
-
-- name: create maven folder
- shell: mkdir -p /opt/apache-maven-3.3.9/
-
-- name: extract maven
- command: su -s /bin/sh -c "tar zxf /opt/apache-maven-3.3.9-bin.tar.gz -C /opt/apache-maven-3.3.9/ --strip-components 1 --no-overwrite-dir -k --skip-old-files" root
-
-- name: install maven
- shell: ln -s /opt/apache-maven-3.3.9/bin/mvn /usr/local/bin/mvn;
-
-- name: create m2 directory
- file: path=/root/.m2/ state=directory mode=0755
-
-- name: copy settings.xml
- template: src=settings.xml dest=/root/.m2/settings.xml
-
-#- name: upload swift lib
-# unarchive: src=odl-aaa-moon.tar.gz dest=/home/
-
-- name: download odl-aaa-moon package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/{{ odl_aaa_moon }}" dest=/home/
-
-- name: unarchive odl-aaa-moon package
- command: su -s /bin/sh -c "tar xvf /home/{{ odl_aaa_moon }} -C /home/"
-
-- name: install aaa
- shell: >
- export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/usr/lib/jvm/java-8-oracle/bin:/opt/apache-maven-3.3.3/bin";
- export JAVA_HOME="/usr/lib/jvm/java-8-oracle";
- export _JAVA_OPTIONS="-Djava.net.preferIPv4Stack=true";
- export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=512m";
- cd /home/odl-aaa-moon/aaa/;
- mvn clean install -DskipTests;
-
-- name: remove shiro ini
- shell: rm -f {{ odl_home }}/etc/shiro.ini
-
-- name: set moon env
- template: src=moon-environment dest=/opt/moon-environment
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/jetty.xml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/jetty.xml
deleted file mode 100755
index 50ac7c35..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/jetty.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied. See the License for the
- specific language governing permissions and limitations
- under the License.
--->
-<!DOCTYPE Configure PUBLIC "-//Mort Bay Consulting//
-DTD Configure//EN" "http://jetty.mortbay.org/configure.dtd">
-
-<Configure class="org.eclipse.jetty.server.Server">
-
- <!-- =========================================================== -->
- <!-- Set connectors -->
- <!-- =========================================================== -->
- <!-- One of each type! -->
- <!-- =========================================================== -->
-
- <!-- Use this connector for many frequently idle connections and for
- threadless continuations. -->
- <Call name="addConnector">
- <Arg>
- <New class="org.eclipse.jetty.server.nio.SelectChannelConnector">
- <Set name="host">
- <Property name="jetty.host"/>
- </Set>
- <Set name="port">
- <Property name="jetty.port" default="8181" />
- </Set>
- <Set name="maxIdleTime">300000</Set>
- <Set name="Acceptors">2</Set>
- <Set name="statsOn">false</Set>
- <Set name="confidentialPort">8543</Set>
- <Set name="lowResourcesConnections">20000</Set>
- <Set name="lowResourcesMaxIdleTime">5000</Set>
- </New>
- </Arg>
- </Call>
-
- <!-- =========================================================== -->
- <!-- Configure Authentication Realms -->
- <!-- Realms may be configured for the entire server here, or -->
- <!-- they can be configured for a specific web app in a context -->
- <!-- configuration (see $(jetty.home)/contexts/test.xml for an -->
- <!-- example). -->
- <!-- =========================================================== -->
- <Call name="addBean">
- <Arg>
- <New class="org.eclipse.jetty.plus.jaas.JAASLoginService">
- <Set name="name">karaf</Set>
- <Set name="loginModuleName">karaf</Set>
- <Set name="roleClassNames">
- <Array type="java.lang.String">
- <Item>org.apache.karaf.jaas.boot.principal.RolePrincipal
- </Item>
- </Array>
- </Set>
- </New>
- </Arg>
- </Call>
- <Call name="addBean">
- <Arg>
- <New class="org.eclipse.jetty.plus.jaas.JAASLoginService">
- <Set name="name">default</Set>
- <Set name="loginModuleName">karaf</Set>
- <Set name="roleClassNames">
- <Array type="java.lang.String">
- <Item>org.apache.karaf.jaas.boot.principal.RolePrincipal
- </Item>
- </Array>
- </Set>
- </New>
- </Arg>
- </Call>
-
-</Configure>
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/ml2_conf.sh
deleted file mode 100755
index 5e3627bf..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/ml2_conf.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
-[ml2_odl]
-password = admin
-username = admin
-url = http://{{ internal_vip.ip }}:8181/controller/nb/v2/neutron
-EOT
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/moon-environment b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/moon-environment
deleted file mode 100644
index 9a13da8e..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/moon-environment
+++ /dev/null
@@ -1,3 +0,0 @@
-MOON_SERVER_ADDR={{ internal_vip.ip }}
-MOON_SERVER_PORT=5000
-no_proxy="localhost,127.0.0.1"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/settings.xml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/settings.xml
deleted file mode 100644
index 5ba3b50c..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/settings.xml
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=2 tabstop=2: -->
-<!--
- Copyright (c) 2014, 2015 Cisco Systems, Inc. and others. All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
- <localRepository>{{ odl_home }}/system/ </localRepository>
- <profiles>
- <profile>
- <id>opendaylight-release</id>
- <repositories>
- <repository>
- <id>opendaylight-mirror</id>
- <name>opendaylight-mirror</name>
- <url>https://nexus.opendaylight.org/content/repositories/public/</url>
- <releases>
- <enabled>true</enabled>
- <updatePolicy>never</updatePolicy>
- </releases>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- </repository>
- </repositories>
- <pluginRepositories>
- <pluginRepository>
- <id>opendaylight-mirror</id>
- <name>opendaylight-mirror</name>
- <url>https://nexus.opendaylight.org/content/repositories/public/</url>
- <releases>
- <enabled>true</enabled>
- <updatePolicy>never</updatePolicy>
- </releases>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- </pluginRepository>
- </pluginRepositories>
- </profile>
-
- <profile>
- <id>opendaylight-snapshots</id>
- <repositories>
- <repository>
- <id>opendaylight-snapshot</id>
- <name>opendaylight-snapshot</name>
- <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
- <releases>
- <enabled>false</enabled>
- </releases>
- <snapshots>
- <enabled>true</enabled>
- </snapshots>
- </repository>
- </repositories>
- <pluginRepositories>
- <pluginRepository>
- <id>opendaylight-snapshot</id>
- <name>opendaylight-snapshot</name>
- <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
- <releases>
- <enabled>false</enabled>
- </releases>
- <snapshots>
- <enabled>true</enabled>
- </snapshots>
- </pluginRepository>
- </pluginRepositories>
- </profile>
- </profiles>
-
- <activeProfiles>
- <activeProfile>opendaylight-release</activeProfile>
- <activeProfile>opendaylight-snapshots</activeProfile>
- </activeProfiles>
-</settings>
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/tomcat-server.xml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/tomcat-server.xml
deleted file mode 100755
index bc7ab13d..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/tomcat-server.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-<?xml version='1.0' encoding='utf-8'?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<Server>
- <!--APR library loader. Documentation at /docs/apr.html -->
- <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
- <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
- <Listener className="org.apache.catalina.core.JasperListener" />
- <!-- Prevent memory leaks due to use of particular java/javax APIs-->
- <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
- <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
- <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
-
- <Service name="Catalina">
- <Connector port="{{ odl_api_port }}" protocol="HTTP/1.1"
- connectionTimeout="20000"
- redirectPort="8443" />
-
-<!--
- Please remove the comments around the following Connector tag to enable HTTPS Authentication support.
- Remember to add a valid keystore in the configuration folder.
- More info : http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
--->
-
- <!--
- <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
- maxThreads="150" scheme="https" secure="true"
- clientAuth="false" sslProtocol="TLS"
- keystoreFile="configuration/keystore"
- keystorePass="changeit"/>
- -->
-
- <Engine name="Catalina" defaultHost="localhost">
- <Host name="localhost" appBase=""
- unpackWARs="false" autoDeploy="false"
- deployOnStartup="false" createDirs="false">
- <Realm className="org.opendaylight.controller.karafsecurity.ControllerCustomRealm" />
- <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
- <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
- prefix="web_access_log_" suffix=".txt" resolveHosts="false"
- rotatable="true" fileDateFormat="yyyy-MM"
- pattern="%{yyyy-MM-dd HH:mm:ss.SSS z}t - [%a] - %r"/>
- </Host>
- </Engine>
- </Service>
-</Server>
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/Debian.yml
deleted file mode 100755
index 640a264a..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/Debian.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-common_packages:
- - crudini
-
-service_ovs_name: openvswitch-switch
-service_ovs_agent_name: neutron-openvswitch-agent
-
-service_file:
- src: opendaylight.service
- dst: /lib/systemd/system/opendaylight.service
-
-networking_odl_pkg_name: networking-odl-2.0.0.tar.gz
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/main.yml
deleted file mode 100755
index e5f52b42..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-odl_username: admin
-odl_password: admin
-odl_api_port: 8181
-
-#odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.3.0-Lithium/distribution-karaf-0.3.0-Lithium.tar.gz
-odl_pkg_url: karaf.tar.gz
-odl_pkg_name: karaf.tar.gz
-odl_home: "/opt/opendaylight-0.3.0/"
-odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'odl-restconf','odl-l2switch-switch','odl-openflowplugin-all','odl-mdsal-apidocs','odl-dlux-all','odl-adsal-northbound','odl-nsf-all','odl-ovsdb-openstack','odl-ovsdb-northbound','odl-dlux-core']
-odl_extra_features: ['odl-restconf-all','odl-mdsal-clustering','odl-openflowplugin-flow-services','http','jolokia-osgi']
-odl_features: "{{ odl_base_features + odl_extra_features }}"
-
-odl_aaa_moon: odl-aaa-moon.tar.gz
-
-jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
-jdk8_script_name: install_jdk8.tar
-
-common_packages_noarch: []
-
-odl_pip:
- - networking_odl
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/handlers/main.yml
deleted file mode 100755
index e099fcf4..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/handlers/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart onos service
- service: name=onos state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/main.yml
deleted file mode 100755
index c8ce1155..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/main.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: remove neutron-plugin-openvswitch-agent auto start
- shell: >
- update-rc.d neutron-plugin-openvswitch-agent remove;
- sed -i /neutron-plugin-openvswitch-agent/d /opt/service
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: shut down and disable Neutron's agent services
- service: name=neutron-plugin-openvswitch-agent state=stopped
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: remove neutron-l3-agent auto start
- shell: >
- update-rc.d neutron-l3-agent remove;
- sed -i /neutron-l3-agent/d /opt/service
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: shut down and disable Neutron's l3 agent services
- service: name=neutron-l3-agent state=stopped
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: Stop the Open vSwitch service and clear existing OVSDB
- shell: >
- ovs-vsctl del-br br-int ;
- ovs-vsctl del-br br-tun ;
- ovs-vsctl del-manager ;
- ip link delete onos_port1 type veth peer name onos_port2;
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: Install ONOS Cluster on Controller
- include: onos_controller.yml
- when: inventory_hostname in groups['onos']
-
-- name: Config ONOS Cluster
- include: openvswitch.yml
- when: groups['onos']|length !=0
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/onos_controller.yml
deleted file mode 100755
index d51151a9..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/onos_controller.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download onos driver packages
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
-
-- name: download onos sfc driver package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}" dest=/opt/
-
-- name: unarchive onos driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
-
-- name: unarchive onos sfc driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-sfc.tar -C /opt/"
-
-- name: install onos driver
- command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh"
-
-- name: install onos sfc driver
- command: su -s /bin/sh -c "/opt/networking-sfc/install_driver.sh"
-
-- name: install onos required packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages
-
-- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-
-- name: unarchive jdk8 install script package
- command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
-
-- name: install install_jdk8 package
- command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
-
-- name: create JAVA_HOME environment variable
- shell: >
- export J2SDKDIR=/usr/lib/jvm/java-8-oracle;
- export J2REDIR=/usr/lib/jvm/java-8-oracle/jre;
- export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin;
- export JAVA_HOME=/usr/lib/jvm/java-8-oracle;
- export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db;
-
-- name: create onos group
- group: name=onos system=yes state=present
-
-- name: create onos user
- user:
- name: onos
- group: onos
- home: "{{ onos_home }}"
- createhome: "yes"
- system: "yes"
- shell: "/bin/false"
-
-- name: download onos package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }}
-
-- name: create new jar repository
- command: su -s /bin/sh -c "mkdir ~/.m2"
- ignore_errors: True
-
-- name: download jar repository
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
-
-- name: extract jar repository
- command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
-
-- name: extract onos package
- command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos
-
-- name: configure onos service
- shell: >
- echo 'export ONOS_OPTS=debug' > {{ onos_home }}/options;
- echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
- mkdir {{ onos_home }}/var;
- mkdir {{ onos_home }}/config;
- sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-
-- name: configure onos boot feature
- shell: >
- sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
-
-- name: wait for config time
- shell: "sleep 10"
-
-- name: start onos service
- service: name=onos state=started enabled=yes
-
-- name: wait for onos start time
- shell: "sleep 200"
-
-- name: add onos auto start
- shell: >
- echo "onos">>/opt/service
-
-##########################################################################################################
-################################ ONOS connect with OpenStack ################################
-##########################################################################################################
-- name: Configure Neutron1
- shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_sfc.services.sfc.plugin.SfcPlugin, networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin, onos_router;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
-
-- name: Create ML2 Configuration File
- template:
- src: ml2_conf.sh
- dest: "/opt/ml2_conf.sh"
- mode: 0777
-
-- name: Configure Neutron2
- command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-
-- name: Configure Neutron3
- shell: >
- mysql -e "drop database if exists neutron_ml2;";
- mysql -e "create database neutron_ml2 character set utf8;";
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
- su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
-
-- name: Restart neutron-server
- service: name=neutron-server state=restarted
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/openvswitch.yml
deleted file mode 100755
index aac787ea..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/openvswitch.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: set veth port
- shell: >
- ip link add onos_port1 type veth peer name onos_port2;
- ifconfig onos_port1 up;
- ifconfig onos_port2 up;
- ignore_errors: True
-
-- name: add openflow-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
- when: inventory_hostname in groups['onos']
-
-- name: add openflow feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdatabase feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdb-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'";
- when: inventory_hostname in groups['onos']
-
-- name: add onos driver ovsdb feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdb provider host feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'";
- when: inventory_hostname in groups['onos']
-
-- name: add vtn feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'";
- when: inventory_hostname in groups['onos']
-
-- name: set public eth card start
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n onos_port2'"
- when: inventory_hostname in groups['onos']
-
-- name: Set ONOS as the manager
- command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
-
-- name: delete default gateway
- shell: >
- route delete default;
- when: inventory_hostname not in groups['onos']
- ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/templates/ml2_conf.sh
deleted file mode 100755
index 8af03df4..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/templates/ml2_conf.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
-[onos]
-password = admin
-username = admin
-url_path = http://{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:8181/onos/vtn
-EOT
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/Debian.yml
deleted file mode 100755
index 59a4dbd9..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/Debian.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - software-properties-common
- - crudini
-
-services: []
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/RedHat.yml
deleted file mode 100755
index 59a4dbd9..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/RedHat.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - software-properties-common
- - crudini
-
-services: []
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/main.yml
deleted file mode 100755
index f11f1102..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-onos_pkg_name: onos-1.6.0.tar.gz
-onos_home: /opt/onos/
-karaf_dist: apache-karaf-3.0.5
-jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
-jdk8_script_name: install_jdk8.tar
-onos_driver: networking-onos.tar
-onos_sfc_driver: networking-sfc.tar
-repository: repository.tar
-onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base, onos-openflow, onos-ovsdatabase, onos-ovsdb-base, onos-drivers-ovsdb, onos-ovsdb-provider-host, onos-app-vtn-onosfw
-
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml
deleted file mode 100755
index 836cb78b..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: del ovs bridge
- shell: ovs-vsctl del-br br-int; ovs-vsctl del-br br-tun; ovs-vsctl del-br br-prv;
-
-- name: remove ovs and ovs-plugin daemon
- shell: >
- sed -i '/neutron-openvswitch-agent/d' /opt/service ;
- sed -i '/openvswitch-switch/d' /opt/service ;
-
-- name: stop ovs and ovs-plugin
- shell: service openvswitch-switch stop; service neutron-openvswitch-agent stop;
-
-- name: remove ovs and ovs-plugin files
- shell: >
- update-rc.d -f neutron-openvswitch-agent remove;
- mv /etc/init.d/neutron-openvswitch-agent /home/neutron-openvswitch-agent;
- mv /etc/init/neutron-openvswitch-agent.conf /home/neutron-openvswitch-agent.conf;
- update-rc.d -f openvswitch-switch remove ;
- mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ;
- mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ;
- update-rc.d -f neutron-ovs-cleanup remove ;
- mv /etc/init.d/neutron-ovs-cleanup /home/neutron-ovs-cleanup ;
- mv /etc/init/neutron-ovs-cleanup.conf /home/neutron-ovs-cleanup.conf ;
-
-- name: remove ovs kernel module
- shell: rmmod vport_vxlan; rmmod openvswitch;
- ignore_errors: True
-
-- name: copy recovery script
- copy: src={{ item }} dest=/opt/setup_networks
- with_items:
-# - recover_network_opencontrail.py
- - setup_networks_opencontrail.py
-
-#- name: recover external script
-# shell: python /opt/setup_networks/recover_network_opencontrail.py
-
-- name: modify net-init
- shell: sed -i 's/setup_networks.py/setup_networks_opencontrail.py/g' /etc/init.d/net_init
-
-- name: resolve dual NIC problem
- shell: >
- echo "net.ipv4.conf.all.arp_ignore=1" >> /etc/sysctl.conf ;
- /sbin/sysctl -p ;
- echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore ;
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/neutron.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/neutron.j2
deleted file mode 100644
index e7107660..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/neutron.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-[securitygroup]
-firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-enable_security_group = True
-
-[agent]
-prevent_arp_spoofing = False
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/nova.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/nova.j2
deleted file mode 100644
index 7dbc216a..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/nova.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/vars/Debian.yml
deleted file mode 100644
index 221a3d92..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/vars/Debian.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-configs_templates:
- - src: nova.j2
- dest:
- - /etc/nova/nova.conf
- - src: neutron.j2
- dest:
- - /etc/neutron/plugins/ml2/ml2_conf.ini
- - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
- - /etc/neutron/plugins/ml2/restproxy.ini
-
-controller_services:
- - nova-api
- - nova-cert
- - nova-conductor
- - nova-consoleauth
- - nova-novncproxy
- - nova-scheduler
- - neutron-server
- - neutron-openvswitch-agent
- - neutron-l3-agent
- - neutron-dhcp-agent
- - neutron-metadata-agent
-
-compute_services:
- - nova-compute
- - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/setup-network/files/setup_networks/net_init b/deploy/adapters/ansible/openstack_newton_xenial/roles/setup-network/files/setup_networks/net_init
deleted file mode 100755
index 41ccb988..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/setup-network/files/setup_networks/net_init
+++ /dev/null
@@ -1,24 +0,0 @@
-#! /bin/sh
-### BEGIN INIT INFO
-# Provides: anamon.init
-# Required-Start: $network
-# Required-Stop:
-# Should-Start:
-# Should-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Starts the cobbler anamon boot notification program
-# Description: anamon runs the first time a machine is booted after installation.
-### END INIT INFO
-
-
-
-#
-# anamon.init: Starts the cobbler post-install boot notification program
-#
-# chkconfig: 35 0 6
-#
-# description: anamon runs the first time a machine is booted after
-# installation.
-#
-python /opt/setup_networks/setup_networks.py
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/storage/files/storage b/deploy/adapters/ansible/openstack_newton_xenial/roles/storage/files/storage
deleted file mode 100755
index 3acc6115..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/storage/files/storage
+++ /dev/null
@@ -1,10 +0,0 @@
-#! /bin/bash
-### BEGIN INIT INFO
-# Provides: Storage
-# Required-Start: $remote_fs $network
-# Required-Stop: $remote_fs $network
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Description: Storage
-### END INIT INFO
-loop_dev=`sh /opt/setup_storage/losetup.sh`
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/main.yml
deleted file mode 100644
index 0f083146..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include: swift.yml
- when: moon == "Enable"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-compute1.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-compute1.yml
deleted file mode 100644
index be00484b..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-compute1.yml
+++ /dev/null
@@ -1,80 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install swift-compute packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: compute_packages | union(compute_packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: format devices
- shell: >
- dd if=/dev/zero of=/var/swift1 bs=1G count=10;
- dd if=/dev/zero of=/var/swift2 bs=1G count=10;
- mkfs.xfs /var/swift1;
- mkfs.xfs /var/swift2;
-
-- name: create mount point directories
- shell: >
- mkdir -p /srv/node/swift1;
- mkdir -p /srv/node/swift2;
-
-- name: edit /etc/fstab
- shell: >
- echo "/var/swift1 /srv/node/swift1/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
- echo "/var/swift2 /srv/node/swift2/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
- mount /srv/node/swift1;
- mount /srv/node/swift2;
-
-- name: edit /etc/default/rsync
- shell: sed -i 's/RSYNC_ENABLE=false/RSYNC_ENABLE=true/g' /etc/default/rsync
-
-- name: restart rsync service
- service: name=rsync state=restarted enabled=yes
-
-- name: copy server configuration templates
- template: src={{ item }} dest=/etc/swift/ backup=yes
- with_items:
- - account-server.conf
- - container-server.conf
- - object-server.conf
-
-- name: set swift directory ownership and permissions
- shell: >
- chown -R swift:swift /srv/node;
- mkdir -p /var/cache/swift;
- chown -R root:swift /var/cache/swift;
- chmod -R 775 /var/cache/swift;
-
-#- name: copy swift lib
-# copy: src=swift-lib.tar.gz dest=/tmp/swift-lib.tar.gz
-#
-#- name: upload swift lib
-# unarchive: src=swift-lib.tar.gz dest=/tmp/
-#
-#- name: copy swift lib
-# shell: command: su -s /bin/sh -c "cp /tmp/swift-lib/* /usr/lib/"
-#
-#- name: untar swift lib
-# shell: >
-# tar zxf /tmp/swift-lib.tar.gz;
-# cp /tmp/swift-lib/* /usr/lib/;
-
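Since the removed tasks build swift's two "devices" out of 10 GB files mounted under /srv/node, a short post-run check on a compute node can confirm the layout they leave behind (illustrative commands only):

    mount | grep /srv/node                     # swift1 and swift2 should both show up as xfs
    df -h /srv/node/swift1 /srv/node/swift2    # ~10G each, backed by /var/swift1 and /var/swift2
    grep swift /etc/fstab                      # the two noatime,nodiratime,nobarrier entries
    xfs_info /srv/node/swift1                  # geometry written by mkfs.xfs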
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller1.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller1.yml
deleted file mode 100644
index 36d05040..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller1.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install swift-controller packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: controller_packages | union(controller_packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: make swift directory
- file: path=/etc/swift state=directory mode=0755
-
-- name: update proxy-server conf
- template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
-
-
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller2.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller2.yml
deleted file mode 100644
index 92d4ab22..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller2.yml
+++ /dev/null
@@ -1,93 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: create account.builder file
- shell: >
- cd /etc/swift ;
- swift-ring-builder account.builder create 10 3 1;
-
-- name: add each storage node to the ring
- shell: >
- cd /etc/swift;
- swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift1 --weight 100 ;
- swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift2 --weight 100 ;
- with_indexed_items: groups['compute']
-
-- name: verify the ring contents 1
- shell: >
- cd /etc/swift;
- swift-ring-builder account.builder;
-
-- name: rebalance the ring
- shell: >
- cd /etc/swift;
- swift-ring-builder account.builder rebalance;
-
-
-#####################
-- name: create container.builder file
- shell: >
- cd /etc/swift;
- swift-ring-builder container.builder create 10 3 1;
-
-- name: add each storage node to the ring
- shell: >
- cd /etc/swift;
- swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift1 --weight 100;
- swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift2 --weight 100;
- with_indexed_items: groups['compute']
-
-- name: verify the ring contents 2
- shell: >
- cd /etc/swift;
- swift-ring-builder container.builder;
-
-- name: rebalance the ring
- shell: >
- cd /etc/swift;
- swift-ring-builder container.builder rebalance;
-
-#############################
-
-- name: create object builder file
- shell: >
- cd /etc/swift;
- swift-ring-builder object.builder create 10 3 1;
-
-- name: add each storage node to the ring
- shell: >
- cd /etc/swift;
- swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift1 --weight 100;
- swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift2 --weight 100;
- with_indexed_items: groups['compute']
-
-- name: verify the ring contents
- shell: >
- cd /etc/swift;
- swift-ring-builder object.builder;
-
-- name: rebalance the ring
- shell: >
- cd /etc/swift;
- swift-ring-builder object.builder rebalance;
-
-##########################
-
-- name: distribute ring configuration files to all controllers
- shell: >
- cd /etc/swift;
- scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
- with_indexed_items: groups['controller']
-
-- name: distribute ring configuration files to all compute nodes
- shell: >
- cd /etc/swift;
- scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
- with_indexed_items: groups['compute']
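In the removed ring-building tasks, the arguments to "create 10 3 1" are the partition power (2^10 = 1024 partitions), the replica count (3), and the minimum hours between moves of a partition (1). A condensed manual version of the same workflow for one ring, with a placeholder management IP, looks like this; the real playbook loops over every compute node and both devices:

    cd /etc/swift
    swift-ring-builder object.builder create 10 3 1
    swift-ring-builder object.builder add --region 1 --zone 1 \
        --ip 10.1.0.51 --port 6000 --device swift1 --weight 100
    # ...repeat the add for swift2 and for each further compute node; three replicas
    # need at least three devices in total for rebalance to place each on its own device.
    swift-ring-builder object.builder rebalance      # writes object.ring.gz for distribution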
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/account-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/account-server.conf
deleted file mode 100644
index ea84799f..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/account-server.conf
+++ /dev/null
@@ -1,200 +0,0 @@
-[DEFAULT]
-bind_ip = {{ internal_ip }}
-bind_port = 6002
-# bind_timeout = 30
-# backlog = 4096
-user = swift
-swift_dir = /etc/swift
-devices = /srv/node
-mount_check = true
-# disable_fallocate = false
-#
-# Use an integer to override the number of pre-forked processes that will
-# accept connections.
-# workers = auto
-#
-# Maximum concurrent requests per worker
-# max_clients = 1024
-#
-# You can specify default log routing here if you want:
-# log_name = swift
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-# The following caps the length of log lines to the value given; no limit if
-# set to 0, the default.
-# log_max_line_length = 0
-#
-# comma separated list of functions to call to setup custom log handlers.
-# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
-# adapted_logger
-# log_custom_handlers =
-#
-# If set, log_udp_host will override log_address
-# log_udp_host =
-# log_udp_port = 514
-#
-# You can enable StatsD logging here:
-# log_statsd_host =
-# log_statsd_port = 8125
-# log_statsd_default_sample_rate = 1.0
-# log_statsd_sample_rate_factor = 1.0
-# log_statsd_metric_prefix =
-#
-# If you don't mind the extra disk space usage in overhead, you can turn this
-# on to preallocate disk space with SQLite databases to decrease fragmentation.
-# db_preallocation = off
-#
-# eventlet_debug = false
-#
-# You can set fallocate_reserve to the number of bytes you'd like fallocate to
-# reserve, whether there is space for the given file size or not.
-# fallocate_reserve = 0
-
-[pipeline:main]
-pipeline = healthcheck recon account-server
-
-[app:account-server]
-use = egg:swift#account
-# You can override the default log routing for this app here:
-# set log_name = account-server
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_requests = true
-# set log_address = /dev/log
-#
-# auto_create_account_prefix = .
-#
-# Configure parameter for creating specific server
-# To handle all verbs, including replication verbs, do not specify
-# "replication_server" (this is the default). To only handle replication,
-# set to a True value (e.g. "True" or "1"). To handle only non-replication
-# verbs, set to "False". Unless you have a separate replication network, you
-# should not specify any value for "replication_server". Default is empty.
-# replication_server = false
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-# An optional filesystem path, which if present, will cause the healthcheck
-# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
-# disable_path =
-
-[filter:recon]
-use = egg:swift#recon
-recon_cache_path = /var/cache/swift
-
-[account-replicator]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = account-replicator
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# Maximum number of database rows that will be sync'd in a single HTTP
-# replication request. Databases with less than or equal to this number of
-# differing rows will always be sync'd using an HTTP replication request rather
-# than using rsync.
-# per_diff = 1000
-#
-# Maximum number of HTTP replication requests attempted on each replication
-# pass for any one container. This caps how long the replicator will spend
-# trying to sync a given database per pass so the other databases don't get
-# starved.
-# max_diffs = 100
-#
-# Number of replication workers to spawn.
-# concurrency = 8
-#
-# Time in seconds to wait between replication passes
-# interval = 30
-# run_pause is deprecated, use interval instead
-# run_pause = 30
-#
-# node_timeout = 10
-# conn_timeout = 0.5
-#
-# The replicator also performs reclamation
-# reclaim_age = 604800
-#
-# Allow rsync to compress data which is transmitted to destination node
-# during sync. However, this is applicable only when destination node is in
-# a different region than the local one.
-# rsync_compress = no
-#
-# Format of the rsync module where the replicator will send data. See
-# etc/rsyncd.conf-sample for some usage examples.
-# rsync_module = {replication_ip}::account
-#
-# recon_cache_path = /var/cache/swift
-
-[account-auditor]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = account-auditor
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# Will audit each account at most once per interval
-# interval = 1800
-#
-# accounts_per_second = 200
-# recon_cache_path = /var/cache/swift
-
-[account-reaper]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = account-reaper
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# concurrency = 25
-# interval = 3600
-# node_timeout = 10
-# conn_timeout = 0.5
-#
-# Normally, the reaper begins deleting account information for deleted accounts
-# immediately; you can set this to delay its work however. The value is in
-# seconds; 2592000 = 30 days for example.
-# delay_reaping = 0
-#
-# If the account fails to be reaped due to a persistent error, the
-# account reaper will log a message such as:
-# Account <name> has not been reaped since <date>
-# You can search logs for this message if space is not being reclaimed
-# after you delete account(s).
-# Default is 2592000 seconds (30 days). This is in addition to any time
-# requested by delay_reaping.
-# reap_warn_after = 2592000
-
-# Note: Put it at the beginning of the pipeline to profile all middleware. But
-# it is safer to put this after healthcheck.
-[filter:xprofile]
-use = egg:swift#xprofile
-# This option enables you to switch profilers which should inherit from python
-# standard profiler. Currently the supported value can be 'cProfile',
-# 'eventlet.green.profile' etc.
-# profile_module = eventlet.green.profile
-#
-# This prefix will be used to combine process ID and timestamp to name the
-# profile data file. Make sure the executing user has permission to write
-# into this path (missing path segments will be created, if necessary).
-# If you enable profiling in more than one type of daemon, you must override
-# it with a unique value like: /var/log/swift/profile/account.profile
-# log_filename_prefix = /tmp/log/swift/profile/default.profile
-#
-# the profile data will be dumped to local disk based on above naming rule
-# in this interval.
-# dump_interval = 5.0
-#
-# Be careful: this option makes the profiler dump data into timestamped files,
-# which means many files will pile up in the directory.
-# dump_timestamp = false
-#
-# This is the path of the URL to access the mini web UI.
-# path = /__profile__
-#
-# Clear the data when the wsgi server shutdown.
-# flush_at_shutdown = false
-#
-# unwind the iterator of applications
-# unwind = false
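Because the removed account-server pipeline is healthcheck, recon, account-server, basic liveness can be probed without credentials once the service is up (the IP is a placeholder for a storage node's management address):

    curl http://10.1.0.51:6002/healthcheck    # the healthcheck filter answers with "OK"
    swift-recon account --md5                 # the recon filter feeds this cluster-wide check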
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/container-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/container-server.conf
deleted file mode 100644
index 88cd2ebb..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/container-server.conf
+++ /dev/null
@@ -1,229 +0,0 @@
-[DEFAULT]
-bind_ip = {{ internal_ip }}
-bind_port = 6001
-# bind_timeout = 30
-# backlog = 4096
-user = swift
-swift_dir = /etc/swift
-devices = /srv/node
-mount_check = true
-# disable_fallocate = false
-#
-# Use an integer to override the number of pre-forked processes that will
-# accept connections.
-# workers = auto
-#
-# Maximum concurrent requests per worker
-# max_clients = 1024
-#
-# This is a comma separated list of hosts allowed in the X-Container-Sync-To
-# field for containers. This is the old-style of using container sync. It is
-# strongly recommended to use the new style of a separate
-# container-sync-realms.conf -- see container-sync-realms.conf-sample
-# allowed_sync_hosts = 127.0.0.1
-#
-# You can specify default log routing here if you want:
-# log_name = swift
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-# The following caps the length of log lines to the value given; no limit if
-# set to 0, the default.
-# log_max_line_length = 0
-#
-# comma separated list of functions to call to setup custom log handlers.
-# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
-# adapted_logger
-# log_custom_handlers =
-#
-# If set, log_udp_host will override log_address
-# log_udp_host =
-# log_udp_port = 514
-#
-# You can enable StatsD logging here:
-# log_statsd_host =
-# log_statsd_port = 8125
-# log_statsd_default_sample_rate = 1.0
-# log_statsd_sample_rate_factor = 1.0
-# log_statsd_metric_prefix =
-#
-# If you don't mind the extra disk space usage in overhead, you can turn this
-# on to preallocate disk space with SQLite databases to decrease fragmentation.
-# db_preallocation = off
-#
-# eventlet_debug = false
-#
-# You can set fallocate_reserve to the number of bytes you'd like fallocate to
-# reserve, whether there is space for the given file size or not.
-# fallocate_reserve = 0
-
-[pipeline:main]
-pipeline = healthcheck recon container-server
-
-[app:container-server]
-use = egg:swift#container
-# You can override the default log routing for this app here:
-# set log_name = container-server
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_requests = true
-# set log_address = /dev/log
-#
-# node_timeout = 3
-# conn_timeout = 0.5
-# allow_versions = false
-# auto_create_account_prefix = .
-#
-# Configure parameter for creating specific server
-# To handle all verbs, including replication verbs, do not specify
-# "replication_server" (this is the default). To only handle replication,
-# set to a True value (e.g. "True" or "1"). To handle only non-replication
-# verbs, set to "False". Unless you have a separate replication network, you
-# should not specify any value for "replication_server".
-# replication_server = false
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-# An optional filesystem path, which if present, will cause the healthcheck
-# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
-# disable_path =
-
-[filter:recon]
-use = egg:swift#recon
-recon_cache_path = /var/cache/swift
-
-[container-replicator]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = container-replicator
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# Maximum number of database rows that will be sync'd in a single HTTP
-# replication request. Databases with less than or equal to this number of
-# differing rows will always be sync'd using an HTTP replication request rather
-# than using rsync.
-# per_diff = 1000
-#
-# Maximum number of HTTP replication requests attempted on each replication
-# pass for any one container. This caps how long the replicator will spend
-# trying to sync a given database per pass so the other databases don't get
-# starved.
-# max_diffs = 100
-#
-# Number of replication workers to spawn.
-# concurrency = 8
-#
-# Time in seconds to wait between replication passes
-# interval = 30
-# run_pause is deprecated, use interval instead
-# run_pause = 30
-#
-# node_timeout = 10
-# conn_timeout = 0.5
-#
-# The replicator also performs reclamation
-# reclaim_age = 604800
-#
-# Allow rsync to compress data which is transmitted to destination node
-# during sync. However, this is applicable only when destination node is in
-# a different region than the local one.
-# rsync_compress = no
-#
-# Format of the rsync module where the replicator will send data. See
-# etc/rsyncd.conf-sample for some usage examples.
-# rsync_module = {replication_ip}::container
-#
-# recon_cache_path = /var/cache/swift
-
-[container-updater]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = container-updater
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# interval = 300
-# concurrency = 4
-# node_timeout = 3
-# conn_timeout = 0.5
-#
-# slowdown will sleep that amount between containers
-# slowdown = 0.01
-#
-# Seconds to suppress updating an account that has generated an error
-# account_suppression_time = 60
-#
-# recon_cache_path = /var/cache/swift
-
-[container-auditor]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = container-auditor
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# Will audit each container at most once per interval
-# interval = 1800
-#
-# containers_per_second = 200
-# recon_cache_path = /var/cache/swift
-
-[container-sync]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = container-sync
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
-# You can also set this to a comma separated list of HTTP Proxies and they will
-# be randomly used (simple load balancing).
-# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888
-#
-# Will sync each container at most once per interval
-# interval = 300
-#
-# Maximum amount of time to spend syncing each container per pass
-# container_time = 60
-#
-# Maximum amount of time in seconds for the connection attempt
-# conn_timeout = 5
-# Server errors from requests will be retried by default
-# request_tries = 3
-#
-# Internal client config file path
-# internal_client_conf_path = /etc/swift/internal-client.conf
-
-# Note: Put it at the beginning of the pipeline to profile all middleware. But
-# it is safer to put this after healthcheck.
-[filter:xprofile]
-use = egg:swift#xprofile
-# This option enables you to switch profilers which should inherit from python
-# standard profiler. Currently the supported value can be 'cProfile',
-# 'eventlet.green.profile' etc.
-# profile_module = eventlet.green.profile
-#
-# This prefix will be used to combine process ID and timestamp to name the
-# profile data file. Make sure the executing user has permission to write
-# into this path (missing path segments will be created, if necessary).
-# If you enable profiling in more than one type of daemon, you must override
-# it with a unique value like: /var/log/swift/profile/container.profile
-# log_filename_prefix = /tmp/log/swift/profile/default.profile
-#
-# the profile data will be dumped to local disk based on above naming rule
-# in this interval.
-# dump_interval = 5.0
-#
-# Be careful: this option makes the profiler dump data into timestamped files,
-# which means many files will pile up in the directory.
-# dump_timestamp = false
-#
-# This is the path of the URL to access the mini web UI.
-# path = /__profile__
-#
-# Clear the data when the wsgi server shutdown.
-# flush_at_shutdown = false
-#
-# unwind the iterator of applications
-# unwind = false
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/object-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/object-server.conf
deleted file mode 100644
index effd4f22..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/object-server.conf
+++ /dev/null
@@ -1,347 +0,0 @@
-[DEFAULT]
-bind_ip = {{ internal_ip }}
-bind_port = 6000
-# bind_timeout = 30
-# backlog = 4096
-user = swift
-swift_dir = /etc/swift
-devices = /srv/node
-mount_check = true
-# disable_fallocate = false
-# expiring_objects_container_divisor = 86400
-# expiring_objects_account_name = expiring_objects
-#
-# Use an integer to override the number of pre-forked processes that will
-# accept connections. NOTE: if servers_per_port is set, this setting is
-# ignored.
-# workers = auto
-#
-# Make object-server run this many worker processes per unique port of
-# "local" ring devices across all storage policies. This can help provide
-# the isolation of threads_per_disk without the severe overhead. The default
-# value of 0 disables this feature.
-# servers_per_port = 0
-#
-# Maximum concurrent requests per worker
-# max_clients = 1024
-#
-# You can specify default log routing here if you want:
-# log_name = swift
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-# The following caps the length of log lines to the value given; no limit if
-# set to 0, the default.
-# log_max_line_length = 0
-#
-# comma separated list of functions to call to setup custom log handlers.
-# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
-# adapted_logger
-# log_custom_handlers =
-#
-# If set, log_udp_host will override log_address
-# log_udp_host =
-# log_udp_port = 514
-#
-# You can enable StatsD logging here:
-# log_statsd_host =
-# log_statsd_port = 8125
-# log_statsd_default_sample_rate = 1.0
-# log_statsd_sample_rate_factor = 1.0
-# log_statsd_metric_prefix =
-#
-# eventlet_debug = false
-#
-# You can set fallocate_reserve to the number of bytes you'd like fallocate to
-# reserve, whether there is space for the given file size or not.
-# fallocate_reserve = 0
-#
-# Time to wait while attempting to connect to another backend node.
-# conn_timeout = 0.5
-# Time to wait while sending each chunk of data to another backend node.
-# node_timeout = 3
-# Time to wait while sending a container update on object update.
-# container_update_timeout = 1.0
-# Time to wait while receiving each chunk of data from a client or another
-# backend node.
-# client_timeout = 60
-#
-# network_chunk_size = 65536
-# disk_chunk_size = 65536
-
-[pipeline:main]
-pipeline = healthcheck recon object-server
-
-[app:object-server]
-use = egg:swift#object
-# You can override the default log routing for this app here:
-# set log_name = object-server
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_requests = true
-# set log_address = /dev/log
-#
-# max_upload_time = 86400
-#
-# slow is the minimum total number of seconds an object PUT/DELETE request
-# takes. If it is faster, the object server will sleep this amount of time minus
-# the already passed transaction time. This is only useful for simulating slow
-# devices on storage nodes during testing and development.
-# slow = 0
-#
-# Objects smaller than this are not evicted from the buffercache once read
-# keep_cache_size = 5242880
-#
-# If true, objects for authenticated GET requests may be kept in buffer cache
-# if small enough
-# keep_cache_private = false
-#
-# on PUTs, sync data every n MB
-# mb_per_sync = 512
-#
-# Comma separated list of headers that can be set in metadata on an object.
-# This list is in addition to X-Object-Meta-* headers and cannot include
-# Content-Type, etag, Content-Length, or deleted
-# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
-#
-# auto_create_account_prefix = .
-#
-# A value of 0 means "don't use thread pools". A reasonable starting point is
-# 4.
-# threads_per_disk = 0
-#
-# Configure parameter for creating specific server
-# To handle all verbs, including replication verbs, do not specify
-# "replication_server" (this is the default). To only handle replication,
-# set to a True value (e.g. "True" or "1"). To handle only non-replication
-# verbs, set to "False". Unless you have a separate replication network, you
-# should not specify any value for "replication_server".
-# replication_server = false
-#
-# Set to restrict the number of concurrent incoming SSYNC requests
-# Set to 0 for unlimited
-# Note that SSYNC requests are only used by the object reconstructor or the
-# object replicator when configured to use ssync.
-# replication_concurrency = 4
-#
-# Restricts incoming SSYNC requests to one per device,
-# replication_concurrency above allowing. This can help control I/O to each
-# device, but you may wish to set this to False to allow multiple SSYNC
-# requests (up to the above replication_concurrency setting) per device.
-# replication_one_per_device = True
-#
-# Number of seconds to wait for an existing replication device lock before
-# giving up.
-# replication_lock_timeout = 15
-#
-# These next two settings control when the SSYNC subrequest handler will
-# abort an incoming SSYNC attempt. An abort will occur if there are at
-# least threshold number of failures and the value of failures / successes
-# exceeds the ratio. The defaults of 100 and 1.0 means that at least 100
-# failures have to occur and there have to be more failures than successes for
-# an abort to occur.
-# replication_failure_threshold = 100
-# replication_failure_ratio = 1.0
-#
-# Use splice() for zero-copy object GETs. This requires Linux kernel
-# version 3.0 or greater. If you set "splice = yes" but the kernel
-# does not support it, error messages will appear in the object server
-# logs at startup, but your object servers should continue to function.
-#
-# splice = no
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-# An optional filesystem path, which if present, will cause the healthcheck
-# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
-# disable_path =
-
-[filter:recon]
-use = egg:swift#recon
-recon_cache_path = /var/cache/swift
-recon_lock_path = /var/lock
-
-[object-replicator]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = object-replicator
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# daemonize = on
-#
-# Time in seconds to wait between replication passes
-# interval = 30
-# run_pause is deprecated, use interval instead
-# run_pause = 30
-#
-# concurrency = 1
-# stats_interval = 300
-#
-# default is rsync, alternative is ssync
-# sync_method = rsync
-#
-# max duration of a partition rsync
-# rsync_timeout = 900
-#
-# bandwidth limit for rsync in kB/s. 0 means unlimited
-# rsync_bwlimit = 0
-#
-# passed to rsync for io op timeout
-# rsync_io_timeout = 30
-#
-# Allow rsync to compress data which is transmitted to destination node
-# during sync. However, this is applicable only when destination node is in
-# a different region than the local one.
-# NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might
-# slow down the syncing process.
-# rsync_compress = no
-#
-# Format of the rsync module where the replicator will send data. See
-# etc/rsyncd.conf-sample for some usage examples.
-# rsync_module = {replication_ip}::object
-#
-# node_timeout = <whatever's in the DEFAULT section or 10>
-# max duration of an http request; this is for REPLICATE finalization calls and
-# so should be longer than node_timeout
-# http_timeout = 60
-#
-# attempts to kill all workers if nothing replicates for lockup_timeout seconds
-# lockup_timeout = 1800
-#
-# The replicator also performs reclamation
-# reclaim_age = 604800
-#
-# ring_check_interval = 15
-# recon_cache_path = /var/cache/swift
-#
-# limits how long rsync error log lines are
-# 0 means to log the entire line
-# rsync_error_log_line_length = 0
-#
-# handoffs_first and handoff_delete are options for a special case
-# such as disk full in the cluster. These two options SHOULD NOT BE
-# CHANGED, except for such extreme situations. (e.g. disks filled up
-# or are about to fill up. Anyway, DO NOT let your drives fill up)
-# handoffs_first is the flag to replicate handoffs prior to canonical
-# partitions. It allows syncing and deleting handoffs quickly.
-# If set to a True value(e.g. "True" or "1"), partitions
-# that are not supposed to be on the node will be replicated first.
-# handoffs_first = False
-#
-# handoff_delete is the number of replicas which are ensured in swift.
-# If a number less than the replica count is set, the object-replicator
-# may delete local handoffs even though not all replicas are ensured in the
-# cluster. The object-replicator removes local handoff partition directories
-# after syncing a partition when the number of successful responses is greater
-# than or equal to this number. By default (auto), handoff partitions are
-# removed only once they have successfully replicated to all canonical nodes.
-# handoff_delete = auto
-
-[object-reconstructor]
-# You can override the default log routing for this app here (don't use set!):
-# Unless otherwise noted, each setting below has the same meaning as described
-# in the [object-replicator] section, however these settings apply to the EC
-# reconstructor
-#
-# log_name = object-reconstructor
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# daemonize = on
-#
-# Time in seconds to wait between reconstruction passes
-# interval = 30
-# run_pause is deprecated, use interval instead
-# run_pause = 30
-#
-# concurrency = 1
-# stats_interval = 300
-# node_timeout = 10
-# http_timeout = 60
-# lockup_timeout = 1800
-# reclaim_age = 604800
-# ring_check_interval = 15
-# recon_cache_path = /var/cache/swift
-# handoffs_first = False
-
-[object-updater]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = object-updater
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# interval = 300
-# concurrency = 1
-# node_timeout = <whatever's in the DEFAULT section or 10>
-# slowdown will sleep that amount between objects
-# slowdown = 0.01
-#
-# recon_cache_path = /var/cache/swift
-
-[object-auditor]
-# You can override the default log routing for this app here (don't use set!):
-# log_name = object-auditor
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_address = /dev/log
-#
-# Time in seconds to wait between auditor passes
-# interval = 30
-#
-# You can set the disk chunk size that the auditor uses making it larger if
-# you like for more efficient local auditing of larger objects
-# disk_chunk_size = 65536
-# files_per_second = 20
-# concurrency = 1
-# bytes_per_second = 10000000
-# log_time = 3600
-# zero_byte_files_per_second = 50
-# recon_cache_path = /var/cache/swift
-
-# Takes a comma separated list of ints. If set, the object auditor will
-# increment a counter for every object whose size is <= to the given break
-# points and report the result after a full scan.
-# object_size_stats =
-
-# The auditor will cleanup old rsync tempfiles after they are "old
-# enough" to delete. You can configure the time elapsed in seconds
-# before rsync tempfiles will be unlinked, or the default value of
-# "auto" try to use object-replicator's rsync_timeout + 900 and fallback
-# to 86400 (1 day).
-# rsync_tempfile_timeout = auto
-
-# Note: Put it at the beginning of the pipeline to profile all middleware. But
-# it is safer to put this after healthcheck.
-[filter:xprofile]
-use = egg:swift#xprofile
-# This option enables you to switch profilers which should inherit from python
-# standard profiler. Currently the supported value can be 'cProfile',
-# 'eventlet.green.profile' etc.
-# profile_module = eventlet.green.profile
-#
-# This prefix will be used to combine process ID and timestamp to name the
-# profile data file. Make sure the executing user has permission to write
-# into this path (missing path segments will be created, if necessary).
-# If you enable profiling in more than one type of daemon, you must override
-# it with a unique value like: /var/log/swift/profile/object.profile
-# log_filename_prefix = /tmp/log/swift/profile/default.profile
-#
-# the profile data will be dumped to local disk based on above naming rule
-# in this interval.
-# dump_interval = 5.0
-#
-# Be careful: this option makes the profiler dump data into timestamped files,
-# which means many files will pile up in the directory.
-# dump_timestamp = false
-#
-# This is the path of the URL to access the mini web UI.
-# path = /__profile__
-#
-# Clear the data when the wsgi server shutdown.
-# flush_at_shutdown = false
-#
-# unwind the iterator of applications
-# unwind = false
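The removed object-server template leaves replication on its defaults, i.e. rsync as the sync_method with a "{replication_ip}::object" module, and the compute playbook above switches the rsync daemon on via /etc/default/rsync. Two hedged sanity checks, assuming an rsyncd.conf that exports the account/container/object modules as in the standard Swift install guide (the IP is a placeholder):

    systemctl status rsync          # the daemon enabled by the edit to /etc/default/rsync
    rsync rsync://10.1.0.51/        # lists the exported modules the replicators will push to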
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/proxy-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/proxy-server.conf
deleted file mode 100644
index b76796cf..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/proxy-server.conf
+++ /dev/null
@@ -1,764 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-[DEFAULT]
-bind_ip = {{ internal_ip }}
-bind_port = 8080
-# bind_timeout = 30
-# backlog = 4096
-swift_dir = /etc/swift
-user = swift
-
-# Enables exposing configuration settings via HTTP GET /info.
-# expose_info = true
-
-# Key to use for admin calls that are HMAC signed. Default is empty,
-# which will disable admin calls to /info.
-# admin_key = secret_admin_key
-#
-# Allows the ability to withhold sections from showing up in the public calls
-# to /info. You can withhold subsections by separating the dict level with a
-# ".". The following would cause the sections 'container_quotas' and 'tempurl'
-# to not be listed, and the key max_failed_deletes would be removed from
-# bulk_delete. Default value is 'swift.valid_api_versions' which allows all
-# registered features to be listed via HTTP GET /info except
-# swift.valid_api_versions information
-# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl
-
-# Use an integer to override the number of pre-forked processes that will
-# accept connections. Should default to the number of effective cpu
-# cores in the system. It's worth noting that individual workers will
-# use many eventlet co-routines to service multiple concurrent requests.
-# workers = auto
-#
-# Maximum concurrent requests per worker
-# max_clients = 1024
-#
-# Set the following two lines to enable SSL. This is for testing only.
-# cert_file = /etc/swift/proxy.crt
-# key_file = /etc/swift/proxy.key
-#
-# expiring_objects_container_divisor = 86400
-# expiring_objects_account_name = expiring_objects
-#
-# You can specify default log routing here if you want:
-# log_name = swift
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_headers = false
-# log_address = /dev/log
-# The following caps the length of log lines to the value given; no limit if
-# set to 0, the default.
-# log_max_line_length = 0
-#
-# This optional suffix (default is empty) is appended to the swift transaction
-# id and makes it easy to figure out which cluster an X-Trans-Id belongs to.
-# This is very useful when one is managing more than one swift cluster.
-# trans_id_suffix =
-#
-# comma separated list of functions to call to setup custom log handlers.
-# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
-# adapted_logger
-# log_custom_handlers =
-#
-# If set, log_udp_host will override log_address
-# log_udp_host =
-# log_udp_port = 514
-#
-# You can enable StatsD logging here:
-# log_statsd_host =
-# log_statsd_port = 8125
-# log_statsd_default_sample_rate = 1.0
-# log_statsd_sample_rate_factor = 1.0
-# log_statsd_metric_prefix =
-#
-# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
-# cors_allow_origin =
-# strict_cors_mode = True
-#
-# client_timeout = 60
-# eventlet_debug = false
-
-[pipeline:main]
-# This sample pipeline uses tempauth and is used for SAIO dev work and
-# testing. See below for a pipeline using keystone.
-#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
-pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
-
-# The following pipeline shows keystone integration. Comment out the one
-# above and uncomment this one. Additional steps for integrating keystone are
-# covered further below in the filter sections for authtoken and keystoneauth.
-#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
-
-[app:proxy-server]
-use = egg:swift#proxy
-account_autocreate = True
-# You can override the default log routing for this app here:
-# set log_name = proxy-server
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_address = /dev/log
-#
-# log_handoffs = true
-# recheck_account_existence = 60
-# recheck_container_existence = 60
-# object_chunk_size = 65536
-# client_chunk_size = 65536
-#
-# How long the proxy server will wait on responses from the a/c/o servers.
-# node_timeout = 10
-#
-# How long the proxy server will wait for an initial response and to read a
-# chunk of data from the object servers while serving GET / HEAD requests.
-# Timeouts from these requests can be recovered from so setting this to
-# something lower than node_timeout would provide quicker error recovery
-# while allowing for a longer timeout for non-recoverable requests (PUTs).
-# Defaults to node_timeout, should be overridden if node_timeout is set to a
-# high number to prevent client timeouts from firing before the proxy server
-# has a chance to retry.
-# recoverable_node_timeout = node_timeout
-#
-# conn_timeout = 0.5
-#
-# How long to wait for requests to finish after a quorum has been established.
-# post_quorum_timeout = 0.5
-#
-# How long without an error before a node's error count is reset. This will
-# also be how long before a node is reenabled after suppression is triggered.
-# error_suppression_interval = 60
-#
-# How many errors can accumulate before a node is temporarily ignored.
-# error_suppression_limit = 10
-#
-# If set to 'true' any authorized user may create and delete accounts; if
-# 'false' no one, even authorized, can.
-# allow_account_management = false
-#
-# Set object_post_as_copy = false to turn on fast posts where only the metadata
-# changes are stored anew and the original data file is kept in place. This
-# makes for quicker posts.
-# object_post_as_copy = true
-#
-# If set to 'true' authorized accounts that do not yet exist within the Swift
-# cluster will be automatically created.
-# account_autocreate = false
-#
-# If set to a positive value, trying to create a container when the account
-# already has at least this maximum containers will result in a 403 Forbidden.
-# Note: This is a soft limit, meaning a user might exceed the cap for
-# recheck_account_existence before the 403s kick in.
-# max_containers_per_account = 0
-#
-# This is a comma separated list of account hashes that ignore the
-# max_containers_per_account cap.
-# max_containers_whitelist =
-#
-# Comma separated list of Host headers to which the proxy will deny requests.
-# deny_host_headers =
-#
-# Prefix used when automatically creating accounts.
-# auto_create_account_prefix = .
-#
-# Depth of the proxy put queue.
-# put_queue_depth = 10
-#
-# Storage nodes can be chosen at random (shuffle), by using timing
-# measurements (timing), or by using an explicit match (affinity).
-# Using timing measurements may allow for lower overall latency, while
-# using affinity allows for finer control. In both the timing and
-# affinity cases, equally-sorting nodes are still randomly chosen to
-# spread load.
-# The valid values for sorting_method are "affinity", "shuffle", or "timing".
-# sorting_method = shuffle
-#
-# If the "timing" sorting_method is used, the timings will only be valid for
-# the number of seconds configured by timing_expiry.
-# timing_expiry = 300
-#
-# By default on a GET/HEAD swift will connect to a storage node one at a time
-# in a single thread. There is some logic to the order they are hit, however. If you
-# turn on concurrent_gets below, then replica count threads will be used.
-# With addition of the concurrency_timeout option this will allow swift to send
-# out GET/HEAD requests to the storage nodes concurrently and answer with the
-# first to respond. With an EC policy the parameter only affects HEAD requests.
-# concurrent_gets = off
-#
-# This parameter controls how long to wait before firing off the next
-# concurrent_get thread. A value of 0 would be fully concurrent, any other
-# number will stagger the firing of the threads. This number should be
-# between 0 and node_timeout. The default is whatever you set for the
-# conn_timeout parameter.
-# concurrency_timeout = 0.5
-#
-# Set to the number of nodes to contact for a normal request. You can use
-# '* replicas' at the end to have it use the number given times the number of
-# replicas for the ring being used for the request.
-# request_node_count = 2 * replicas
-#
-# Which backend servers to prefer on reads. Format is r<N> for region
-# N or r<N>z<M> for region N, zone M. The value after the equals is
-# the priority; lower numbers are higher priority.
-#
-# Example: first read from region 1 zone 1, then region 1 zone 2, then
-# anything in region 2, then everything else:
-# read_affinity = r1z1=100, r1z2=200, r2=300
-# Default is empty, meaning no preference.
-# read_affinity =
-#
-# Which backend servers to prefer on writes. Format is r<N> for region
-# N or r<N>z<M> for region N, zone M. If this is set, then when
-# handling an object PUT request, some number (see setting
-# write_affinity_node_count) of local backend servers will be tried
-# before any nonlocal ones.
-#
-# Example: try to write to regions 1 and 2 before writing to any other
-# nodes:
-# write_affinity = r1, r2
-# Default is empty, meaning no preference.
-# write_affinity =
-#
-# The number of local (as governed by the write_affinity setting)
-# nodes to attempt to contact first, before any non-local ones. You
-# can use '* replicas' at the end to have it use the number given
-# times the number of replicas for the ring being used for the
-# request.
-# write_affinity_node_count = 2 * replicas
-#
-# These are the headers whose values will only be shown to swift_owners. The
-# exact definition of a swift_owner is up to the auth system in use, but
-# usually indicates administrative responsibilities.
-# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control
-
-[filter:tempauth]
-use = egg:swift#tempauth
-# You can override the default log routing for this filter here:
-# set log_name = tempauth
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# The reseller prefix will verify a token begins with this prefix before even
-# attempting to validate it. Also, with authorization, only Swift storage
-# accounts with this prefix will be authorized by this middleware. Useful if
-# multiple auth systems are in use for one Swift cluster.
-# The reseller_prefix may contain a comma separated list of items. The first
-# item is used for the token as mentioned above. If second and subsequent
-# items exist, the middleware will handle authorization for an account with
-# that prefix. For example, for prefixes "AUTH, SERVICE", a path of
-# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty
-# (blank) reseller prefix is required, it must be first in the list. Two
-# single quote characters indicates an empty (blank) reseller prefix.
-# reseller_prefix = AUTH
-
-#
-# The require_group parameter names a group that must be presented by
-# either X-Auth-Token or X-Service-Token. Usually this parameter is
-# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah).
-# By default, no group is needed. Do not use .admin.
-# require_group =
-
-# The auth prefix will cause requests beginning with this prefix to be routed
-# to the auth subsystem, for granting tokens, etc.
-# auth_prefix = /auth/
-# token_life = 86400
-#
-# This allows middleware higher in the WSGI pipeline to override auth
-# processing, useful for middleware such as tempurl and formpost. If you know
-# you're not going to use such middleware and you want a bit of extra security,
-# you can set this to false.
-# allow_overrides = true
-#
-# This specifies what scheme to return with storage urls:
-# http, https, or default (chooses based on what the server is running as)
-# This can be useful with an SSL load balancer in front of a non-SSL server.
-# storage_url_scheme = default
-#
-# Lastly, you need to list all the accounts/users you want here. The format is:
-# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
-# or if you want underscores in <account> or <user>, you can base64 encode them
-# (with no equal signs) and use this format:
-# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
-# There are special groups of:
-# .reseller_admin = can do anything to any account for this auth
-# .admin = can do anything within the account
-# If neither of these groups are specified, the user can only access containers
-# that have been explicitly allowed for them by a .admin or .reseller_admin.
-# The trailing optional storage_url allows you to specify an alternate url to
-# hand back to the user upon authentication. If not specified, this defaults to
-# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
-# to what the requester would need to use to reach this host.
-# Here are example entries, required for running the tests:
-user_admin_admin = admin .admin .reseller_admin
-user_test_tester = testing .admin
-user_test2_tester2 = testing2 .admin
-user_test_tester3 = testing3
-user_test5_tester5 = testing5 service
-
-# To enable Keystone authentication you need to have the auth token
-# middleware first to be configured. Here is an example below, please
-# refer to the keystone's documentation for details about the
-# different settings.
-#
-# You'll also need to have the keystoneauth middleware enabled and have it in
-# your main pipeline, as shown in the sample pipeline at the top of this file.
-#
-# Following parameters are known to work with keystonemiddleware v2.3.0
-# (above v2.0.0), but checking the latest information in the wiki page[1]
-# is recommended.
-# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration
-#
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-identity_uri = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-#auth_plugin = password
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = swift
-password = {{ CINDER_PASS }}
-delay_auth_decision = True
-#
-# delay_auth_decision defaults to False, but leaving it as false will
-# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from
-# working. This value must be explicitly set to True.
-# delay_auth_decision = False
-#
-# cache = swift.cache
-# include_service_catalog = False
-#
-[filter:keystoneauth]
-use = egg:swift#keystoneauth
-operator_roles = admin,user
-# The reseller_prefix option lists account namespaces that this middleware is
-# responsible for. The prefix is placed before the Keystone project id.
-# For example, for project 12345678, and prefix AUTH, the account is
-# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...).
-# Several prefixes are allowed by specifying a comma-separated list
-# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a
-# single blank/empty prefix. If an empty prefix is required in a list of
-# prefixes, a value of '' (two single quote characters) indicates a
-# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_')
-# character is appended to the value unless already present.
-# reseller_prefix = AUTH
-#
-# The user must have at least one role named by operator_roles on a
-# project in order to create, delete and modify containers and objects
-# and to set and read privileged headers such as ACLs.
-# If there are several reseller prefix items, you can prefix the
-# parameter so it applies only to those accounts (for example
-# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project>
-# path). If you omit the prefix, the option applies to all reseller
-# prefix items. For the blank/empty prefix, prefix with '' (do not put
-# underscore after the two single quote characters).
-# operator_roles = admin, swiftoperator
-#
-# The reseller admin role has the ability to create and delete accounts
-# reseller_admin_role = ResellerAdmin
-#
-# This allows middleware higher in the WSGI pipeline to override auth
-# processing, useful for middleware such as tempurl and formpost. If you know
-# you're not going to use such middleware and you want a bit of extra security,
-# you can set this to false.
-# allow_overrides = true
-#
-# If the service_roles parameter is present, an X-Service-Token must be
-# present in the request that when validated, grants at least one role listed
-# in the parameter. The X-Service-Token may be scoped to any project.
-# If there are several reseller prefix items, you can prefix the
-# parameter so it applies only to those accounts (for example
-# the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project>
-# path). If you omit the prefix, the option applies to all reseller
-# prefix items. For the blank/empty prefix, prefix with '' (do not put
-# underscore after the two single quote characters).
-# By default, no service_roles are required.
-# service_roles =
-#
-# For backwards compatibility, keystoneauth will match names in cross-tenant
-# access control lists (ACLs) when both the requesting user and the tenant
-# are in the default domain i.e the domain to which existing tenants are
-# migrated. The default_domain_id value configured here should be the same as
-# the value used during migration of tenants to keystone domains.
-# default_domain_id = default
-#
-# For a new installation, or an installation in which keystone projects may
-# move between domains, you should disable backwards compatible name matching
-# in ACLs by setting allow_names_in_acls to false:
-# allow_names_in_acls = true
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-# An optional filesystem path, which if present, will cause the healthcheck
-# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
-# This facility may be used to temporarily remove a Swift node from a load
-# balancer pool during maintenance or upgrade (remove the file to allow the
-# node back into the load balancer pool).
-# disable_path =
-
-[filter:cache]
-use = egg:swift#memcache
-memcache_servers = {{ memcached_servers }}
-# You can override the default log routing for this filter here:
-# set log_name = cache
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# If not set here, the value for memcache_servers will be read from
-# memcache.conf (see memcache.conf-sample) or lacking that file, it will
-# default to the value below. You can specify multiple servers separated with
-# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
-# follow rfc3986 section-3.2.2, i.e. [::1]:11211)
-# memcache_servers = 127.0.0.1:11211
-#
-# Sets how memcache values are serialized and deserialized:
-# 0 = older, insecure pickle serialization
-# 1 = json serialization but pickles can still be read (still insecure)
-# 2 = json serialization only (secure and the default)
-# If not set here, the value for memcache_serialization_support will be read
-# from /etc/swift/memcache.conf (see memcache.conf-sample).
-# To avoid an instant full cache flush, existing installations should
-# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
-# set to 2 and reload.
-# In the future, the ability to use pickle serialization will be removed.
-# memcache_serialization_support = 2
-#
-# Sets the maximum number of connections to each memcached server per worker
-# memcache_max_connections = 2
-#
-# More options documented in memcache.conf-sample
-
-[filter:ratelimit]
-use = egg:swift#ratelimit
-# You can override the default log routing for this filter here:
-# set log_name = ratelimit
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# clock_accuracy should represent how accurate the proxy servers' system clocks
-# are with each other. 1000 means that all the proxies' clock are accurate to
-# each other within 1 millisecond. No ratelimit should be higher than the
-# clock accuracy.
-# clock_accuracy = 1000
-#
-# max_sleep_time_seconds = 60
-#
-# log_sleep_time_seconds of 0 means disabled
-# log_sleep_time_seconds = 0
-#
-# allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
-# rate_buffer_seconds = 5
-#
-# account_ratelimit of 0 means disabled
-# account_ratelimit = 0
-
-# DEPRECATED- these will continue to work but will be replaced
-# by the X-Account-Sysmeta-Global-Write-Ratelimit flag.
-# Please see ratelimiting docs for details.
-# these are comma separated lists of account names
-# account_whitelist = a,b
-# account_blacklist = c,d
-
-# with container_ratelimit_x = r
-# for containers of size x limit write requests per second to r. The container
-# rate will be linearly interpolated from the values given. With the values
-# below, a container of size 5 will get a rate of 75.
-# container_ratelimit_0 = 100
-# container_ratelimit_10 = 50
-# container_ratelimit_50 = 20
-
-# Similarly to the above container-level write limits, the following will limit
-# container GET (listing) requests.
-# container_listing_ratelimit_0 = 100
-# container_listing_ratelimit_10 = 50
-# container_listing_ratelimit_50 = 20
-
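The linear interpolation described above is easy to check by hand; the sketch below (plain Python, illustration only, not the middleware's own code) reproduces it for the sample container_ratelimit_* values and confirms that a container of size 5 gets a write rate of 75.

    # Sketch of the linear interpolation described in the ratelimit comments above.
    def container_ratelimit(size, points):
        """points: list of (container_size, writes_per_sec) pairs."""
        points = sorted(points)
        if size <= points[0][0]:
            return points[0][1]
        if size >= points[-1][0]:
            return points[-1][1]
        for (x0, r0), (x1, r1) in zip(points, points[1:]):
            if x0 <= size <= x1:
                return r0 + (r1 - r0) * (size - x0) / float(x1 - x0)

    # container_ratelimit_0 = 100, container_ratelimit_10 = 50, container_ratelimit_50 = 20
    points = [(0, 100), (10, 50), (50, 20)]
    print(container_ratelimit(5, points))   # 75.0, as the comment above says
    print(container_ratelimit(30, points))  # 35.0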
-[filter:domain_remap]
-use = egg:swift#domain_remap
-# You can override the default log routing for this filter here:
-# set log_name = domain_remap
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# storage_domain = example.com
-# path_root = v1
-
-# Browsers can convert a host header to lowercase, so check that the reseller
-# prefix on the account is the correct case. This is done by comparing the
-# items in the reseller_prefixes config option to the found prefix. If they
-# match except for case, the item from reseller_prefixes will be used
-# instead of the found reseller prefix. When none match, the default reseller
-# prefix is used. When no default reseller prefix is configured, any request
-# with an account prefix not in that list will be ignored by this middleware.
-# reseller_prefixes = AUTH
-# default_reseller_prefix =
-
-[filter:catch_errors]
-use = egg:swift#catch_errors
-# You can override the default log routing for this filter here:
-# set log_name = catch_errors
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-[filter:cname_lookup]
-# Note: this middleware requires python-dnspython
-use = egg:swift#cname_lookup
-# You can override the default log routing for this filter here:
-# set log_name = cname_lookup
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# Specify the storage_domain that matches your cloud; multiple domains
-# can be specified, separated by a comma
-# storage_domain = example.com
-#
-# lookup_depth = 1
-
-# Note: Put staticweb just after your auth filter(s) in the pipeline
-[filter:staticweb]
-use = egg:swift#staticweb
-# You can override the default log routing for this filter here:
-# set log_name = staticweb
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
-[filter:tempurl]
-use = egg:swift#tempurl
-# The methods allowed with Temp URLs.
-# methods = GET HEAD PUT POST DELETE
-#
-# The headers to remove from incoming requests. Simply a whitespace delimited
-# list of header names and names can optionally end with '*' to indicate a
-# prefix match. incoming_allow_headers is a list of exceptions to these
-# removals.
-# incoming_remove_headers = x-timestamp
-#
-# The headers allowed as exceptions to incoming_remove_headers. Simply a
-# whitespace delimited list of header names and names can optionally end with
-# '*' to indicate a prefix match.
-# incoming_allow_headers =
-#
-# The headers to remove from outgoing responses. Simply a whitespace delimited
-# list of header names and names can optionally end with '*' to indicate a
-# prefix match. outgoing_allow_headers is a list of exceptions to these
-# removals.
-# outgoing_remove_headers = x-object-meta-*
-#
-# The headers allowed as exceptions to outgoing_remove_headers. Simply a
-# whitespace delimited list of header names and names can optionally end with
-# '*' to indicate a prefix match.
-# outgoing_allow_headers = x-object-meta-public-*
-
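For reference, a temp URL is authorized by an HMAC-SHA1 signature over the request method, expiry time and object path, keyed with the account's temp-URL key. A minimal sketch of generating one (the key and path below are made-up illustration values):

    # Minimal sketch of building a Swift temp URL query string; key and path
    # are hypothetical values, not anything from this deployment.
    import hmac
    import time
    from hashlib import sha1

    key = b'account-temp-url-key'        # set via X-Account-Meta-Temp-URL-Key
    method = 'GET'                       # must be one of the allowed methods above
    expires = int(time.time()) + 3600    # link valid for one hour
    path = '/v1/AUTH_demo/container/object'

    body = '\n'.join([method, str(expires), path]).encode()
    sig = hmac.new(key, body, sha1).hexdigest()
    print('%s?temp_url_sig=%s&temp_url_expires=%d' % (path, sig, expires))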
-# Note: Put formpost just before your auth filter(s) in the pipeline
-[filter:formpost]
-use = egg:swift#formpost
-
-# Note: Just needs to be placed before the proxy-server in the pipeline.
-[filter:name_check]
-use = egg:swift#name_check
-# forbidden_chars = '"`<>
-# maximum_length = 255
-# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
-
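A rough illustration of what the name_check defaults above reject (plain Python, not the middleware itself):

    # Rough illustration of the default name_check rules shown above.
    import re

    FORBIDDEN_CHARS = "'\"`<>"
    MAXIMUM_LENGTH = 255
    FORBIDDEN_REGEXP = r'/\./|/\.\./|/\.$|/\.\.$'

    def name_allowed(path):
        if len(path) > MAXIMUM_LENGTH:
            return False
        if any(c in path for c in FORBIDDEN_CHARS):
            return False
        if re.search(FORBIDDEN_REGEXP, path):
            return False
        return True

    print(name_allowed('/v1/AUTH_demo/c/o'))      # True
    print(name_allowed('/v1/AUTH_demo/c/../o'))   # False, matches the forbidden regexp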
-[filter:list-endpoints]
-use = egg:swift#list_endpoints
-# list_endpoints_path = /endpoints/
-
-[filter:proxy-logging]
-use = egg:swift#proxy_logging
-# If not set, logging directives from [DEFAULT] without "access_" will be used
-# access_log_name = swift
-# access_log_facility = LOG_LOCAL0
-# access_log_level = INFO
-# access_log_address = /dev/log
-#
-# If set, access_log_udp_host will override access_log_address
-# access_log_udp_host =
-# access_log_udp_port = 514
-#
-# You can use log_statsd_* from [DEFAULT] or override them here:
-# access_log_statsd_host =
-# access_log_statsd_port = 8125
-# access_log_statsd_default_sample_rate = 1.0
-# access_log_statsd_sample_rate_factor = 1.0
-# access_log_statsd_metric_prefix =
-# access_log_headers = false
-#
-# If access_log_headers is True and access_log_headers_only is set, only
-# these headers are logged. Multiple headers can be defined as a comma separated
-# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
-# access_log_headers_only =
-#
-# By default, the X-Auth-Token is logged. To obscure the value,
-# set reveal_sensitive_prefix to the number of characters to log.
-# For example, if set to 12, only the first 12 characters of the
-# token appear in the log. An unauthorized access of the log file
-# won't allow unauthorized usage of the token. However, the first
-# 12 or so characters are unique enough that you can trace/debug
-# token usage. Set to 0 to suppress the token completely (replaced
-# by '...' in the log).
-# Note: reveal_sensitive_prefix will not affect the value
-# logged with access_log_headers=True.
-# reveal_sensitive_prefix = 16
-#
-# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
-# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
-# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
-#
-# Note: The double proxy-logging in the pipeline is not a mistake. The
-# left-most proxy-logging is there to log requests that were handled in
-# middleware and never made it through to the right-most middleware (and
-# proxy server). Double logging is prevented for normal requests. See
-# proxy-logging docs.
-
-# Note: Put before both ratelimit and auth in the pipeline.
-[filter:bulk]
-use = egg:swift#bulk
-# max_containers_per_extraction = 10000
-# max_failed_extractions = 1000
-# max_deletes_per_request = 10000
-# max_failed_deletes = 1000
-
-# In order to keep a connection active during a potentially long bulk request,
-# Swift may return whitespace prepended to the actual response body. This
-# whitespace will be yielded no more than every yield_frequency seconds.
-# yield_frequency = 10
-
-# Note: The following parameter is used during a bulk delete of objects and
-# their container. Such a delete can frequently fail because it is very likely
-# that not all replicated objects have been deleted by the time the middleware
-# gets a successful response. The number of retries can be configured here;
-# the wait between each retry will be 1.5**retry seconds.
-
-# delete_container_retry_count = 0
-
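The retry back-off mentioned above (1.5**retry seconds between attempts) works out as follows for a hypothetical delete_container_retry_count of 5:

    # Wait between container-delete retries as described in the note above.
    for retry in range(1, 6):
        print('retry %d: wait %.2f s' % (retry, 1.5 ** retry))
    # retry 1: wait 1.50 s
    # retry 2: wait 2.25 s
    # retry 3: wait 3.38 s
    # retry 4: wait 5.06 s
    # retry 5: wait 7.59 s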
-# Note: Put after auth and staticweb in the pipeline.
-[filter:slo]
-use = egg:swift#slo
-# max_manifest_segments = 1000
-# max_manifest_size = 2097152
-#
-# Rate limiting applies only to segments smaller than this size (bytes).
-# rate_limit_under_size = 1048576
-#
-# Start rate-limiting SLO segment serving after the Nth small segment of a
-# segmented object.
-# rate_limit_after_segment = 10
-#
-# Once segment rate-limiting kicks in for an object, limit segments served
-# to N per second. 0 means no rate-limiting.
-# rate_limit_segments_per_sec = 1
-#
-# Time limit on GET requests (seconds)
-# max_get_time = 86400
-
-# Note: Put after auth and staticweb in the pipeline.
-# If you don't put it in the pipeline, it will be inserted for you.
-[filter:dlo]
-use = egg:swift#dlo
-# Start rate-limiting DLO segment serving after the Nth segment of a
-# segmented object.
-# rate_limit_after_segment = 10
-#
-# Once segment rate-limiting kicks in for an object, limit segments served
-# to N per second. 0 means no rate-limiting.
-# rate_limit_segments_per_sec = 1
-#
-# Time limit on GET requests (seconds)
-# max_get_time = 86400
-
-# Note: Put after auth in the pipeline.
-[filter:container-quotas]
-use = egg:swift#container_quotas
-
-# Note: Put after auth in the pipeline.
-[filter:account-quotas]
-use = egg:swift#account_quotas
-
-[filter:gatekeeper]
-use = egg:swift#gatekeeper
-# Set this to false if you want to allow clients to set arbitrary X-Timestamps
-# on uploaded objects. This may be used to preserve timestamps when migrating
-# from a previous storage system, but risks allowing users to upload
-# difficult-to-delete data.
-# shunt_inbound_x_timestamp = true
-#
-# You can override the default log routing for this filter here:
-# set log_name = gatekeeper
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-[filter:container_sync]
-use = egg:swift#container_sync
-# Set this to false if you want to disallow any full url values to be set for
-# any new X-Container-Sync-To headers. This will keep any new full urls from
-# coming in, but won't change any existing values already in the cluster.
-# Updating those will have to be done manually, as the true realm endpoint
-# cannot always be guessed.
-# allow_full_urls = true
-# Set this to specify this cluster's //realm/cluster as "current" in /info
-# current = //REALM/CLUSTER
-
-# Note: Put it at the beginning of the pipeline to profile all middleware. But
-# it is safer to put this after catch_errors, gatekeeper and healthcheck.
-[filter:xprofile]
-use = egg:swift#xprofile
-# This option enables you to switch profilers, which should inherit from the
-# Python standard profiler. Currently the supported values are 'cProfile',
-# 'eventlet.green.profile', etc.
-# profile_module = eventlet.green.profile
-#
-# This prefix will be used to combine process ID and timestamp to name the
-# profile data file. Make sure the executing user has permission to write
-# into this path (missing path segments will be created, if necessary).
-# If you enable profiling in more than one type of daemon, you must override
-# it with a unique value like: /var/log/swift/profile/proxy.profile
-# log_filename_prefix = /tmp/log/swift/profile/default.profile
-#
-# The profile data will be dumped to local disk at this interval, using the
-# naming rule above.
-# dump_interval = 5.0
-#
-# Be careful: this option makes the profiler dump data into timestamped files,
-# which means lots of files will pile up in the directory.
-# dump_timestamp = false
-#
-# This is the path of the URL to access the mini web UI.
-# path = /__profile__
-#
-# Clear the data when the wsgi server shuts down.
-# flush_at_shutdown = false
-#
-# unwind the iterator of applications
-# unwind = false
-
-# Note: Put after slo, dlo in the pipeline.
-# If you don't put it in the pipeline, it will be inserted automatically.
-[filter:versioned_writes]
-use = egg:swift#versioned_writes
-# Enables using versioned writes middleware and exposing configuration
-# settings via HTTP GET /info.
-# WARNING: Setting this option bypasses the "allow_versions" option
-# in the container configuration file, which will eventually be
-# deprecated. See documentation for more details.
-# allow_versioned_writes = false
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/rsyncd.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/rsyncd.conf
deleted file mode 100644
index 703c55eb..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/rsyncd.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-uid = swift
-gid = swift
-log file = /var/log/rsyncd.log
-pid file = /var/run/rsyncd.pid
-address = {{ internal_ip }}
-
-[account]
-max connections = 2
-path = /srv/node/
-read only = False
-lock file = /var/lock/account.lock
-
-[container]
-max connections = 2
-path = /srv/node/
-read only = False
-lock file = /var/lock/container.lock
-
-[object]
-max connections = 2
-path = /srv/node/
-read only = False
-lock file = /var/lock/object.lock
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/swift.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/swift.conf
deleted file mode 100644
index 9a31501b..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/swift.conf
+++ /dev/null
@@ -1,183 +0,0 @@
-[swift-hash]
-
-# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
-# hashing algorithm when determining data placement in the cluster.
-# These values should remain secret and MUST NOT change
-# once a cluster has been deployed.
-# Use only printable chars (python -c "import string; print(string.printable)")
-
-swift_hash_path_suffix = 7c6a7cd34d07aed5
-swift_hash_path_prefix = 0c4629166f4de441
-
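The reason these two values must never change is that object placement is derived from a hash of the path wrapped in the prefix and suffix; the sketch below approximates that lookup (MD5 over prefix + path + suffix, top bits taken as the partition) using the sample values from this template. The real ring code differs in detail, but the consequence is the same: changing either value remaps every object.

    # Simplified sketch of how the prefix/suffix enter data placement.
    from hashlib import md5

    HASH_PATH_PREFIX = b'0c4629166f4de441'
    HASH_PATH_SUFFIX = b'7c6a7cd34d07aed5'

    def hash_path(account, container=None, obj=None):
        path = '/' + '/'.join(p for p in (account, container, obj) if p)
        return md5(HASH_PATH_PREFIX + path.encode() + HASH_PATH_SUFFIX).hexdigest()

    part_power = 10  # a ring with 2**10 partitions, for illustration
    digest = hash_path('AUTH_demo', 'photos', 'cat.jpg')
    partition = int(digest, 16) >> (128 - part_power)
    print(digest, partition)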
-# storage policies are defined here and determine various characteristics
-# about how objects are stored and treated. Policies are specified by name on
-# a per container basis. Names are case-insensitive. The policy index is
-# specified in the section header and is used internally. The policy with
-# index 0 is always used for legacy containers and can be given a name for use
-# in metadata; however, the ring file name will always be 'object.ring.gz' for
-# backwards compatibility. If no policies are defined a policy with index 0
-# will be automatically created for backwards compatibility and given the name
-# Policy-0. A default policy is used when creating new containers when no
-# policy is specified in the request. If no other policies are defined the
-# policy with index 0 will be declared the default. If multiple policies are
-# defined you must define a policy with index 0 and you must specify a
-# default. It is recommended you always define a section for
-# storage-policy:0. Aliases are not required when defining a storage policy.
-#
-# A 'policy_type' argument is also supported but is not mandatory. Default
-# policy type 'replication' is used when 'policy_type' is unspecified.
-[storage-policy:0]
-name = Policy-0
-default = yes
-#policy_type = replication
-aliases = yellow, orange
-
-# The following section would declare a policy called 'silver'; the number of
-# replicas will be determined by how the ring is built. In this example the
-# 'silver' policy could have a lower or higher number of replicas than the
-# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You
-# may only specify one storage policy section as the default. If you changed
-# this section to specify 'silver' as the default, a client creating a new
-# container without a policy specified would get the 'silver' policy because
-# this config has specified it as the default. However, if a legacy container
-# (one created with a pre-policy version of swift) is accessed, it is known
-# implicitly to be assigned to the policy with index 0 as opposed to the
-# current default. Note that even without specifying any aliases, a policy
-# always has at least the default name stored in aliases because this field is
-# used to contain all human readable names for a storage policy.
-#
-#[storage-policy:1]
-#name = silver
-#policy_type = replication
-
-# The following declares a storage policy of type 'erasure_coding' which uses
-# Erasure Coding for data reliability. Please refer to Swift documentation for
-# details on how the 'erasure_coding' storage policy is implemented.
-#
-# Swift uses PyECLib, a Python Erasure coding API library, for encode/decode
-# operations. Please refer to Swift documentation for details on how to
-# install PyECLib.
-#
-# When defining an EC policy, 'policy_type' needs to be 'erasure_coding' and
-# EC configuration parameters 'ec_type', 'ec_num_data_fragments' and
-# 'ec_num_parity_fragments' must be specified. 'ec_type' is chosen from the
-# list of EC backends supported by PyECLib. The ring configured for the
-# storage policy must have its "replica" count configured to
-# 'ec_num_data_fragments' + 'ec_num_parity_fragments' - this requirement is
-# validated when services start. 'ec_object_segment_size' is the amount of
-# data that will be buffered up before feeding a segment into the
-# encoder/decoder. More information about these configuration options and
-# supported `ec_type` schemes is available in the Swift documentation. Please
-# refer to Swift documentation for details on how to configure EC policies.
-#
-# The example 'deepfreeze10-4' policy defined below is a _sample_
-# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity'
-# fragments. 'ec_type' defines the Erasure Coding scheme.
-# 'liberasurecode_rs_vand' (Reed-Solomon Vandermonde) is used as an example
-# below.
-#
-#[storage-policy:2]
-#name = deepfreeze10-4
-#aliases = df10-4
-#policy_type = erasure_coding
-#ec_type = liberasurecode_rs_vand
-#ec_num_data_fragments = 10
-#ec_num_parity_fragments = 4
-#ec_object_segment_size = 1048576
-
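For the sample 'deepfreeze10-4' policy above, the ring's replica count and the raw storage overhead follow directly from the fragment counts:

    # Quick arithmetic for the sample EC policy above (10 data + 4 parity).
    ec_num_data_fragments = 10
    ec_num_parity_fragments = 4

    ring_replicas = ec_num_data_fragments + ec_num_parity_fragments
    print(ring_replicas)                                  # 14, the required ring "replica" count

    # Each segment becomes 14 fragments of which 10 carry data, so raw storage
    # overhead is roughly 14/10 = 1.4x the object size (ignoring fragment headers).
    print(ring_replicas / float(ec_num_data_fragments))   # 1.4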
-
-# The swift-constraints section sets the basic constraints on data
-# saved in the swift cluster. These constraints are automatically
-# published by the proxy server in responses to /info requests.
-
-[swift-constraints]
-
-# max_file_size is the largest "normal" object that can be saved in
-# the cluster. This is also the limit on the size of each segment of
-# a "large" object when using the large object manifest support.
-# This value is set in bytes. Setting it lower than 1 MiB will cause
-# some tests to fail. It is STRONGLY recommended to leave this value at
-# the default (5 * 2**30 + 2).
-
-#max_file_size = 5368709122
-
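The recommended default above is written as 5 * 2**30 + 2, i.e. 5 GiB plus two bytes, which matches the commented value:

    print(5 * 2**30 + 2)   # 5368709122, the max_file_size default shown above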
-
-# max_meta_name_length is the max number of bytes in the utf8 encoding
-# of the name portion of a metadata header.
-
-#max_meta_name_length = 128
-
-
-# max_meta_value_length is the max number of bytes in the utf8 encoding
-# of a metadata value
-
-#max_meta_value_length = 256
-
-
-# max_meta_count is the max number of metadata keys that can be stored
-# on a single account, container, or object
-
-#max_meta_count = 90
-
-
-# max_meta_overall_size is the max number of bytes in the utf8 encoding
-# of the metadata (keys + values)
-
-#max_meta_overall_size = 4096
-
-# max_header_size is the max number of bytes in the utf8 encoding of each
-# header. The default is 8192 because eventlet uses 8192 as the max size of a
-# header line. This value may need to be increased when using identity
-# v3 API tokens including more than 7 catalog entries.
-# See also include_service_catalog in proxy-server.conf-sample
-# (documented in overview_auth.rst)
-
-#max_header_size = 8192
-
-
-# By default the maximum number of allowed headers depends on the number of max
-# allowed metadata settings plus a default value of 32 for regular http
-# headers. If for some reason this is not enough (custom middleware for
-# example) it can be increased with the extra_header_count constraint.
-
-#extra_header_count = 0
-
-
-# max_object_name_length is the max number of bytes in the utf8 encoding
-# of an object name
-
-#max_object_name_length = 1024
-
-
-# container_listing_limit is the default (and max) number of items
-# returned for a container listing request
-
-#container_listing_limit = 10000
-
-
-# account_listing_limit is the default (and max) number of items returned
-# for an account listing request
-#account_listing_limit = 10000
-
-
-# max_account_name_length is the max number of bytes in the utf8 encoding
-# of an account name
-
-#max_account_name_length = 256
-
-
-# max_container_name_length is the max number of bytes in the utf8 encoding
-# of a container name
-
-#max_container_name_length = 256
-
-
-# By default all REST API calls should use "v1" or "v1.0" as the version string,
-# for example "/v1/account". This can be manually overridden to make this
-# backward-compatible, in case a different version string has been used before.
-# Use a comma-separated list in case of multiple allowed versions, for example
-# valid_api_versions = v0,v1,v2
-# This is only enforced for account, container and object requests. The allowed
-# api versions are by default excluded from /info.
-
-# valid_api_versions = v1,v1.0
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/Debian.yml
deleted file mode 100644
index 39aea32d..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/Debian.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-controller_packages:
- - swift
- - swift-proxy
- - python-swiftclient
- - python-keystoneclient
- - memcached
-
-compute_packages:
- - xfsprogs
- - rsync
- - swift
- - swift-account
- - swift-container
- - swift-object
-
-
-services: []
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2
deleted file mode 100644
index ae0f644a..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2
+++ /dev/null
@@ -1,426 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = True
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = True
-
-# Where to store Tacker state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/tacker
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-auth_strategy = keystone
-policy_file = /usr/local/etc/tacker/policy.json
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-# log_dir =
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ internal_ip }}
-
-# Port to bind the API server to
-bind_port = 8888
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of tacker.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Tacker core plugin entrypoint to be loaded from the
-# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the tacker source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-# core_plugin =
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the tacker source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-
-service_plugins = vnfm,nfvo
-
-# Paste configuration file
-# api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-# auth_strategy = keystone
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Tacker is
-# being used in conjunction with nova security groups
-# allow_overlapping_ips = False
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = tacker.openstack.common.rpc.impl_kombu
-# Size of RPC thread pool
-# rpc_thread_pool_size = 64
-# Size of RPC connection pool
-# rpc_conn_pool_size = 30
-# Seconds to wait for a response from call or multicall
-# rpc_response_timeout = 60
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-# rpc_cast_timeout = 30
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = tacker
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# IP address of the RabbitMQ installation
-# rabbit_host = localhost
-# Password of the RabbitMQ server
-# rabbit_password = guest
-# Port where RabbitMQ server is running/listening
-# rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-# rabbit_userid = guest
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-
-# QPID
-# rpc_backend=tacker.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=tacker.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = tacker.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = tacker.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = tacker.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-# default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-# notification_topics = notifications
-
-# Default maximum number of items returned in a single response.
-# A value of 'infinite' or a value < 0 means no max limit; otherwise the value
-# must be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, the server will return at most pagination_max_limit
-# items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-# agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# tacker server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to tacker server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-# api_workers = 0
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-# rpc_workers = 0
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== tacker nova interactions ==========
-# Send notification to nova when port status is active.
-# notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-# notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-# nova_url = http://127.0.0.1:8774/v3
-
-# Name of nova region to use. Useful if keystone manages more than one region
-# nova_region_name =
-
-# Username for connection to nova in admin context
-# nova_admin_username =
-
-# The uuid of the admin nova tenant
-# nova_admin_tenant_id =
-
-# Password for connection to nova in admin context.
-# nova_admin_password =
-
-# Authorization URL for connection to nova in admin context.
-# nova_admin_auth_url =
-
-# CA file for novaclient to verify server certificates
-# nova_ca_certificates_file =
-
-# Boolean to control ignoring SSL errors on the nova url
-# nova_api_insecure = False
-
-# Number of seconds between sending events to nova if there are any events to send
-# send_events_interval = 2
-
-# ======== end of tacker nova interactions ==========
-
-[agent]
-# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the command directly
-root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-# report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-signing_dir = /var/cache/tacker
-#cafile = /opt/stack/data/ca-bundle.pem
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = tacker
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-identity_uri = http://{{ internal_vip.ip }}:5000
-auth_uri = http://{{ internal_vip.ip }}:5000
-
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/tacker
-connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main tacker server. (Leave it as is if the database runs on this host.)
-# connection = sqlite://
-# NOTE: In deployment the [database] section and its connection attribute may
-# be set in the corresponding core plugin '.ini' file. However, it is suggested
-# to put the [database] section and its connection attribute in this
-# configuration file.
-
-# Database engine for which script will be generated when using offline
-# migration
-# engine =
-
-# The SQLAlchemy connection string used to connect to the slave database
-# slave_connection =
-
-# Database reconnection retry times in the event connectivity is lost;
-# setting this to -1 implies an infinite retry count
-# max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-# retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-# min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-# max_pool_size = 10
-
-# Timeout in seconds before idle sql connections are reaped
-# idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-# max_overflow = 20
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-# connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-# connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-# pool_timeout = 10
-
-[tacker]
-# Specify drivers for hosting device
-# infra_driver = heat,nova,noop
-
-# Specify drivers for mgmt
-# mgmt_driver = noop,openwrt
-
-# Specify drivers for monitoring
-# monitor_driver = ping, http_ping
-
-[nfvo_vim]
-# Supported VIM drivers: resource orchestration controllers such as OpenStack, KVM
-# Default VIM driver is OpenStack
-#vim_drivers = openstack
-# Default VIM placement if vim id is not provided
-default_vim = VIM0
-
-[vim_keys]
-#openstack = /etc/tacker/vim/fernet_keys
-[tacker_nova]
-# parameters for novaclient to talk to nova
-region_name = RegionOne
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = nova
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-
-[tacker_heat]
-heat_uri = http://{{ internal_vip.ip }}:8004/v1
-stack_retries = 60
-stack_retry_wait = 5
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf b/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf
deleted file mode 100644
index 49caa879..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf
+++ /dev/null
@@ -1,112 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
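The Jinja2 preamble above collects every haproxy host into a comma-separated memcached_servers string; in plain Python the same rendering looks like this (host addresses are made up for illustration):

    # What the Jinja2 preamble above renders to, with hypothetical host addresses.
    haproxy_hosts = {'host1': '10.1.0.50', 'host2': '10.1.0.51', 'host3': '10.1.0.52'}

    memcached_servers = ','.join('%s:11211' % host for host in haproxy_hosts.values())
    print(memcached_servers)   # e.g. 10.1.0.50:11211,10.1.0.51:11211,10.1.0.52:11211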
-[DEFAULT]
-verbose = {{ VERBOSE }}
-debug = {{ VERBOSE }}
-state_path = /var/lib/neutron
-lock_path = $state_path/lock
-notify_nova_on_port_status_changes = True
-notify_nova_on_port_data_changes = True
-log_dir = /var/log/neutron
-bind_host = {{ network_server_host }}
-bind_port = 9696
-core_plugin = ml2
-service_plugins = router
-api_paste_config = api-paste.ini
-auth_strategy = keystone
-dhcp_lease_duration = 86400
-allow_overlapping_ips = True
-rpc_backend = rabbit
-rpc_thread_pool_size = 240
-rpc_conn_pool_size = 100
-rpc_response_timeout = 300
-rpc_cast_timeout = 300
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-default_notification_level = INFO
-notification_topics = notifications
-agent_down_time = 75
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-api_workers = 8
-rpc_workers = 8
-notify_nova_on_port_status_changes = True
-notify_nova_on_port_data_changes = True
-nova_url = http://{{ internal_vip.ip }}:8774/v3
-nova_region_name = RegionOne
-nova_admin_username = nova
-nova_admin_password = {{ NOVA_PASS }}
-nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
-send_events_interval = 2
-
-[quotas]
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-quota_items = network,subnet,port
-default_quota = -1
-quota_network = 100
-quota_subnet = 100
-quota_port = 8000
-quota_security_group = 1000
-quota_security_group_rule = 1000
-
-[agent]
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-report_interval = 30
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = neutron
-password = {{ NEUTRON_PASS }}
-
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-slave_connection =
-max_retries = 10
-retry_interval = 10
-min_pool_size = 1
-max_pool_size = 100
-idle_timeout = 30
-use_db_reconnect = True
-max_overflow = 100
-connection_debug = 0
-connection_trace = False
-pool_timeout = 10
-
-[service_providers]
-service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
-
-{% if enable_fwaas %}
-[fwaas]
-driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
-enabled = True
-{% endif %}
-
-[nova]
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = nova
-password = {{ NOVA_PASS }}
-
-[oslo_messaging_rabbit]
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf b/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf
deleted file mode 100644
index 4a7bb0a2..00000000
--- a/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf
+++ /dev/null
@@ -1,119 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-log-dir=/var/log/nova
-state_path=/var/lib/nova
-force_dhcp_release=True
-verbose={{ VERBOSE }}
-ec2_private_dns_show_ip=True
-enabled_apis=osapi_compute,metadata
-
-auth_strategy = keystone
-my_ip = {{ internal_ip }}
-use_neutron = True
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
-default_floating_pool={{ public_net_info.network }}
-metadata_listen={{ internal_ip }}
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-
-iscsi_helper=tgtadm
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-debug={{ DEBUG }}
-volumes_path=/var/lib/nova/volumes
-rpc_backend = rabbit
-osapi_compute_listen={{ internal_ip }}
-network_api_class = nova.network.neutronv2.api.API
-security_group_api = neutron
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-memcached_servers = {{ memcached_servers }}
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-idle_timeout = 30
-pool_timeout = 10
-use_db_reconnect = True
-
-[api_database]
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
-idle_timeout = 30
-pool_timeout = 10
-use_db_reconnect = True
-
-[cinder]
-os_region_name = RegionOne
-
-[oslo_concurrency]
-lock_path=/var/lib/nova/tmp
-
-[libvirt]
-use_virtio_for_bridges=True
-
-[wsgi]
-api_paste_config=/etc/nova/api-paste.ini
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = nova
-password = {{ NOVA_PASS }}
-
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[vnc]
-enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-[glance]
-api_servers = http://{{ internal_vip.ip }}:9292
-host = {{ internal_vip.ip }}
-
-[neutron]
-url = http://{{ internal_vip.ip }}:9696
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-region_name = RegionOne
-project_name = service
-username = neutron
-password = {{ NEUTRON_PASS }}
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
-
-[oslo_messaging_rabbit]
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
-
diff --git a/deploy/adapters/ansible/openstack_osp9/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_osp9/HA-ansible-multinodes.yml
deleted file mode 100755
index c91bc90a..00000000
--- a/deploy/adapters/ansible/openstack_osp9/HA-ansible-multinodes.yml
+++ /dev/null
@@ -1,265 +0,0 @@
----
-- hosts: all
- remote_user: root
- pre_tasks:
- - name: make sure ssh dir exist
- file:
- path: '{{ item.path }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- state: directory
- mode: 0755
- with_items:
- - path: /root/.ssh
- owner: root
- group: root
-
- - name: write ssh config
- copy:
- content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
- dest: '{{ item.dest }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- mode: 0600
- with_items:
- - dest: /root/.ssh/config
- owner: root
- group: root
-
- - name: generate ssh keys
- shell: if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "already gen ssh key!"; fi;
-
- - name: fetch ssh keys
- fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes
-
- - authorized_key:
- user: root
-        key: "{{ lookup('file', item) }}"
- with_fileglob:
- - /tmp/ssh-keys-*
- max_fail_percentage: 0
- roles:
- - common
-
-- hosts: all
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - setup-network
-
-- hosts: ha
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - ha
-
-- hosts: controller
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - memcached
- - apache
- - database
- - mq
- - keystone
- - nova-controller
- - neutron-controller
- - cinder-controller
- - glance
- - neutron-common
- - neutron-network
- - ceilometer_controller
-# - ext-network
- - dashboard
- - heat
- - aodh
-
-- hosts: all
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - storage
-
-- hosts: compute
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - nova-compute
- - neutron-compute
- - cinder-volume
- - ceilometer_compute
-
-- hosts: all
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - secgroup
-
-- hosts: ceph_adm
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles: []
- # - ceph-deploy
-
-- hosts: ceph
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-purge
- - ceph-config
-
-- hosts: ceph_mon
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-mon
-
-- hosts: ceph_osd
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-osd
-
-- hosts: ceph
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - ceph-openstack
-
-- hosts: all
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - monitor
-
-
-- hosts: all
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- tasks:
- - name: set bash to nova
- user:
- name: nova
- shell: /bin/bash
-
- - name: make sure ssh dir exist
- file:
- path: '{{ item.path }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- state: directory
- mode: 0755
- with_items:
- - path: /var/lib/nova/.ssh
- owner: nova
- group: nova
-
- - name: copy ssh keys for nova
- shell: cp -rf /root/.ssh/id_rsa /var/lib/nova/.ssh;
-
- - name: write ssh config
- copy:
- content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
- dest: '{{ item.dest }}'
- owner: '{{ item.owner }}'
- group: '{{ item.group }}'
- mode: 0600
- with_items:
- - dest: /var/lib/nova/.ssh/config
- owner: nova
- group: nova
-
- - authorized_key:
- user: nova
-        key: "{{ lookup('file', item) }}"
- with_fileglob:
- - /tmp/ssh-keys-*
-
- - name: chown ssh file
- shell: chown -R nova:nova /var/lib/nova/.ssh;
-
-
-- hosts: all
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - odl_cluster
-
-- hosts: all
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - onos_cluster
-
-- hosts: all
- remote_user: root
- sudo: True
- max_fail_percentage: 0
- roles:
- - open-contrail
-
-- hosts: all
- remote_user: root
- #accelerate: true
- serial: 1
- max_fail_percentage: 0
- roles:
- - odl_cluster_neutron
-
-- hosts: all
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - odl_cluster_post
-
-- hosts: controller
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - ext-network
-
-- hosts: controller
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - tacker
-
-- hosts: controller
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - boot-recovery
-
-- hosts: controller
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - controller-recovery
-
-- hosts: compute
- remote_user: root
- #accelerate: true
- max_fail_percentage: 0
- roles:
- - compute-recovery
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/handlers/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/aodh/handlers/main.yml
deleted file mode 100755
index b3399e0c..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/handlers/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart aodh services
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/aodh_config.yml b/deploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/aodh_config.yml
deleted file mode 100755
index e60d5338..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/aodh_config.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: aodh db sync
- shell: su -s /bin/sh -c "aodh-dbsync" aodh
- notify:
- - restart aodh services
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/aodh_install.yml b/deploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/aodh_install.yml
deleted file mode 100755
index eb51fbea..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/aodh_install.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install aodh packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: update aodh conf
- template: src={{ item }} dest=/etc/aodh/aodh.conf
- backup=yes
- with_items:
- - aodh.conf.j2
-# - api_paste.ini.j2
-# - policy.json.j2
- notify:
- - restart aodh services
-
-- name: write services to monitor list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: remove default sqlite db
- shell: rm /var/lib/aodh/aodh.sqlite || touch aodh.sqllite.db.removed
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/main.yml
deleted file mode 100755
index 9b61915f..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/tasks/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include: aodh_install.yml
- tags:
- - install
- - aodh_install
- - aodh
-
-- include: aodh_config.yml
- when: inventory_hostname == groups['controller'][0]
- tags:
- - config
- - aodh_config
- - aodh
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/openstack_osp9/roles/aodh/templates/aodh.conf.j2
deleted file mode 100755
index d4d232be..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/templates/aodh.conf.j2
+++ /dev/null
@@ -1,46 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
-[DEFAULT]
-bind_host = {{ internal_ip }}
-bind_port = 8042
-rpc_backend = rabbit
-auth_strategy = keystone
-debug = True
-
-[oslo_messaging_rabbit]
-rabbit_hosts = {{ internal_vip.ip }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-#rabbit_use_ssl = false
-
-[database]
-connection = mysql://aodh:{{ AODH_DBPASS }}@{{ db_host }}/aodh
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-identity_uri = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = aodh
-password = {{ AODH_PASS }}
-memcached_servers = {{ memcached_servers }}
-token_cache_time = 300
-revocation_cache_time = 60
-
-[service_credentials]
-os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0
-os_username = aodh
-os_tenant_name = service
-os_password = {{ AODH_PASS }}
-os_endpoint_type = internalURL
-os_region_name = RegionOne
-
-[api]
-host = {{ internal_ip }}
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/templates/api_paste.ini.j2 b/deploy/adapters/ansible/openstack_osp9/roles/aodh/templates/api_paste.ini.j2
deleted file mode 100755
index 151789c4..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/templates/api_paste.ini.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-# aodh API WSGI Pipeline
-# Define the filters that make up the pipeline for processing WSGI requests
-# Note: This pipeline is PasteDeploy's term rather than aodh's pipeline
-# used for processing samples
-
-# Remove authtoken from the pipeline if you don't want to use keystone authentication
-[pipeline:main]
-pipeline = cors request_id authtoken api-server
-
-[app:api-server]
-paste.app_factory = aodh.api.app:app_factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-oslo_config_project = aodh
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = aodh
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/templates/policy.json.j2 b/deploy/adapters/ansible/openstack_osp9/roles/aodh/templates/policy.json.j2
deleted file mode 100755
index 4fd873e9..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/templates/policy.json.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "context_is_admin": "role:admin",
- "segregation": "rule:context_is_admin",
- "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s",
- "default": "rule:admin_or_owner",
-
- "telemetry:get_alarm": "rule:admin_or_owner",
- "telemetry:get_alarms": "rule:admin_or_owner",
- "telemetry:query_alarm": "rule:admin_or_owner",
-
- "telemetry:create_alarm": "",
- "telemetry:change_alarm": "rule:admin_or_owner",
- "telemetry:delete_alarm": "rule:admin_or_owner",
-
- "telemetry:get_alarm_state": "rule:admin_or_owner",
- "telemetry:change_alarm_state": "rule:admin_or_owner",
-
- "telemetry:alarm_history": "rule:admin_or_owner",
- "telemetry:query_alarm_history": "rule:admin_or_owner"
-}
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/aodh/vars/Debian.yml
deleted file mode 100755
index bdf4655e..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/vars/Debian.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#############################################################################
----
-packages:
- - aodh-api
- - aodh-evaluator
- - aodh-notifier
- - aodh-listener
- - aodh-expirer
- - python-ceilometerclient
-
-services:
- - aodh-api
- - aodh-notifier
- - aodh-evaluator
- - aodh-listener
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/vars/RedHat.yml b/deploy/adapters/ansible/openstack_osp9/roles/aodh/vars/RedHat.yml
deleted file mode 100755
index a0381c6b..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/vars/RedHat.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#############################################################################
----
-packages:
- - openstack-aodh-api
- - openstack-aodh-evaluator
- - openstack-aodh-notifier
- - openstack-aodh-listener
- - openstack-aodh-expirer
- - python-ceilometerclient
-
-services:
- - openstack-aodh-api
- - openstack-aodh-notifier
- - openstack-aodh-evaluator
- - openstack-aodh-listener
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/aodh/vars/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/aodh/vars/main.yml
deleted file mode 100755
index b17f6ed0..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/aodh/vars/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-##
-## All rights reserved. This program and the accompanying materials
-## are made available under the terms of the Apache License, Version 2.0
-## which accompanies this distribution, and is available at
-## http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
----
-packages_noarch: []
-
-services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/apache/files/index.html b/deploy/adapters/ansible/openstack_osp9/roles/apache/files/index.html
deleted file mode 100755
index f083c4f1..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/apache/files/index.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
- <head>
- <title>Index</title>
- </head>
- <body>
- <a href="/horizon">Openstack Dashboard</a>
- </body>
-</html>
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/apache/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/apache/tasks/main.yml
deleted file mode 100755
index 44407bef..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/apache/tasks/main.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
- with_items: packages | union(packages_noarch)
-
-- name: ensure listen port exists
- template:
- dest: '{{ apache_config_dir }}/ports.conf'
- src: ports.conf.j2
- notify:
- - restart apache related services
-
-- name: remove default listen port on centos
- lineinfile:
- dest: /etc/httpd/conf/httpd.conf
- state: absent
- regexp: 'Listen 80'
- when: ansible_os_family == 'RedHat'
-
-- name: copy index.html file
- copy: src=index.html dest=/var/www/html/index.html mode=0644
- when: ansible_os_family == 'RedHat'
-
-- name: copy index.html file
- copy: src=index.html dest=/var/www/index.html mode=0644
- when: ansible_os_family == 'Debian'
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ceilometer_controller/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/ceilometer_controller/vars/Debian.yml
deleted file mode 100755
index b749ffaa..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ceilometer_controller/vars/Debian.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - ceilometer-api
- - ceilometer-collector
- - ceilometer-agent-central
- - ceilometer-agent-notification
-# - ceilometer-alarm-evaluator
-# - ceilometer-alarm-notifier
- - python-ceilometerclient
-
-ceilometer_services:
- - ceilometer-agent-central
- - ceilometer-agent-notification
- - ceilometer-api
- - ceilometer-collector
-# - ceilometer-alarm-evaluator
-# - ceilometer-alarm-notifier
-
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ceilometer_controller/vars/RedHat.yml b/deploy/adapters/ansible/openstack_osp9/roles/ceilometer_controller/vars/RedHat.yml
deleted file mode 100755
index 6c5f53ec..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ceilometer_controller/vars/RedHat.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-ceilometer_packages:
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
-# - openstack-ceilometer-alarm
- - python-ceilometerclient
-
-ceilometer_services:
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
-# - openstack-ceilometer-alarm-evaluator
-# - openstack-ceilometer-alarm-notifier
-
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/openstack_osp9/roles/ceph-mon/tasks/install_mon.yml
deleted file mode 100755
index 0ad666a6..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ceph-mon/tasks/install_mon.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: Create a default data directory
- file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}" state="directory"
-
-- name: Populate the monitor daemon
- shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring"
-
-- name: Change ceph/mon dir owner to ceph
- shell: "chown -R ceph:ceph /var/lib/ceph/mon"
- when: ansible_os_family == "Debian"
-
-- name: Touch the done and auto start file
- file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch"
- with_items:
- - "done"
- - "{{ ceph_start_type }}"
-
-- name: start mon daemon
- shell: "{{ ceph_start_script }}"
-
-- name: wait for osd keyring to be created
- wait_for: path=/var/lib/ceph/bootstrap-osd/ceph.keyring
-
-- name: fetch osd keyring
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
- run_once: True
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ceph-openstack/tasks/ceph_openstack_post.yml b/deploy/adapters/ansible/openstack_osp9/roles/ceph-openstack/tasks/ceph_openstack_post.yml
deleted file mode 100755
index 2097ca57..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ceph-openstack/tasks/ceph_openstack_post.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-##
-## All rights reserved. This program and the accompanying materials
-## are made available under the terms of the Apache License, Version 2.0
-## which accompanies this distribution, and is available at
-## http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
----
-- name: get mount info
- command: mount
- register: mount_info
-
-- name: try to unmount image nfs directory
- shell: |
- umount /var/lib/glance/images
- sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- when: mount_info.stdout.find('images') != -1
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/ceph-openstack/tasks/main.yml
deleted file mode 100755
index 06c3acb6..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ceph-openstack/tasks/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- include_vars: "{{ ansible_os_family }}.yml"
- tags:
- - ceph_deploy
- - ceph_openstack_pre
- - ceph_openstack_conf
- - ceph_openstack_post
- - ceph_openstack
-
-- include: ceph_openstack_pre.yml
- tags:
- - ceph_deploy
- - ceph_openstack_pre
- - ceph_openstack
-
-- include: ceph_openstack_conf.yml
- tags:
- - ceph_deploy
- - ceph_openstack_conf
- - ceph_openstack
-
-- include: ceph_openstack_post.yml
- tags:
- - ceph_deploy
- - ceph_openstack_post
- - ceph_openstack
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/openstack_osp9/roles/ceph-osd/tasks/install_osd.yml
deleted file mode 100755
index 35e84cf8..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ceph-osd/tasks/install_osd.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: create osd lv and mount it on /var/local/osd
- script: create_osd.sh
-
-- name: fetch osd keyring from ceph_adm
- fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
- delegate_to: "{{ public_vip.ip }}"
- when: compute_expansion
-
-- name: copy osd keyring
- copy: src="/tmp/ceph.osd.keyring" dest="/var/lib/ceph/bootstrap-osd/ceph.keyring"
-
-- name: prepare osd disk
- shell: ceph-disk prepare --fs-type xfs /var/local/osd
-
-- name: change local/osd dir owner to ceph
- shell: chown ceph:ceph /var/local/osd
- when: ansible_os_family == "Debian"
-
-- name: activate osd node
- shell: ceph-disk activate /var/local/osd
-
-- name: enable ceph service
- service: name=ceph enabled=yes
-
-- name: rebuild osd after reboot
- lineinfile: dest=/etc/init/ceph-osd-all-starter.conf insertafter="^task" line="pre-start script\n set -e\n /opt/setup_storage/losetup.sh\n sleep 3\n mount /dev/storage-volumes/ceph0 /var/local/osd\nend script"
- when: ansible_os_family == "Debian"
-
-- name: rebuild osd after reboot for centos
- lineinfile: dest=/etc/init.d/ceph insertafter="^### END INIT INFO" line="\nsleep 1\nmount /dev/storage-volumes/ceph0 /var/local/osd"
- when: ansible_os_family == "RedHat"
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ceph-purge/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/ceph-purge/tasks/main.yml
deleted file mode 100755
index 02013762..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ceph-purge/tasks/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: clear tmp files
- local_action: shell rm -rf /tmp/ceph*
- tags:
- - ceph_purge
- - ceph_deploy
-
-- name: install ceph-related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items:
- - ceph-deploy
- tags:
- - ceph_purge
- - ceph_deploy
- when: ansible_os_family == "Debian"
-
-- name: purge ceph
- shell: "ceph-deploy purge {{ inventory_hostname }}; ceph-deploy purgedata {{ inventory_hostname }}; ceph-deploy forgetkeys"
- tags:
- - ceph_purge
- - ceph_deploy
- when: ansible_os_family == "Debian"
-
-- name: remove monmap
- file: path="/tmp/monmap" state="absent"
- tags:
- - ceph_purge
- - ceph_deploy
-
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/common/tasks/RedHat.yml b/deploy/adapters/ansible/openstack_osp9/roles/common/tasks/RedHat.yml
deleted file mode 100755
index b9f01255..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/common/tasks/RedHat.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: add yum repository for openstack
- template: src=openstack_ppa_repo.repo.j2 dest=/etc/yum.repos.d/openstack_ppa_repo.repo
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/common/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/common/tasks/main.yml
deleted file mode 100755
index 0f4cf334..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/common/tasks/main.yml
+++ /dev/null
@@ -1,96 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: speed up ansible by purging landscape-common
- apt: pkg=landscape-common state=absent purge=yes
- when: ansible_os_family == "Debian"
-
-- name: update hosts file on all hosts
- template: src=hosts dest=/etc/hosts backup=yes
-
-- name: get compass-core hostname
- local_action: shell hostname
- register: name
-
-- name: get compass-core addr
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: COMPASS_SERVER
-
-- name: run redhat specific play if os is redhat
- include: RedHat.yml
- when: ansible_distribution == "RedHat"
-
-- name: add compass-core name and ip to hosts file
- shell: |
- echo "# compass" >> /etc/hosts
- echo {{ COMPASS_SERVER.stdout_lines[0] }} {{ name.stdout_lines[0] }} >> /etc/hosts
-
-- name: install python-crypto
- yum: name=python-crypto state=present
- register: python_crypto_result
- ignore_errors: yes
- when: ansible_os_family == "RedHat"
-
-- name: remove python-crypto egg-info file to work around https://bugs.centos.org/view.php?id=9896&nbn=2
- shell: rm -rf /usr/lib64/python2.7/site-packages/pycrypto-2.6.1-py2.7.egg-info
- when: ansible_os_family == "RedHat" and python_crypto_result.msg == "Error unpacking rpm package python2-crypto-2.6.1-9.el7.x86_64\n"
-
-- name: install packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
- with_items: packages | union(packages_noarch)
-
-- name: make config template dir exist
- file: path=/opt/os_templates state=directory mode=0755
-
-- name: create pip config directory
- file: path=~/.pip state=directory
-
-- name: update pip.conf
- template: src=pip.conf dest=~/.pip/{{ pip_conf }}
-
-- name: install pip packages
- pip: name={{ item }} state=present extra_args='--pre'
- with_items: pip_packages
-
-- name: install keyczar for accelerate
- pip: name=python-keyczar state=present extra_args='--pre'
- delegate_to: 127.0.0.1
- run_once: true
-
-- name: update ntp conf
- template: src=ntp.conf dest=/etc/ntp.conf backup=yes
-
-- name: run ntpdate once for initial time sync
- shell: ntpdate {{ ntp_server }}
- ignore_errors: True
-
-- name: sync sys clock to hard clock
- shell: hwclock --systohc
- ignore_errors: True
-
-- name: create fireball keys dir
- file: path=~/.fireball.keys state=directory mode=0700
- delegate_to: 127.0.0.1
- run_once: true
-
-- name: restart services
- service: name={{ item }} state=restarted enabled=yes
- with_items: services| union(services_noarch)
-
-- name: write services to monitor list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services| union(services_noarch)
-
-- name: kill daemon for accelerate
- shell: lsof -ni :5099|grep LISTEN|awk '{print $2}'|xargs kill -9
- ignore_errors: true
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/common/templates/hosts b/deploy/adapters/ansible/openstack_osp9/roles/common/templates/hosts
deleted file mode 100755
index 6f76de51..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/common/templates/hosts
+++ /dev/null
@@ -1,7 +0,0 @@
-
-# localhost
-127.0.0.1 localhost
-# controller
-172.16.1.1 host1
-# compute
-172.16.1.1 host1
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/common/templates/ntp.conf b/deploy/adapters/ansible/openstack_osp9/roles/common/templates/ntp.conf
deleted file mode 100755
index 2d560be2..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/common/templates/ntp.conf
+++ /dev/null
@@ -1,54 +0,0 @@
-# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
-
-driftfile /var/lib/ntp/ntp.drift
-
-
-# Enable this if you want statistics to be logged.
-#statsdir /var/log/ntpstats/
-
-statistics loopstats peerstats clockstats
-filegen loopstats file loopstats type day enable
-filegen peerstats file peerstats type day enable
-filegen clockstats file clockstats type day enable
-
-# Specify one or more NTP servers.
-
-# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
-# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
-# more information.
-server {{ ntp_server }}
-server {{ internal_vip.ip }}
-
-# Use local server as a fallback.
-server 127.127.1.0 # local clock
-fudge 127.127.1.0 stratum 10
-
-# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
-# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
-# might also be helpful.
-#
-# Note that "restrict" applies to both servers and clients, so a configuration
-# that might be intended to block requests from certain clients could also end
-# up blocking replies from your own upstream servers.
-
-# By default, exchange time with everybody, but don't allow configuration.
-restrict -4 default kod notrap nomodify
-restrict -6 default kod notrap nomodify
-
-# Local users may interrogate the ntp server more closely.
-restrict 127.0.0.1
-restrict ::1
-
-# Clients from this (example!) subnet have unlimited access, but only if
-# cryptographically authenticated.
-#restrict 192.168.123.0 mask 255.255.255.0 notrust
-
-
-# If you want to provide time to your local subnet, change the next line.
-# (Again, the address is an example only.)
-#broadcast 192.168.123.255
-
-# If you want to listen to time broadcasts on your local subnet, de-comment the
-# next lines. Please do this only if you trust everybody on the network!
-#disable auth
-#broadcastclient
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/common/templates/openstack_ppa_repo.repo.j2 b/deploy/adapters/ansible/openstack_osp9/roles/common/templates/openstack_ppa_repo.repo.j2
deleted file mode 100644
index 148f3e14..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/common/templates/openstack_ppa_repo.repo.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-[openstack_ppa_repo]
-name=rhel - openstack_repo
-proxy=_none_
-baseurl=http://{{ COMPASS_SERVER.stdout_lines[0] }}/cblr/repo_mirror/redhat7-osp9-ppa
-enabled=1
-gpgcheck=0
-skip_if_unavailable=1
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/common/templates/pip.conf b/deploy/adapters/ansible/openstack_osp9/roles/common/templates/pip.conf
deleted file mode 100755
index 7bb3e43e..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/common/templates/pip.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-[global]
-find-links = http://{{ COMPASS_SERVER.stdout_lines[0] }}/pip
-no-index = true
-[install]
-trusted-host={{ COMPASS_SERVER.stdout_lines[0] }}
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/common/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/common/vars/Debian.yml
deleted file mode 100755
index 1d7972eb..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/common/vars/Debian.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - ubuntu-cloud-keyring
- - python-dev
- - openvswitch-datapath-dkms
- - openvswitch-switch
- - python-memcache
- - python-iniparse
- - python-lxml
- #- python-d* #TODO, need remove
-
-pip_packages:
- - crudini
- - python-keyczar
- - yang2tosca
-
-pip_conf: pip.conf
-
-services:
- - ntp
-
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/common/vars/RedHat.yml b/deploy/adapters/ansible/openstack_osp9/roles/common/vars/RedHat.yml
deleted file mode 100755
index 8143e1cb..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/common/vars/RedHat.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - openvswitch
- - python-devel
- - python-memcached
- - gcc
- - redhat-lsb-core
- - python-crypto
-
-pip_packages:
- - crudini
- - python-keyczar
-
-pip_conf: pip.conf
-
-services:
- - openvswitch
- - ntpd
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/common/vars/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/common/vars/main.yml
deleted file mode 100755
index 713b6b5f..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/common/vars/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch:
- - python-pip
- - ntp
-
-services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/handlers/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/dashboard/handlers/main.yml
deleted file mode 100755
index 62e0b8e5..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/handlers/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart dashboard services
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/dashboard/tasks/main.yml
deleted file mode 100755
index a6b813a7..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/tasks/main.yml
+++ /dev/null
@@ -1,121 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install dashboard packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: remove ubuntu theme
- action: "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent"
- when: ansible_os_family == 'Debian' and not enable_ubuntu_theme
- notify:
- - restart dashboard services
-
-- name: remove default apache2 config
- file:
- path: '{{ item }}'
- state: absent
- when: ansible_os_family == 'Debian'
- with_items:
- - '{{ apache_config_dir }}/conf-available/openstack-dashboard.conf'
- - '{{ apache_config_dir }}/conf-enabled/openstack-dashboard.conf'
- - '{{ apache_config_dir }}/sites-available/000-default.conf'
- - '{{ apache_config_dir }}/sites-enabled/000-default.conf'
- notify:
- - restart dashboard services
-
-- name: update apache2 configs
- template:
- src: openstack-dashboard.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/openstack-dashboard.conf'
- when: ansible_os_family == 'Debian'
- notify:
- - restart dashboard services
-
-- name: update apache2 configs redhat
- template:
- src: openstack-dashboard-redhat.conf.j2
- dest: '{{ apache_config_dir }}/conf.d/openstack-dashboard.conf'
- when: ansible_os_family == 'RedHat'
- notify:
- - restart dashboard services
-
-- name: enable dashboard
- file:
- src: "/etc/apache2/sites-available/openstack-dashboard.conf"
- dest: "/etc/apache2/sites-enabled/openstack-dashboard.conf"
- state: "link"
- when: ansible_os_family == 'Debian'
- notify:
- - restart dashboard services
-
-- name: update ubuntu horizon settings
- lineinfile:
- dest: /etc/openstack-dashboard/local_settings.py
- regexp: '{{ item.regexp }}'
- line: '{{ item.line }}'
- with_items:
- - regexp: '^WEBROOT[ \t]*=.*'
- line: 'WEBROOT = "/horizon"'
- - regexp: '^COMPRESS_OFFLINE[ \t]*=.*'
- line: 'COMPRESS_OFFLINE=True'
- - regexp: '^ALLOWED_HOSTS[ \t]*=.*'
- line: 'ALLOWED_HOSTS = ["*"]'
- - regexp: '^OPENSTACK_HOST[ \t]*=.*'
- line: 'OPENSTACK_HOST = "{{ internal_ip }}"'
- when: ansible_os_family == 'Debian'
- notify:
- - restart dashboard services
-
-- name: precompile horizon css
- shell: /usr/bin/python /usr/share/openstack-dashboard/manage.py compress --force
- ignore_errors: True
- when: ansible_os_family == 'Debian'
- notify:
- - restart dashboard services
-
-- name: update redhat version horizon settings
- lineinfile:
- dest: /etc/openstack-dashboard/local_settings
- regexp: '{{ item.regexp }}'
- line: '{{ item.line }}'
- with_items:
- - regexp: '^WEBROOT[ \t]*=.*'
- line: 'WEBROOT = "/horizon"'
- - regexp: '^COMPRESS_OFFLINE[ \t]*=.*'
- line: 'COMPRESS_OFFLINE=False'
- - regexp: '^ALLOWED_HOSTS[ \t]*=.*'
- line: 'ALLOWED_HOSTS = ["*"]'
- - regexp: '^OPENSTACK_HOST[ \t]*=.*'
- line: 'OPENSTACK_HOST = "{{ internal_ip }}"'
- when: ansible_os_family == 'RedHat'
- notify:
- - restart dashboard services
-
-- name: temporary workaround for logo image issue
- shell: sed -i "s/src=\"\/dashboard/src=\"\/horizon/g" /usr/share/openstack-dashboard/openstack_dashboard/themes/rcue/templates/horizon/common/_sidebar.html
- when: ansible_distribution == 'RedHat'
- notify:
- - restart dashboard services
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard-redhat.conf.j2 b/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard-redhat.conf.j2
deleted file mode 100755
index d4d1f297..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard-redhat.conf.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
-
-WSGIDaemonProcess horizon processes={{ work_threads }} threads={{ work_threads }}
-WSGIProcessGroup horizon
-WSGISocketPrefix run/wsgi
-
-WSGIScriptAlias /horizon {{ horizon_dir }}/openstack_dashboard/wsgi/django.wsgi
-Alias /horizon/static {{ horizon_dir }}/static
-
-<Directory {{ horizon_dir }}/openstack_dashboard/wsgi>
- Options All
- AllowOverride All
- Require all granted
-</Directory>
-
-<Directory {{ horizon_dir }}/static>
- Options All
- AllowOverride All
- Require all granted
-</Directory>
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard.conf b/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard.conf
deleted file mode 100755
index a5a791a3..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-<VirtualHost *:80>
-
-WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
-WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10
-Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/
-
-<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
-Order allow,deny
-Allow from all
-</Directory>
-
-
-</VirtualHost>
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard.conf.j2 b/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard.conf.j2
deleted file mode 100755
index 403fcc22..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/openstack-dashboard.conf.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
-
-<VirtualHost {{ internal_ip }}:80>
- WSGIScriptAlias /horizon {{ horizon_dir }}/wsgi/django.wsgi
- WSGIDaemonProcess horizon user=horizon group=horizon processes={{ work_threads }} threads={{ work_threads }}
- WSGIProcessGroup horizon
- Alias /static {{ horizon_dir }}/static/
- Alias /horizon/static {{ horizon_dir }}/static/
- <Directory {{ horizon_dir }}/wsgi>
- Order allow,deny
- Allow from all
- </Directory>
-</VirtualHost>
-
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/ports.j2 b/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/ports.j2
deleted file mode 100755
index 0bfa0428..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/templates/ports.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-# if you just change the port or add more ports here, you will likely also
-# have to change the VirtualHost statement in
-# /etc/apache2/sites-enabled/000-default.conf
-
-Listen {{ internal_ip }}:80
-
-<IfModule ssl_module>
- Listen 443
-</IfModule>
-
-<IfModule mod_gnutls.c>
- Listen 443
-</IfModule>
-
-# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/Debian.yml
deleted file mode 100755
index aaeb8cdb..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/Debian.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages: []
-
-services:
- - memcached
- - apache2
-
-apache_config_dir: /etc/apache2
-horizon_dir: /usr/share/openstack-dashboard/openstack_dashboard
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/RedHat.yml b/deploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/RedHat.yml
deleted file mode 100755
index 651cbee3..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/RedHat.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - mod_wsgi
- - httpd
-
-services:
- - httpd
-
-http_config_file: "/etc/httpd/conf/httpd.conf"
-apache_config_dir: /etc/httpd
-horizon_dir: /usr/share/openstack-dashboard
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/main.yml
deleted file mode 100755
index 2c940ede..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/dashboard/vars/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch:
- - openstack-dashboard
-
-services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/database/templates/data.j2 b/deploy/adapters/ansible/openstack_osp9/roles/database/templates/data.j2
deleted file mode 100755
index 66c2fead..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/database/templates/data.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/sh
-mysql -uroot -Dmysql <<EOF
-drop database if exists keystone;
-drop database if exists glance;
-drop database if exists neutron;
-drop database if exists nova;
-drop database if exists cinder;
-drop database if exists heat;
-drop database if exists aodh;
-
-CREATE DATABASE keystone;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON keystone.* TO 'keystone'@'{{ host }}' IDENTIFIED BY '{{ KEYSTONE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE glance;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON glance.* TO 'glance'@'{{ host }}' IDENTIFIED BY '{{ GLANCE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE neutron;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON neutron.* TO 'neutron'@'{{ host }}' IDENTIFIED BY '{{ NEUTRON_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE nova;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON nova.* TO 'nova'@'{{ host }}' IDENTIFIED BY '{{ NOVA_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE cinder;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON cinder.* TO 'cinder'@'{{ host }}' IDENTIFIED BY '{{ CINDER_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE heat;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON heat.* TO 'heat'@'{{ host }}' IDENTIFIED BY '{{ HEAT_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE aodh;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON aodh.* TO 'aodh'@'{{ host }}' IDENTIFIED BY '{{ AODH_DBPASS }}';
-{% endfor %}
-
-{% if WSREP_SST_USER is defined %}
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
-{% endfor %}
-{% endif %}
-EOF
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/database/vars/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/database/vars/main.yml
deleted file mode 100755
index a32897f0..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/database/vars/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-
-services_noarch:
- - mysql
-
-credentials:
- - user: keystone
- db: keystone
- password: "{{ KEYSTONE_DBPASS }}"
- - user: neutron
- db: neutron
- password: "{{ NEUTRON_DBPASS }}"
- - user: glance
- db: glance
- password: "{{ GLANCE_DBPASS }}"
- - user: nova
- db: nova_api
- password: "{{ NOVA_DBPASS }}"
- - user: nova
- db: nova
- password: "{{ NOVA_DBPASS }}"
- - user: cinder
- db: cinder
- password: "{{ CINDER_DBPASS }}"
- - user: heat
- db: heat
- password: "{{ HEAT_DBPASS }}"
- - user: aodh
- db: aodh
- password: "{{ AODH_DBPASS }}"
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/handlers/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/ext-network/handlers/main.yml
deleted file mode 100755
index 36e39072..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/handlers/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart neutron-plugin-openvswitch-agent
- service: name=neutron-openvswitch-agent state=restarted enabled=yes
- when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron-l3-agent
- service: name=neutron-l3-agent state=restarted enabled=yes
-
-- name: kill dnsmasq
- command: killall dnsmasq
- ignore_errors: True
-
-- name: restart neutron-dhcp-agent
- service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
- service: name=neutron-metadata-agent state=restarted enabled=yes
-
-- name: restart xorp
- service: name=xorp state=restarted enabled=yes sleep=10
- ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/ext-network/tasks/main.yml
deleted file mode 100755
index b52b9178..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/tasks/main.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-# FIXME: temporary workaround for random OpenStack API access failures
-- name: restart api server
- service: name={{ item }} state=restarted enabled=yes
- with_items: api_services | union(api_services_noarch)
-
-- name: restart neutron server
- service: name=neutron-server state=restarted enabled=yes
-
-- name: wait for neutron server to come up
- shell: "sleep 10"
-
-- name: create external net
- neutron_network:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.network }}"
- provider_network_type: "{{ public_net_info.type }}"
- provider_physical_network: "{{ public_net_info.provider_network }}"
- provider_segmentation_id: "{{ public_net_info.segment_id}}"
- shared: false
- router_external: yes
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
-
-- name: create external subnet
- neutron_subnet:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.subnet }}"
- network_name: "{{ public_net_info.network }}"
- cidr: "{{ public_net_info.floating_ip_cidr }}"
- enable_dhcp: "{{ public_net_info.enable_dhcp }}"
- no_gateway: "{{ public_net_info.no_gateway }}"
- gateway_ip: "{{ public_net_info.external_gw }}"
- allocation_pool_start: "{{ public_net_info.floating_ip_start }}"
- allocation_pool_end: "{{ public_net_info.floating_ip_end }}"
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/Debian.yml
deleted file mode 100755
index 0b5c78b6..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/Debian.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-api_services:
- - nova-api
- - glance-api
- - ceilometer-api
- - heat-api
- - heat-api-cfn
- - aodh-api
- - cinder-api
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/RedHat.yml b/deploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/RedHat.yml
deleted file mode 100755
index 886401fd..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/RedHat.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-api_services:
- - openstack-nova-api
- - openstack-glance-api
- - openstack-ceilometer-api
- - openstack-heat-api
- - openstack-heat-api-cfn
- - openstack-cinder-api
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/main.yml
deleted file mode 100755
index b19b6ebf..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/ext-network/vars/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-api_services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/openstack_osp9/roles/glance/tasks/nfs.yml
deleted file mode 100755
index 9dc72e31..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/glance/tasks/nfs.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: install nfs packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: nfs_packages
-
-- name: install nfs
- local_action: yum name={{ item }} state=present
- with_items:
- - rpcbind
- - nfs-utils
- run_once: True
-
-- name: create image directory
- local_action: file path=/opt/images state=directory mode=0777
- run_once: True
-
-- name: remove nfs config item if it exists
- local_action: lineinfile dest=/etc/exports state=absent
- regexp="^/opt/images"
- run_once: True
-
-- name: update nfs config
- local_action: lineinfile dest=/etc/exports state=present
- line="/opt/images *(rw,insecure,sync,all_squash)"
- run_once: True
-
-- name: restart compass nfs service
- local_action: service name={{ item }} state=restarted enabled=yes
- with_items:
- - rpcbind
- - nfs-server
- run_once: True
-
-- name: get mount info
- command: mount
- register: mount_info
- tags:
- - recovery
-
-- name: get nfs server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: ip_info
- tags:
- - recovery
-
-- name: restart host nfs service
- service: name={{ item }} state=restarted enabled=yes
- with_items: '{{ nfs_services }}'
-
-- name: mount image directory
- shell: |
- mkdir -p /var/lib/glance/images
- mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images
- sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- #echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
- when: mount_info.stdout.find('images') == -1
- retries: 5
- delay: 3
- tags:
- - recovery
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/glance/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/glance/vars/Debian.yml
deleted file mode 100755
index d1825012..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/glance/vars/Debian.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - glance
- - nfs-common
-
-nfs_packages:
- - nfs-common
-
-nfs_services: []
-
-services:
- - glance-registry
- - glance-api
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/openstack_osp9/roles/glance/vars/RedHat.yml
deleted file mode 100755
index 2987d0c4..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/glance/vars/RedHat.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - openstack-glance
- - rpcbind
-
-nfs_packages:
- - nfs-utils
- - rpcbind
-
-nfs_services:
- - rpcbind
-
-services:
- - openstack-glance-api
- - openstack-glance-registry
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/openstack_osp9/roles/heat/tasks/heat_install.yml
deleted file mode 100755
index b90e6402..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/heat/tasks/heat_install.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install heat related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: generate heat service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-# '
-
-- name: create heat user domain
- shell: >
- . /opt/admin-openrc-v3.sh;
- openstack domain create --description "Stack projects and users" heat;
- openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
- openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
- openstack role create heat_stack_owner;
- openstack role add --project demo --user demo heat_stack_owner;
- when: inventory_hostname == groups['controller'][0]
-
-- name: update heat conf
- template: src=heat.j2
- dest=/etc/heat/heat.conf
- backup=yes
- notify:
- - restart heat service
- - remove heat-sqlite-db
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/heat/templates/heat.j2 b/deploy/adapters/ansible/openstack_osp9/roles/heat/templates/heat.j2
deleted file mode 100755
index 62df9fd9..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/heat/templates/heat.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-[DEFAULT]
-heat_metadata_server_url = http://{{ internal_vip.ip }}:8000
-heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-log_dir = /var/log/heat
-stack_domain_admin = heat_domain_admin
-stack_domain_admin_password = {{ HEAT_PASS }}
-stack_user_domain_name = heat
-
-[database]
-connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[ec2authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = heat
-admin_password = {{ HEAT_PASS }}
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/openstack_osp9/roles/keystone/tasks/keystone_install.yml
deleted file mode 100755
index ba4fc28e..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/keystone/tasks/keystone_install.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install keystone packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: disable boot auto start
- file:
- path={{ item }}
- state=absent
- with_items:
- - /etc/init.d/keystone
- - /etc/init/keystone.conf
- when: ansible_os_family == "Debian"
-
-- name: generate keystone service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: delete sqlite database
- file:
- path: /var/lib/keystone/keystone.db
- state: absent
-
-- name: update keystone conf
- template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
- notify:
- - restart keystone services
-
-- name: ensure listen ports exist
- lineinfile:
- dest: '{{ apache_config_dir }}/ports.conf'
- regexp: '{{ item.regexp }}'
- line: '{{ item.line}}'
- with_items:
- - regexp: "^Listen {{ internal_ip }}:5000"
- line: "Listen {{ internal_ip }}:5000"
- - regexp: "^Listen {{ internal_ip }}:35357"
- line: "Listen {{ internal_ip }}:35357"
- notify:
- - restart keystone services
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
- when: ansible_os_family == 'Debian'
- notify:
- - restart keystone services
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/wsgi-keystone.conf'
- when: ansible_os_family == 'RedHat'
- notify:
- - restart keystone services
-
-- name: enable keystone server
- file:
- src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
- dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
- state: "link"
- when: ansible_os_family == 'Debian'
- notify:
- - restart keystone services
-
-- name: keystone source files
- template: src={{ item }} dest=/opt/{{ item }}
- with_items:
- - admin-openrc.sh
- - demo-openrc.sh
- - admin-openrc-v3.sh
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/keystone/vars/RedHat.yml b/deploy/adapters/ansible/openstack_osp9/roles/keystone/vars/RedHat.yml
deleted file mode 100755
index 63ddce3c..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/keystone/vars/RedHat.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-cron_path: "/var/spool/cron"
-
-packages:
- - openstack-keystone
- - python-openstackclient
-
-services:
- - httpd
-
-apache_config_dir: /etc/httpd/conf.d
-http_service_name: httpd
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/keystone/vars/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/keystone/vars/main.yml
deleted file mode 100755
index 9e97a29c..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/keystone/vars/main.yml
+++ /dev/null
@@ -1,164 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch:
- - python-keystoneclient
-
-services_noarch: []
-os_services:
- - name: keystone
- type: identity
- region: RegionOne
- description: "OpenStack Identity"
- publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
-
- - name: glance
- type: image
- region: RegionOne
- description: "OpenStack Image Service"
- publicurl: "http://{{ public_vip.ip }}:9292"
- internalurl: "http://{{ internal_vip.ip }}:9292"
- adminurl: "http://{{ internal_vip.ip }}:9292"
-
- - name: nova
- type: compute
- region: RegionOne
- description: "OpenStack Compute"
- publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
-
- - name: neutron
- type: network
- region: RegionOne
- description: "OpenStack Networking"
- publicurl: "http://{{ public_vip.ip }}:9696"
- internalurl: "http://{{ internal_vip.ip }}:9696"
- adminurl: "http://{{ internal_vip.ip }}:9696"
-
- - name: ceilometer
- type: metering
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8777"
- internalurl: "http://{{ internal_vip.ip }}:8777"
- adminurl: "http://{{ internal_vip.ip }}:8777"
-
- - name: aodh
- type: alarming
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8042"
- internalurl: "http://{{ internal_vip.ip }}:8042"
- adminurl: "http://{{ internal_vip.ip }}:8042"
-
- - name: cinder
- type: volume
- region: RegionOne
- description: "OpenStack Block Storage"
- publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-
- - name: cinderv2
- type: volumev2
- region: RegionOne
- description: "OpenStack Block Storage v2"
- publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-
- - name: heat
- type: orchestration
- region: RegionOne
- description: "OpenStack Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
-
- - name: heat-cfn
- type: cloudformation
- region: RegionOne
- description: "OpenStack CloudFormation Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8000/v1"
- internalurl: "http://{{ internal_vip.ip }}:8000/v1"
- adminurl: "http://{{ internal_vip.ip }}:8000/v1"
-
-os_users:
- - user: admin
- password: "{{ ADMIN_PASS }}"
- email: admin@admin.com
- role: admin
- tenant: admin
- tenant_description: "Admin Tenant"
-
- - user: glance
- password: "{{ GLANCE_PASS }}"
- email: glance@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: nova
- password: "{{ NOVA_PASS }}"
- email: nova@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: keystone
- password: "{{ KEYSTONE_PASS }}"
- email: keystone@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: neutron
- password: "{{ NEUTRON_PASS }}"
- email: neutron@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: ceilometer
- password: "{{ CEILOMETER_PASS }}"
- email: ceilometer@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: cinder
- password: "{{ CINDER_PASS }}"
- email: cinder@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: aodh
- password: "{{ AODH_PASS }}"
- email: aodh@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: heat
- password: "{{ HEAT_PASS }}"
- email: heat@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: demo
- password: ""
- email: heat@demo.com
- role: heat_stack_user
- tenant: demo
- tenant_description: "Demo Tenant"
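The os_services list above is pure data: each entry carries the three endpoint URLs that the keystone role later registers. The sketch below shows how such a structure expands into one row per endpoint; the VIP addresses are placeholders and only two of the services are reproduced.

    # Placeholder VIPs, for illustration only.
    public_vip = {"ip": "192.0.2.10"}
    internal_vip = {"ip": "10.1.0.50"}

    # Two entries reproduced from the deleted os_services list above.
    os_services = [
        {
            "name": "keystone",
            "type": "identity",
            "publicurl": "http://%s:5000/v2.0" % public_vip["ip"],
            "internalurl": "http://%s:5000/v2.0" % internal_vip["ip"],
            "adminurl": "http://%s:35357/v2.0" % internal_vip["ip"],
        },
        {
            "name": "glance",
            "type": "image",
            "publicurl": "http://%s:9292" % public_vip["ip"],
            "internalurl": "http://%s:9292" % internal_vip["ip"],
            "adminurl": "http://%s:9292" % internal_vip["ip"],
        },
    ]

    # Flatten into (service, type, interface, URL) rows, the shape a
    # registration loop would consume.
    for svc in os_services:
        for interface in ("publicurl", "internalurl", "adminurl"):
            print(svc["name"], svc["type"], interface, svc[interface])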
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/neutron-compute/tasks/main.yml
deleted file mode 100755
index fd3e51d3..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/neutron-compute/tasks/main.yml
+++ /dev/null
@@ -1,75 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install compute-related neutron packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: fix openstack neutron plugin config file
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
- systemctl daemon-reload
- when: ansible_os_family == 'RedHat'
-
-- name: fix openstack neutron plugin config file ubuntu
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
- when: ansible_os_family == "Debian"
-
-- name: generate neutron compute service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: config ml2 plugin
- template: src=templates/ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
-
-- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
-
-- name: config neutron
- template: src=templates/neutron.conf
- dest=/etc/neutron/neutron.conf backup=yes
- notify:
- - restart neutron compute service
- - restart nova-compute services
-
-- meta: flush_handlers
-
-- include: ../../neutron-network/tasks/odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/neutron-compute/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/neutron-compute/vars/Debian.yml
deleted file mode 100755
index 6ae52f3b..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/neutron-compute/vars/Debian.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-packages:
- - neutron-common
- - neutron-plugin-ml2
- - openvswitch-datapath-dkms
- - openvswitch-switch
- - neutron-plugin-openvswitch-agent
-
-services:
- - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/neutron-network/tasks/main.yml
deleted file mode 100755
index 31f7f17c..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/neutron-network/tasks/main.yml
+++ /dev/null
@@ -1,117 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: assert kernel support for vxlan
- command: modinfo -F version vxlan
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: assert iproute2 support for vxlan
- command: ip link add type vxlan help
- register: iproute_out
- failed_when: iproute_out.rc == 255
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install neutron network related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: generate neutron network service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: fix openstack neutron plugin config file
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
- systemctl daemon-reload
- when: ansible_os_family == 'RedHat'
-
-- name: fix openstack neutron plugin config file ubuntu
- shell: |
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
- sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
- when: ansible_os_family == "Debian"
-
-- name: config l3 agent
- template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
- backup=yes
-
-- name: config dhcp agent
- template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
- backup=yes
-
-- name: update dnsmasq-neutron.conf
- template: src=templates/dnsmasq-neutron.conf
- dest=/etc/neutron/dnsmasq-neutron.conf
-
-- name: config metadata agent
- template: src=metadata_agent.ini
- dest=/etc/neutron/metadata_agent.ini backup=yes
-
-- name: config ml2 plugin
- template: src=templates/ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
-
-- name: ln plugin.ini
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
-
-- name: config neutron
- template: src=templates/neutron.conf
- dest=/etc/neutron/neutron.conf backup=yes
-
-- name: force mtu to 1450 for vxlan
- lineinfile:
- dest: /etc/neutron/dnsmasq-neutron.conf
- regexp: '^dhcp-option-force'
- line: 'dhcp-option-force=26,1450'
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- include: firewall.yml
- when: enable_fwaas == True
-
-- include: vpn.yml
- when: enable_vpnaas == True
-
-- include: odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron network relation service
- service: name={{ item }} state=restarted enabled=yes
- with_flattened:
- - services_noarch
- - services
-
-- meta: flush_handlers
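The two vxlan sanity checks near the top of the file above (modinfo and "ip link add type vxlan help") can be reproduced outside Ansible. The snippet below is a standalone sketch of the same checks, assuming a Linux host with kmod and iproute2 installed, and mirrors the rc == 255 failure condition used by the deleted task.

    import subprocess

    # Kernel side: does the vxlan module exist? (same command as the deleted task)
    modinfo = subprocess.run(["modinfo", "-F", "version", "vxlan"],
                             capture_output=True, text=True)

    # Userspace side: does this iproute2 build know the vxlan link type?
    # The deleted task only treats rc == 255 ("unknown link type") as a failure.
    iproute = subprocess.run("ip link add type vxlan help", shell=True,
                             capture_output=True, text=True)

    print("kernel vxlan module:", "ok" if modinfo.returncode == 0 else "missing")
    print("iproute2 vxlan support:", "missing" if iproute.returncode == 255 else "ok")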
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/neutron-network/vars/Debian.yml
deleted file mode 100755
index c95d0265..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/neutron-network/vars/Debian.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - neutron-plugin-ml2
- - openvswitch-datapath-dkms
- - openvswitch-switch
- - neutron-l3-agent
- - neutron-dhcp-agent
- - neutron-plugin-openvswitch-agent
-
-services:
- - openvswitch-switch
- - neutron-openvswitch-agent
-
-openvswitch_agent: neutron-plugin-openvswitch-agent
-
-xorp_packages:
- - xorp
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/nova-compute/tasks/main.yml
deleted file mode 100755
index fe544630..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/nova-compute/tasks/main.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install nova-compute related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
-
-- name: ensure virtlogd is started
- service: name=virtlogd state=started enabled=yes
- when: ansible_os_family == "Debian"
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova.conf
- notify:
- - restart nova-compute services
-
-- name: get number of cpus supporting virtualization
- shell: egrep -c '(vmx|svm)' /proc/cpuinfo
- register: kvm_cpu_num
-
-- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova-compute.conf
- notify:
- - restart nova-compute services
-
-- name: generate nova compute service list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: services | union(services_noarch)
-
-- name: remove nova sqlite db
- shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/nova-compute/templates/nova-compute.conf b/deploy/adapters/ansible/openstack_osp9/roles/nova-compute/templates/nova-compute.conf
deleted file mode 100755
index 305d408b..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/nova-compute/templates/nova-compute.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-[DEFAULT]
-compute_driver=libvirt.LibvirtDriver
-force_raw_images = true
-[libvirt]
-{% if kvm_cpu_num.stdout_lines[0]|int == 0 %}
-virt_type=qemu
-{% else %}
-virt_type=kvm
-{% endif %}
-images_type = raw
-mem_stats_period_seconds=0
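The virt_type branch in the template above is driven by the kvm_cpu_num fact registered earlier in the role (egrep -c '(vmx|svm)' /proc/cpuinfo). A standalone sketch of the same decision, assuming a Linux host, is shown below.

    import re

    # Count CPU virtualization flags, as the deleted egrep task does
    # (match count rather than line count, but zero vs. non-zero is what matters).
    with open("/proc/cpuinfo") as f:
        kvm_cpu_num = len(re.findall(r"vmx|svm", f.read()))

    # No VT-x/AMD-V flags -> fall back to plain QEMU emulation, otherwise use KVM,
    # matching the {% if kvm_cpu_num.stdout_lines[0]|int == 0 %} branch above.
    virt_type = "qemu" if kvm_cpu_num == 0 else "kvm"
    print("virt_type =", virt_type)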
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/nova-compute/templates/nova.conf b/deploy/adapters/ansible/openstack_osp9/roles/nova-compute/templates/nova.conf
deleted file mode 100755
index 73b49a5a..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/nova-compute/templates/nova.conf
+++ /dev/null
@@ -1,89 +0,0 @@
-[DEFAULT]
-block_device_allocate_retries=5
-block_device_allocate_retries_interval=300
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=osapi_compute,metadata
-
-default_floating_pool={{ public_net_info.network }}
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[api_database]
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ internal_vip.ip }}
-
-[neutron]
-url = http://{{ internal_vip.ip }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-auth_type = password
-auth_url = http://{{ internal_vip.ip }}:35357
-password = {{ NEUTRON_PASS }}
-username = neutron
-project_domain_name = default
-user_domain_name = default
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/openstack_osp9/roles/nova-controller/tasks/nova_config.yml
deleted file mode 100755
index f332c97a..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/nova-controller/tasks/nova_config.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: nova api db sync
- shell: su -s /bin/sh -c "nova-manage api_db sync" nova
- ignore_errors: True
- notify:
- - restart nova service
-
-- name: nova db sync
- nova_manage: action=dbsync
- notify:
- - restart nova service
-
-- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/odl_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_osp9/roles/odl_cluster/tasks/openvswitch.yml
deleted file mode 100755
index 33099104..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/odl_cluster/tasks/openvswitch.yml
+++ /dev/null
@@ -1,148 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-#- name: Install Crudini
-# apt: name={{ item }} state=present
-# with_items:
-# - crudini
-
-- name: install compute packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: compute_packages | union(compute_packages_noarch)
-
-- name: remove neutron-openvswitch-agent service daemon
- shell: sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ;
-
-- name: shut down and disable Neutron's openvswitch agent services
- service: name={{ service_ovs_agent_name }} state=stopped enabled=no
-
-- name: remove Neutron's openvswitch agent services
- shell: >
- update-rc.d -f {{ service_ovs_agent_name }} remove;
- mv /etc/init.d/{{ service_ovs_agent_name }} /home/{{ service_ovs_agent_name }};
- mv /etc/init/{{ service_ovs_agent_name }}.conf /home/{{ service_ovs_agent_name }}.conf;
- when: ansible_os_family == "Debian"
-
-
-- name: Stop the Open vSwitch service and clear existing OVSDB
- shell: >
- service {{ service_ovs_name }} stop ;
- rm -rf /var/log/openvswitch/* ;
- rm -rf /etc/openvswitch/conf.db ;
- service {{ service_ovs_name }} start ;
-
-- name: set opendaylight as the manager
- command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
-
-- name: check br-int
- shell: ovs-vsctl list-br | grep br-int; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-int; done
-
-- name: set local ip in openvswitch
- shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '};
-
-#'
-
-##################################################################
-########### Recover External network for odl l3 #################
-##################################################################
-
-- name: check br-ex
- shell: ovs-vsctl list-br | grep br-ex; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-ex; done
- when: odl_l3_agent == "Enable"
-
-- name: add ovs uplink
- openvswitch_port: bridge=br-ex port={{ item["interface"] }} state=present
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and odl_l3_agent == "Enable"
-
-- name: wait 10 seconds
- shell: sleep 10
- when: odl_l3_agent == "Enable"
-
-- name: set external nic in openvswitch
- shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config:provider_mappings=br-ex:{{ item["interface"] }}
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and odl_l3_agent == "Enable"
-
-- name: copy recovery script
- copy: src={{ item }} dest=/opt/setup_networks
- with_items:
- - recover_network_odl_l3.py
- - setup_networks_odl_l3.py
- when: odl_l3_agent == "Enable"
-
-- name: recover external script
- shell: python /opt/setup_networks/recover_network_odl_l3.py
- when: odl_l3_agent == "Enable"
-
-- name: update keepalived info
- template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf
- when: inventory_hostname in groups['odl'] and odl_l3_agent == "Enable"
-
-- name: modify net-init
- shell: sed -i 's/setup_networks.py/setup_networks_odl_l3.py/g' /etc/init.d/net_init
- when: odl_l3_agent == "Enable"
-
-##################################################################
-########### Recover External network for odl l2 #################
-##################################################################
-
-- name: add ovs bridge
- openvswitch_bridge: bridge={{ item["name"] }} state=present
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and odl_l3_agent == "Disable"
-
-- name: add ovs uplink
- openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and odl_l3_agent == "Disable"
-
-- name: copy recovery script
- copy: src={{ item }} dest=/opt/setup_networks
- with_items:
- - recover_network.py
- when: odl_l3_agent == "Disable"
-
-- name: recover external script
- shell: python /opt/setup_networks/recover_network.py
- when: odl_l3_agent == "Disable"
-
-##################################################################
-
-
-- name: restart keepalived to recover external IP
- shell: service keepalived restart
- when: inventory_hostname in groups['odl']
- ignore_errors: True
-
-
-
-##################################################################
-##################################################################
-##################################################################
-- name: configure opendaylight -> ml2
- shell: >
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs enable_tunneling True;
-
-#- name: Adjust Service Daemon
-# shell: >
-# sed -i '/neutron-openvswitch-agent/d' /opt/service ;
-# echo opendaylight >> /opt/service ;
-
-- name: copy ml2 configuration script
- template:
- src: ml2_conf.sh
- dest: "/opt/ml2_conf.sh"
- mode: 0777
-
-- name: execute ml2 configuration script
- command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
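The three crudini calls in the "configure opendaylight -> ml2" task above simply rewrite keys in ml2_conf.ini. For orientation, here is a sketch of the resulting INI content produced with the standard library instead of crudini; the output path is a throwaway location chosen for the example, while the real role edits the neutron config in place.

    import configparser

    ml2 = configparser.ConfigParser()
    # Same keys the crudini invocations above set in
    # /etc/neutron/plugins/ml2/ml2_conf.ini.
    ml2.read_dict({
        "ml2": {
            "mechanism_drivers": "opendaylight",
            "tenant_network_types": "vxlan",
        },
        "ovs": {
            "enable_tunneling": "True",
        },
    })

    # Written to a scratch file for illustration only.
    with open("/tmp/ml2_conf.sample.ini", "w") as f:
        ml2.write(f)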
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/odl_cluster/vars/Debian.yml
deleted file mode 100755
index a3d5dd02..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/odl_cluster/vars/Debian.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-controller_packages:
-# - openjdk-7-jdk
- - crudini
-
-compute_packages:
- - crudini
-
-service_ovs_name: openvswitch-switch
-service_ovs_agent_name: neutron-openvswitch-agent
-
-service_file:
- src: opendaylight.conf
- dst: /etc/init/opendaylight.conf
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/log.py b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/log.py
deleted file mode 100755
index a22ff0fe..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/log.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-import os
-loggers = {}
-log_dir = "/var/log/setup_network"
-try:
- os.makedirs(log_dir)
-except:
- pass
-
-
-def getLogger(name):
- if name in loggers:
- return loggers[name]
-
- logger = logging.getLogger(name)
- logger.setLevel(logging.DEBUG)
-
- # create file handler which logs even debug messages
- log_file = "%s/%s.log" % (log_dir, name)
- try:
- os.remove(log_file)
- except:
- pass
-
- fh = logging.FileHandler(log_file)
- fh.setLevel(logging.DEBUG)
-
- # create console handler with a higher log level
- ch = logging.StreamHandler()
- ch.setLevel(logging.ERROR)
-
- # create formatter and add it to the handlers
- formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
- ch.setFormatter(formatter)
- fh.setFormatter(formatter)
-
- # add the handlers to logger
- logger.addHandler(ch)
- logger.addHandler(fh)
-
- loggers[name] = logger
- return logger
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/net_init b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/net_init
deleted file mode 100755
index c27a8bf8..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/net_init
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-## BEGIN INIT INFO
-# Provides: anamon.init
-# Default-Start: 3 5
-# Default-Stop: 0 1 2 4 6
-# Required-Start: $network
-# Short-Description: Starts the cobbler anamon boot notification program
-# Description: anamon runs the first time a machine is booted after
-# installation.
-## END INIT INFO
-
-#
-# anamon.init: Starts the cobbler post-install boot notification program
-#
-# chkconfig: 35 0 6
-#
-# description: anamon runs the first time a machine is booted after
-# installation.
-#
-python /opt/setup_networks/setup_networks.py
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/setup_networks.py b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/setup_networks.py
deleted file mode 100755
index 315d5c8d..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/files/setup_networks/setup_networks.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import yaml
-import netaddr
-import os
-import log as logging
-
-LOG = logging.getLogger("net-init")
-config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
-
-
-def setup_bondings(bond_mappings):
- print bond_mappings
-
-
-def add_vlan_link(interface, ifname, vlan_id):
- LOG.info("add_vlan_link enter")
- cmd = "ip link add link %s name %s type vlan id %s; " % (
- ifname, interface, vlan_id)
- cmd += "ip link set %s up; ip link set %s up" % (interface, ifname)
- LOG.info("add_vlan_link: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None):
- LOG.info("add_ovs_port enter")
- cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname)
- if vlan_id:
- cmd += " tag=%s" % vlan_id
- cmd += " -- set Interface %s type=internal;" % ifname
- cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" % (ifname, uplink) # noqa
- cmd += "ip link set %s up;" % ifname
- LOG.info("add_ovs_port: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def setup_intfs(sys_intf_mappings, uplink_map):
- LOG.info("setup_intfs enter")
- for intf_name, intf_info in sys_intf_mappings.items():
- if intf_info["type"] == "vlan":
- add_vlan_link(
- intf_name,
- intf_info["interface"],
- intf_info["vlan_tag"])
- elif intf_info["type"] == "ovs":
- add_ovs_port(
- intf_info["interface"],
- intf_name,
- uplink_map[intf_info["interface"]],
- vlan_id=intf_info.get("vlan_tag"))
- else:
- pass
-
-
-def setup_ips(ip_settings, sys_intf_mappings):
- LOG.info("setup_ips enter")
- for intf_info in ip_settings.values():
- network = netaddr.IPNetwork(intf_info["cidr"])
- if sys_intf_mappings[intf_info["name"]]["type"] == "ovs":
- intf_name = intf_info["name"]
- else:
- intf_name = intf_info["alias"]
- cmd = "ip addr add %s/%s brd %s dev %s;" \
- % (intf_info["ip"], intf_info["netmask"], str(network.broadcast), intf_name) # noqa
- if "gw" in intf_info:
- cmd += "route del default;"
- cmd += "ip route add default via %s dev %s" % (
- intf_info["gw"], intf_name)
- LOG.info("setup_ips: cmd=%s" % cmd)
- os.system(cmd)
-
-
-def main(config):
- uplink_map = {}
- setup_bondings(config["bond_mappings"])
- for provider_net in config["provider_net_mappings"]:
- uplink_map[provider_net['name']] = provider_net['interface']
-
- setup_intfs(config["sys_intf_mappings"], uplink_map)
- setup_ips(config["ip_settings"], config["sys_intf_mappings"])
-
-if __name__ == "__main__":
- os.system("service openvswitch-switch status|| service openvswitch-switch start") # noqa
- config = yaml.load(open(config_path))
- main(config)
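setup_networks.py above reads /opt/setup_networks/network.cfg (the template rendered later in this diff). The sketch below shows the shape of that file as inferred from how main(), setup_intfs() and setup_ips() index the parsed YAML; all addresses and interface names are placeholders, and no system commands are run.

    import yaml

    SAMPLE_CFG = """
    bond_mappings: []
    provider_net_mappings:
      - name: br-prv
        type: ovs
        interface: eth1
    sys_intf_mappings:
      mgmt:
        type: vlan
        interface: eth0
        vlan_tag: 101
    ip_settings:
      mgmt:
        name: mgmt
        alias: eth0.101
        ip: 10.1.0.11
        netmask: 24
        cidr: 10.1.0.0/24
        gw: 10.1.0.1
    """

    # safe_load here; the deleted script itself uses yaml.load.
    config = yaml.safe_load(SAMPLE_CFG)

    # The same lookups the deleted script performs on the real file.
    uplinks = {net["name"]: net["interface"]
               for net in config["provider_net_mappings"]}
    print(uplinks)
    print(config["sys_intf_mappings"]["mgmt"]["vlan_tag"])
    print(config["ip_settings"]["mgmt"]["cidr"])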
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/handlers/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/handlers/main.yml
deleted file mode 100755
index e099fcf4..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/handlers/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: restart onos service
- service: name=onos state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/main.yml
deleted file mode 100755
index 6b619057..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/main.yml
+++ /dev/null
@@ -1,121 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: install onos related packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages | union(packages_noarch)
- when: groups['onos']|length !=0
-
-- name: remove neutron-openvswitch-agent auto start
- shell: >
- update-rc.d neutron-openvswitch-agent remove;
- sed -i /neutron-openvswitch-agent/d /opt/service
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: shut down and disable Neutron's agent services
- service: name=neutron-openvswitch-agent state=stopped
- when: groups['onos']|length !=0
- ignore_errors: True
-
-- name: remove neutron-l3-agent auto start
- shell: >
- update-rc.d neutron-l3-agent remove;
- sed -i /neutron-l3-agent/d /opt/service
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: shut down and disable Neutron's l3 agent services
- service: name=neutron-l3-agent state=stopped
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: Stop the Open vSwitch service and clear existing OVSDB
- shell: >
- service openvswitch-switch stop ;
- rm -rf /var/log/openvswitch/* ;
- rm -rf /etc/openvswitch/conf.db ;
- service openvswitch-switch start ;
- when: groups['onos']|length !=0
- ignore_errors: True
-
-##################################################################
-########### Recover External network #################
-##################################################################
-
-- name: add ovs bridge
- openvswitch_bridge: bridge={{ item["name"] }} state=present
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and groups['onos']|length !=0
-
-- name: add ovs uplink
- openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and groups['onos']|length !=0
-
-- name: add ovs uplink
- shell: ip link set {{ item["interface"] }} up
- with_items: "{{ network_cfg['provider_net_mappings'] }}"
- when: item["type"] == "ovs" and groups['onos']|length !=0
-
-- name: ensure script dir exist
- shell: mkdir -p /opt/setup_networks
- when: groups['onos']|length !=0
-
-- name: copy scripts
- copy: src={{ item }} dest=/opt/setup_networks
- with_items:
- - setup_networks/log.py
- - setup_networks/setup_networks.py
- when: groups['onos']|length !=0
-
-- name: copy boot scripts
- copy: src={{ item }} dest=/etc/init.d/ mode=0755
- with_items:
- - setup_networks/net_init
- when: groups['onos']|length !=0
-
-- name: copy config files
- template: src=network.cfg dest=/opt/setup_networks
- when: groups['onos']|length !=0
-
-- name: make sure python lib exist
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items:
- - python-yaml
- - python-netaddr
- when: groups['onos']|length !=0
-
-- name: run scripts
- shell: python /opt/setup_networks/setup_networks.py
- when: groups['onos']|length !=0
-
-- name: add to boot scripts
- service: name=net_init enabled=yes
- when: groups['onos']|length !=0
-##################################################################
-
-- name: restart keepalived to recover external IP
- shell: service keepalived restart
- when: inventory_hostname in groups['onos']
- ignore_errors: True
-
-- name: Install ONOS Cluster on Controller
- include: onos_controller.yml
- when: inventory_hostname in groups['onos'] and onos_sfc == "Disable"
-
-- name: Install ONOS Cluster on Controller
- include: onos_sfc_controller.yml
- when: inventory_hostname in groups['onos'] and onos_sfc == "Enable"
-
-- name: Config ONOS Cluster
- include: openvswitch.yml
- when: groups['onos']|length !=0
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/onos_controller.yml
deleted file mode 100755
index 9ab8d1c1..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/onos_controller.yml
+++ /dev/null
@@ -1,131 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download onos driver packages
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
-
-- name: unarchive onos driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
-
-- name: install onos driver
- command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh"
-
-- name: install onos required packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages
-
-- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-
-- name: unarchive jdk8 package
- command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
-
-- name: install install_jdk8 package
- command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
-
-- name: create JAVA_HOME environment variable
- shell: >
- export J2SDKDIR=/usr/lib/jvm/java-8-oracle;
- export J2REDIR=/usr/lib/jvm/java-8-oracle/jre;
- export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin;
- export JAVA_HOME=/usr/lib/jvm/java-8-oracle;
- export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db;
-
-- name: create onos group
- group: name=onos system=yes state=present
-
-- name: create onos user
- user:
- name: onos
- group: onos
- home: "{{ onos_home }}"
- createhome: "yes"
- system: "yes"
- shell: "/bin/false"
-
-- name: download onos package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }}
-
-- name: create new jar repository
- command: su -s /bin/sh -c "mkdir ~/.m2"
- ignore_errors: True
-
-- name: download jar repository
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
-
-- name: extract jar repository
- command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
-
-- name: extract onos package
- command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos
-
-- name: configure onos service
- shell: >
- echo 'export ONOS_OPTS=debug' > {{ onos_home }}/options;
- echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
- mkdir {{ onos_home }}/var;
- mkdir {{ onos_home }}/config;
- sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-
-- name: configure onos boot feature
- shell: >
- sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
-
-- name: wait for config time
- shell: "sleep 10"
-
-- name: start onos service
- service: name=onos state=started enabled=yes
-
-- name: wait for onos start time
- shell: "sleep 200"
-
-- name: add onos auto start
- shell: >
- echo "onos">>/opt/service
-
-##########################################################################################################
-################################ ONOS connect with OpenStack ################################
-##########################################################################################################
-- name: Configure Neutron1
- shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins onos_router;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
-
-- name: Create ML2 Configuration File
- template:
- src: ml2_conf.sh
- dest: "/opt/ml2_conf.sh"
- mode: 0777
-
-- name: Configure Neutron2
- command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-
-- name: Configure Neutron3
- shell: >
- mysql -e "drop database if exists neutron_ml2;";
- mysql -e "create database neutron_ml2 character set utf8;";
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
- su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
-
-- name: Restart neutron-server
- service: name=neutron-server state=restarted
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/onos_sfc_controller.yml b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/onos_sfc_controller.yml
deleted file mode 100755
index 226923e8..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/onos_sfc_controller.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download onos driver packages
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
-
-- name: download onos sfc driver package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}" dest=/opt/
-
-- name: unarchive onos driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
-
-- name: unarchive onos sfc driver package
- command: su -s /bin/sh -c "tar xvf /opt/networking-sfc.tar -C /opt/"
-
-- name: install onos driver
- command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh"
-
-- name: install onos sfc driver
- command: su -s /bin/sh -c "/opt/networking-sfc/install_driver.sh"
-
-- name: install onos required packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages
-
-- name: download oracle-jdk8 package file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-- name: download oracle-jdk8 script file
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-
-- name: unarchive jdk8 package
- command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
-
-- name: install install_jdk8 package
- command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
-
-- name: create JAVA_HOME environment variable
- shell: >
- export J2SDKDIR=/usr/lib/jvm/java-8-oracle;
- export J2REDIR=/usr/lib/jvm/java-8-oracle/jre;
- export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin;
- export JAVA_HOME=/usr/lib/jvm/java-8-oracle;
- export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db;
-
-- name: create onos group
- group: name=onos system=yes state=present
-
-- name: create onos user
- user:
- name: onos
- group: onos
- home: "{{ onos_home }}"
- createhome: "yes"
- system: "yes"
- shell: "/bin/false"
-
-- name: download onos package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }}
-
-- name: create new jar repository
- command: su -s /bin/sh -c "mkdir ~/.m2"
- ignore_errors: True
-
-- name: download jar repository
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
-
-- name: extract jar repository
- command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
-
-- name: extract onos package
- command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos
-
-- name: configure onos service
- shell: >
- echo 'export ONOS_OPTS=debug' > {{ onos_home }}/options;
- echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
- mkdir {{ onos_home }}/var;
- mkdir {{ onos_home }}/config;
- sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-
-- name: configure onos boot feature
- shell: >
- sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
-
-- name: wait for config time
- shell: "sleep 10"
-
-- name: start onos service
- service: name=onos state=started enabled=yes
-
-- name: wait for onos start time
- shell: "sleep 200"
-
-- name: add onos auto start
- shell: >
- echo "onos">>/opt/service
-
-##########################################################################################################
-################################ ONOS connect with OpenStack ################################
-##########################################################################################################
-- name: Configure Neutron1
- shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_sfc.services.sfc.plugin.SfcPlugin,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,onos_router;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
-
-- name: Create ML2 Configuration File
- template:
- src: ml2_conf.sh
- dest: "/opt/ml2_conf.sh"
- mode: 0777
-
-- name: Configure Neutron2
- command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-
-- name: Configure Neutron3
- shell: >
- mysql -e "drop database if exists neutron_ml2;";
- mysql -e "create database neutron_ml2 character set utf8;";
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
- su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
-
-- name: Restart neutron-server
- service: name=neutron-server state=restarted
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/openvswitch.yml
deleted file mode 100755
index 76863890..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/tasks/openvswitch.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: set veth port
- shell: >
- ip link add onos_port1 type veth peer name onos_port2;
- ifconfig onos_port1 up;
- ifconfig onos_port2 up;
- ignore_errors: True
-
-- name: set veth to ovs
- shell: >
-    export externalMac=`ifconfig eth1 | grep -Eo '\<[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'`;
-    ifconfig onos_port2 hw ether $externalMac;
- ovs-vsctl add-port br-prv onos_port1;
- ignore_errors: True
-
-- name: add openflow-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
- when: inventory_hostname in groups['onos']
-
-- name: add openflow feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdatabase feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdb-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'";
- when: inventory_hostname in groups['onos']
-
-- name: add onos driver ovsdb feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'";
- when: inventory_hostname in groups['onos']
-
-- name: add ovsdb provider host feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'";
- when: inventory_hostname in groups['onos']
-
-- name: add vtn feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'";
- when: inventory_hostname in groups['onos']
-
-- name: set public eth card start
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n onos_port2'"
- when: inventory_hostname in groups['onos']
-
-- name: Set ONOS as the manager
- command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
-
-- name: delete default gateway
- shell: >
- route delete default;
- when: inventory_hostname not in groups['onos']
- ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/keepalived.conf b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/keepalived.conf
deleted file mode 100755
index 4ccf1c43..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/keepalived.conf
+++ /dev/null
@@ -1,47 +0,0 @@
-global_defs {
- router_id {{ inventory_hostname }}
-}
-
-vrrp_sync_group VG1 {
- group {
- internal_vip
- public_vip
- }
-}
-
-vrrp_instance internal_vip {
- interface {{ internal_vip.interface }}
- virtual_router_id {{ vrouter_id_internal }}
- state BACKUP
- nopreempt
- advert_int 1
- priority {{ 50 + (host_index[inventory_hostname] * 50) }}
-
- authentication {
- auth_type PASS
- auth_pass 1234
- }
-
- virtual_ipaddress {
- {{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ internal_vip.interface }}
- }
-}
-
-vrrp_instance public_vip {
- interface br-ex
- virtual_router_id {{ vrouter_id_public }}
- state BACKUP
- nopreempt
- advert_int 1
- priority {{ 50 + (host_index[inventory_hostname] * 50) }}
-
- authentication {
- auth_type PASS
- auth_pass 4321
- }
-
- virtual_ipaddress {
- {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev br-ex
- }
-
-}
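The VRRP priority line in the template above is computed from the node's position in the host list. A short sketch of the resulting priorities for a three-node group (host indices assumed to be 0, 1 and 2) follows.

    # priority = 50 + host_index * 50, as in the keepalived template above.
    for host_index in (0, 1, 2):   # assumed indices for a three-node group
        print("host", host_index, "-> priority", 50 + host_index * 50)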
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/ml2_conf.sh
deleted file mode 100755
index 8af03df4..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/ml2_conf.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
-[onos]
-password = admin
-username = admin
-url_path = http://{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:8181/onos/vtn
-EOT
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/my_configs.debian b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/my_configs.debian
deleted file mode 100755
index 5ab1519b..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/my_configs.debian
+++ /dev/null
@@ -1,14 +0,0 @@
-{%- for alias, intf in host_ip_settings.items() %}
-
-auto {{ alias }}
-iface {{ alias }} inet static
- address {{ intf["ip"] }}
- netmask {{ intf["netmask"] }}
-{% if "gw" in intf %}
- gateway {{ intf["gw"] }}
-{% endif %}
-{% if intf["name"] == alias %}
- pre-up ip link set {{ sys_intf_mappings[alias]["interface"] }} up
- pre-up ip link add link {{ sys_intf_mappings[alias]["interface"] }} name {{ alias }} type vlan id {{ sys_intf_mappings[alias]["vlan_tag"] }}
-{% endif %}
-{% endfor %}
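As an illustration, for a hypothetical alias mgmt carried as VLAN 101 on eth0 with address 10.1.0.50/255.255.255.0 and no gateway (so the pre-up branch fires and the gateway branch does not), the template renders:

auto mgmt
iface mgmt inet static
    address 10.1.0.50
    netmask 255.255.255.0
    pre-up ip link set eth0 up
    pre-up ip link add link eth0 name mgmt type vlan id 101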
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/network.cfg b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/network.cfg
deleted file mode 100755
index 75ba90cb..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/templates/network.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-bond_mappings: {{ network_cfg["bond_mappings"] }}
-ip_settings: {{ ip_settings[inventory_hostname] }}
-sys_intf_mappings: {{ sys_intf_mappings }}
-provider_net_mappings: {{ network_cfg["provider_net_mappings"] }}
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/Debian.yml
deleted file mode 100755
index c480dd9f..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/Debian.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - software-properties-common
- - crudini
- - git
-
-services: []
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/RedHat.yml b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/RedHat.yml
deleted file mode 100755
index c480dd9f..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/RedHat.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages:
- - software-properties-common
- - crudini
- - git
-
-services: []
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/main.yml
deleted file mode 100755
index 0f6204e2..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/onos_cluster/vars/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-
-services_noarch: []
-onos_pkg_name: onos-1.6.0.tar.gz
-onos_home: /opt/onos/
-karaf_dist: apache-karaf-3.0.5
-jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
-jdk8_script_name: install_jdk8.tar
-onos_driver: networking-onos.tar
-onos_sfc_driver: networking-sfc.tar
-repository: repository.tar
-onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base,onos-openflow,onos-ovsdatabase,onos-ovsdb-base,onos-drivers-ovsdb,onos-ovsdb-provider-host,onos-app-vtn-onosfw
-
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/open-contrail/tasks/uninstall-openvswitch.yml b/deploy/adapters/ansible/openstack_osp9/roles/open-contrail/tasks/uninstall-openvswitch.yml
deleted file mode 100755
index 836cb78b..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/open-contrail/tasks/uninstall-openvswitch.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: del ovs bridge
- shell: ovs-vsctl del-br br-int; ovs-vsctl del-br br-tun; ovs-vsctl del-br br-prv;
-
-- name: remove ovs and ovs-plugin daemon
- shell: >
- sed -i '/neutron-openvswitch-agent/d' /opt/service ;
- sed -i '/openvswitch-switch/d' /opt/service ;
-
-- name: stop ovs and ovs-plugin
- shell: service openvswitch-switch stop; service neutron-openvswitch-agent stop;
-
-- name: remove ovs and ovs-plugin files
- shell: >
- update-rc.d -f neutron-openvswitch-agent remove;
- mv /etc/init.d/neutron-openvswitch-agent /home/neutron-openvswitch-agent;
- mv /etc/init/neutron-openvswitch-agent.conf /home/neutron-openvswitch-agent.conf;
- update-rc.d -f openvswitch-switch remove ;
- mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ;
- mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ;
- update-rc.d -f neutron-ovs-cleanup remove ;
- mv /etc/init.d/neutron-ovs-cleanup /home/neutron-ovs-cleanup ;
- mv /etc/init/neutron-ovs-cleanup.conf /home/neutron-ovs-cleanup.conf ;
-
-- name: remove ovs kernel module
- shell: rmmod vport_vxlan; rmmod openvswitch;
- ignore_errors: True
-
-- name: copy recovery script
- copy: src={{ item }} dest=/opt/setup_networks
- with_items:
-# - recover_network_opencontrail.py
- - setup_networks_opencontrail.py
-
-#- name: recover external script
-# shell: python /opt/setup_networks/recover_network_opencontrail.py
-
-- name: modify net-init
- shell: sed -i 's/setup_networks.py/setup_networks_opencontrail.py/g' /etc/init.d/net_init
-
-- name: resolve dual NIC problem
- shell: >
- echo "net.ipv4.conf.all.arp_ignore=1" >> /etc/sysctl.conf ;
- /sbin/sysctl -p ;
- echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore ;
-
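A quick way to confirm the removal took effect on a node (illustrative commands, not part of the role):

lsmod | grep -E 'openvswitch|vport_vxlan'                      # should print nothing after the modules are removed
ls /etc/init.d/ | grep -E 'openvswitch|neutron-openvswitch'    # empty; the init scripts were moved under /home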
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/secgroup/templates/neutron.j2 b/deploy/adapters/ansible/openstack_osp9/roles/secgroup/templates/neutron.j2
deleted file mode 100755
index e7107660..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/secgroup/templates/neutron.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-[securitygroup]
-firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-enable_security_group = True
-
-[agent]
-prevent_arp_spoofing = False
-
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/secgroup/templates/nova.j2 b/deploy/adapters/ansible/openstack_osp9/roles/secgroup/templates/nova.j2
deleted file mode 100755
index 7dbc216a..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/secgroup/templates/nova.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/secgroup/vars/Debian.yml b/deploy/adapters/ansible/openstack_osp9/roles/secgroup/vars/Debian.yml
deleted file mode 100755
index 221a3d92..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/secgroup/vars/Debian.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-configs_templates:
- - src: nova.j2
- dest:
- - /etc/nova/nova.conf
- - src: neutron.j2
- dest:
- - /etc/neutron/plugins/ml2/ml2_conf.ini
- - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
- - /etc/neutron/plugins/ml2/restproxy.ini
-
-controller_services:
- - nova-api
- - nova-cert
- - nova-conductor
- - nova-consoleauth
- - nova-novncproxy
- - nova-scheduler
- - neutron-server
- - neutron-openvswitch-agent
- - neutron-l3-agent
- - neutron-dhcp-agent
- - neutron-metadata-agent
-
-compute_services:
- - nova-compute
- - neutron-openvswitch-agent
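The src/dest structure above is typically consumed with the same crudini pattern that appears in the ceilometer tasks removed further down in this diff; a sketch of such a task (not the actual secgroup role) would be:

- name: merge security group settings into the target configs
  shell: crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }}
  with_subelements:
    - configs_templates
    - dest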
diff --git a/deploy/adapters/ansible/openstack_osp9/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_osp9/roles/tacker/templates/tacker.j2
deleted file mode 100755
index f1d9125b..00000000
--- a/deploy/adapters/ansible/openstack_osp9/roles/tacker/templates/tacker.j2
+++ /dev/null
@@ -1,426 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = True
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = True
-
-# Where to store Tacker state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/tacker
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-auth_strategy = keystone
-policy_file = /usr/local/etc/tacker/policy.json
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-# log_dir =
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ internal_ip }}
-
-# Port to bind the API server to
-bind_port = 8888
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of tacker.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Tacker core plugin entrypoint to be loaded from the
-# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the tacker source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-# core_plugin =
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the tacker source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-
-service_plugins = vnfm,nfvo
-
-# Paste configuration file
-# api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-# auth_strategy = keystone
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Tacker is
-# being used in conjunction with nova security groups
-# allow_overlapping_ips = False
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = tacker.openstack.common.rpc.impl_kombu
-# Size of RPC thread pool
-# rpc_thread_pool_size = 64
-# Size of RPC connection pool
-# rpc_conn_pool_size = 30
-# Seconds to wait for a response from call or multicall
-# rpc_response_timeout = 60
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-# rpc_cast_timeout = 30
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = tacker
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# IP address of the RabbitMQ installation
-# rabbit_host = localhost
-# Password of the RabbitMQ server
-# rabbit_password = guest
-# Port where RabbitMQ server is running/listening
-# rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-# rabbit_userid = guest
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-
-# QPID
-# rpc_backend=tacker.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=tacker.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = tacker.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = tacker.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = tacker.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-# default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-# notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-# agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# tacker server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to tacker server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-# api_workers = 0
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-# rpc_workers = 0
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== tacker nova interactions ==========
-# Send notification to nova when port status is active.
-# notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-# notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-# nova_url = http://127.0.0.1:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-# nova_region_name =
-
-# Username for connection to nova in admin context
-# nova_admin_username =
-
-# The uuid of the admin nova tenant
-# nova_admin_tenant_id =
-
-# Password for connection to nova in admin context.
-# nova_admin_password =
-
-# Authorization URL for connection to nova in admin context.
-# nova_admin_auth_url =
-
-# CA file for novaclient to verify server certificates
-# nova_ca_certificates_file =
-
-# Boolean to control ignoring SSL errors on the nova url
-# nova_api_insecure = False
-
-# Number of seconds between sending events to nova if there are any events to send
-# send_events_interval = 2
-
-# ======== end of tacker nova interactions ==========
-
-[agent]
-# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-# report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-signing_dir = /var/cache/tacker
-#cafile = /opt/stack/data/ca-bundle.pem
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = tacker
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-identity_uri = http://{{ internal_vip.ip }}:5000
-auth_uri = http://{{ internal_vip.ip }}:5000
-
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/tacker
-connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main tacker server. (Leave it as is if the database runs on this host.)
-# connection = sqlite://
-# NOTE: In deployment the [database] section and its connection attribute may
-# be set in the corresponding core plugin '.ini' file. However, it is suggested
-# to put the [database] section and its connection attribute in this
-# configuration file.
-
-# Database engine for which script will be generated when using offline
-# migration
-# engine =
-
-# The SQLAlchemy connection string used to connect to the slave database
-# slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-# max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-# retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-# min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-# max_pool_size = 10
-
-# Timeout in seconds before idle sql connections are reaped
-# idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-# max_overflow = 20
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-# connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-# connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-# pool_timeout = 10
-
-[tacker]
-# Specify drivers for hosting device
-# infra_driver = heat,nova,noop
-
-# Specify drivers for mgmt
-# mgmt_driver = noop,openwrt
-
-# Specify drivers for monitoring
-# monitor_driver = ping, http_ping
-
-[nfvo_vim]
-# Supported VIM drivers, resource orchestration controllers such as OpenStack, kvm
-#Default VIM driver is OpenStack
-#vim_drivers = openstack
-#Default VIM placement if vim id is not provided
-default_vim = VIM0
-
-[vim_keys]
-#openstack = /etc/tacker/vim/fernet_keys
-[tacker_nova]
-# parameters for novaclient to talk to nova
-region_name = RegionOne
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = nova
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-
-[tacker_heat]
-heat_uri = http://{{ internal_vip.ip }}:8004/v1
-stack_retries = 60
-stack_retry_wait = 5
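For example, with a hypothetical internal VIP of 10.1.0.222, the templated connection and endpoint settings above render as:

connection = mysql://tacker:TACKER_DBPASS@10.1.0.222:3306/tacker?charset=utf8
auth_url = http://10.1.0.222:35357
identity_uri = http://10.1.0.222:5000
heat_uri = http://10.1.0.222:8004/v1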
diff --git a/deploy/adapters/ansible/openstack_osp9/templates/dnsmasq-neutron.conf b/deploy/adapters/ansible/openstack_osp9/templates/dnsmasq-neutron.conf
deleted file mode 100755
index 7bcbd9df..00000000
--- a/deploy/adapters/ansible/openstack_osp9/templates/dnsmasq-neutron.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-dhcp-option-force=26,1454
-
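For context, DHCP option 26 sets the instance interface MTU; 1454 leaves 46 bytes of headroom below a 1500-byte physical MTU for GRE/VXLAN encapsulation (an assumption about the intent, written here as a dnsmasq-style comment):

# option 26 = interface-mtu; 1454 = 1500 minus 46 bytes of tunnel encapsulation headroom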
diff --git a/deploy/adapters/ansible/openstack_osp9/templates/ml2_conf.ini b/deploy/adapters/ansible/openstack_osp9/templates/ml2_conf.ini
deleted file mode 100755
index 7b3e76da..00000000
--- a/deploy/adapters/ansible/openstack_osp9/templates/ml2_conf.ini
+++ /dev/null
@@ -1,113 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-flat_networks = *
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-network_vlan_ranges = {{ NEUTRON_VLAN_RANGES|join(",") }}
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-vni_ranges = 1001:4095
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-vxlan_group = 239.1.1.1
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = True
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
-
-[ovs]
-local_ip = {{ internal_ip }}
-{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
-integration_bridge = br-int
-{% if NEUTRON_TUNNEL_TYPES %}
-tunnel_bridge = br-tun
-tunnel_id_ranges = 1001:4095
-tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-{% endif %}
-bridge_mappings = {{ NEUTRON_OVS_BRIDGE_MAPPINGS | join(",") }}
-{% endif %}
-
-[agent]
-root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
-tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
-vxlan_udp_port = 4789
-{% endif %}
-l2_population = False
-
-[odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-network_vlan_ranges = 1001:4095
-{% if NEUTRON_TUNNEL_TYPES %}
-tunnel_id_ranges = 1001:4095
-tun_peer_patch_port = patch-int
-int_peer_patch_port = patch-tun
-tunnel_bridge = br-tun
-{% endif %}
-
-tenant_network_type = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-integration_bridge = br-int
-controllers = 10.1.0.15:8080:admin:admin
-{% endif %}
-
-[ml2_odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-username = {{ odl_username }}
-password = {{ odl_password }}
-url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
-{% endif %}
-
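As a concrete, purely hypothetical rendering — type drivers flat,vlan,vxlan; tenant networks vxlan; the openvswitch mechanism driver; a VLAN range of physnet2:2700:2999; a bridge mapping of physnet2:br-prv; and an internal_ip of 10.1.0.50 — the key sections above become:

[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch

[ml2_type_vlan]
network_vlan_ranges = physnet2:2700:2999

[ovs]
local_ip = 10.1.0.50
integration_bridge = br-int
tunnel_bridge = br-tun
tunnel_id_ranges = 1001:4095
tunnel_type = vxlan
bridge_mappings = physnet2:br-prv

[agent]
tunnel_types = vxlan
vxlan_udp_port = 4789
l2_population = False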
diff --git a/deploy/adapters/ansible/openstack_osp9/templates/neutron.conf b/deploy/adapters/ansible/openstack_osp9/templates/neutron.conf
deleted file mode 100755
index 33231ed5..00000000
--- a/deploy/adapters/ansible/openstack_osp9/templates/neutron.conf
+++ /dev/null
@@ -1,486 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ VERBOSE }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-notify_nova_on_port_status_changes = True
-notify_nova_on_port_data_changes = True
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port the bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ internal_vip.ip }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-{% if NOVA_ADMIN_TENANT_ID|default('') %}
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-{% endif %}
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on the back end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on the back end, a health monitor is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 30
-use_db_reconnect = True
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is a multiline option; example for the default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
-
-{% if enable_fwaas %}
-[fwaas]
-driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
-enabled = True
-{% endif %}
-
-[nova]
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = nova
-password = {{ NOVA_PASS }}
-
diff --git a/deploy/adapters/ansible/openstack_osp9/templates/nova.conf b/deploy/adapters/ansible/openstack_osp9/templates/nova.conf
deleted file mode 100755
index 3a5735cf..00000000
--- a/deploy/adapters/ansible/openstack_osp9/templates/nova.conf
+++ /dev/null
@@ -1,96 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE }}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=osapi_compute,metadata
-
-default_floating_pool={{ public_net_info.network }}
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-memcached_servers = {{ memcached_servers }}
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[api_database]
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
-idle_timeout = 30
-use_db_reconnect = True
-pool_timeout = 10
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-memcached_servers = {{ memcached_servers }}
-
-[glance]
-host = {{ internal_vip.ip }}
-
-[neutron]
-url = http://{{ internal_vip.ip }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-auth_type = password
-auth_url = http://{{ internal_vip.ip }}:35357
-password = {{ NEUTRON_PASS }}
-username = neutron
-project_domain_name = default
-user_domain_name = default
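The Jinja block at the top of this template builds the memcached endpoint list; with two hypothetical haproxy hosts, 10.1.0.50 and 10.1.0.51, it renders as:

memcached_servers = 10.1.0.50:11211,10.1.0.51:11211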
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/handlers/main.yml b/deploy/adapters/ansible/roles/aodh/handlers/main.yml
index b3399e0c..b3399e0c 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/aodh/handlers/main.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/aodh_config.yml b/deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml
index e60d5338..e60d5338 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/aodh_config.yml
+++ b/deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_install.yml b/deploy/adapters/ansible/roles/aodh/tasks/aodh_install.yml
index d8a82270..d8a82270 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_install.yml
+++ b/deploy/adapters/ansible/roles/aodh/tasks/aodh_install.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/main.yml b/deploy/adapters/ansible/roles/aodh/tasks/main.yml
index 9b61915f..9b61915f 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/aodh/tasks/main.yml
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/roles/aodh/templates/aodh.conf.j2
index d9eb0599..d9eb0599 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2
+++ b/deploy/adapters/ansible/roles/aodh/templates/aodh.conf.j2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/Debian.yml b/deploy/adapters/ansible/roles/aodh/vars/Debian.yml
index 9bf4ad7a..9bf4ad7a 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/aodh/vars/Debian.yml
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/RedHat.yml b/deploy/adapters/ansible/roles/aodh/vars/RedHat.yml
index 3d25bd6c..3d25bd6c 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/aodh/vars/RedHat.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/main.yml b/deploy/adapters/ansible/roles/aodh/vars/main.yml
index b17f6ed0..b17f6ed0 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/vars/main.yml
+++ b/deploy/adapters/ansible/roles/aodh/vars/main.yml
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml b/deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml
index c973d7df..10b7c683 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml
@@ -7,6 +7,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- name: restart ceilometer relation service
+- name: restart ceilometer service
service: name={{ item }} state=restarted enabled=yes
with_items: ceilometer_services
+
+- name: restart nova service
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: nova_services
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/ceilometer_config.yml b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml
index b429d65b..b429d65b 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/ceilometer_config.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/ceilometer_install.yml b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml
index 0f2ba3d2..0f2ba3d2 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/tasks/ceilometer_install.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml
index 864ea97a..1e3c04d7 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml
@@ -7,38 +7,16 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- include_vars: "{{ ansible_os_family }}.yml"
+- include: ceilometer_install.yml
+ tags:
+ - install
+ - ceilometer_install
+ - ceilometer
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install ceilometer packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: ceilometer_packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: copy ceilometer configs
- template: src={{ item.src}} dest=/opt/os_templates
- with_items: "{{ ceilometer_configs_templates }}"
-
-- name: update ceilometer configs
- shell: crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }}
- with_subelements:
- - ceilometer_configs_templates
- - dest
- notify: restart ceilometer relation service
-
-- name: write services to monitor list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: ceilometer_services
+- include: ceilometer_config.yml
+ tags:
+ - config
+ - ceilometer_config
+ - ceilometer
- meta: flush_handlers
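With the install and config steps split out and tagged as above, a subset of the role can be re-run on its own, for example (inventory and playbook names are illustrative):

ansible-playbook -i inventory site.yml --tags ceilometer_config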
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/templates/ceilometer.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.conf.j2
index bffd6068..bffd6068 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/templates/ceilometer.conf.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.conf.j2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/templates/nova.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_compute/templates/nova.conf.j2
index 68ffdc0a..68ffdc0a 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_compute/templates/nova.conf.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/templates/nova.conf.j2
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml b/deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml
index 550d14f5..1bf3956f 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml
@@ -12,12 +12,6 @@ ceilometer_packages:
ceilometer_services:
- ceilometer-agent-compute
- - nova-compute
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: nova.j2
- dest:
- - /etc/nova/nova.conf
+nova_services:
+ - nova-compute
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml b/deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml
index 5a9128cd..c5778a49 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml
@@ -14,12 +14,4 @@ ceilometer_packages:
ceilometer_services:
- openstack-ceilometer-compute
- - openstack-nova-compute
-
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: nova.j2
- dest:
- - /etc/nova/nova.conf
+ - openstack-nova-compute
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml b/deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml
index c973d7df..a3bfb85d 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml
@@ -7,6 +7,16 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- name: restart ceilometer relation service
+- name: restart ceilometer service
service: name={{ item }} state=restarted enabled=yes
with_items: ceilometer_services
+
+- name: restart glance_cinder service
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: glance_cinder_services
+
+- name: reload apache server
+ service: name=apache2 state=reloaded
+
+- name: restart apache server
+ service: name=apache2 state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/ceilometer_config.yml b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_config.yml
index 7f5209c1..7f5209c1 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/ceilometer_config.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_config.yml
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/ceilometer_install.yml b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml
index 0f2ba3d2..0f2ba3d2 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/tasks/ceilometer_install.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml
index 6b1882cc..1e3c04d7 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml
@@ -7,45 +7,16 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install ceilometer packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: ceilometer_packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: copy ceilometer configs
- template: src={{ item.src}} dest=/opt/os_templates
- with_items: "{{ ceilometer_configs_templates }}"
-
-- name: update ceilometer configs
- shell: crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }}
- with_subelements:
- - ceilometer_configs_templates
- - dest
- notify: restart ceilometer relation service
-
-- name: change meter polling interval to 300s
- replace:
- dest: /etc/ceilometer/pipeline.yaml
- regexp: 'interval: .+'
- replace: 'interval: 300'
- notify: restart ceilometer relation service
-
-- name: write services to monitor list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: ceilometer_services
+- include: ceilometer_install.yml
+ tags:
+ - install
+ - ceilometer_install
+ - ceilometer
+
+- include: ceilometer_config.yml
+ tags:
+ - config
+ - ceilometer_config
+ - ceilometer
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/ceilometer.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.conf.j2
index 50271732..50271732 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/ceilometer.conf.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.conf.j2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/cinder.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.conf.j2
index e2d19cc3..e2d19cc3 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/cinder.conf.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.conf.j2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/glance-api.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-api.conf.j2
index e2d19cc3..e2d19cc3 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/glance-api.conf.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-api.conf.j2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/glance-registry.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-registry.conf.j2
index e2d19cc3..e2d19cc3 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/glance-registry.conf.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-registry.conf.j2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2
index 9909f800..9909f800 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml b/deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml
index 55f5aa19..de860533 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml
@@ -17,17 +17,10 @@ ceilometer_packages:
ceilometer_services:
- ceilometer-agent-central
- ceilometer-agent-notification
- - ceilometer-api
- ceilometer-collector
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
+glance_cinder_services:
+ - glance-registry
+ - glance-api
+ - cinder-api
+ - cinder-scheduler
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml b/deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml
index 86f464a5..de860533 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml
@@ -8,26 +8,19 @@
##############################################################################
---
ceilometer_packages:
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
+ - ceilometer-api
+ - ceilometer-collector
+ - ceilometer-agent-central
+ - ceilometer-agent-notification
- python-ceilometerclient
ceilometer_services:
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
+ - ceilometer-agent-central
+ - ceilometer-agent-notification
+ - ceilometer-collector
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
+glance_cinder_services:
+ - glance-registry
+ - glance-api
+ - cinder-api
+ - cinder-scheduler
diff --git a/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
index 658d109e..1d14c2d2 100644
--- a/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
+++ b/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
@@ -15,6 +15,17 @@
- name: Populate the monitor daemon
shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring"
+- name: Change ceph/mon dir owner to ceph
+ shell: "chown -R ceph:ceph /var/lib/ceph/mon"
+ when: ansible_os_family == "Debian"
+
+- name: copy templates
+ template:
+ src: ceph-mon.service
+ dest: /lib/systemd/system/ceph-mon.service
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
- name: Touch the done and auto start file
file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch"
with_items:
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/templates/ceph-mon.service b/deploy/adapters/ansible/roles/ceph-mon/templates/ceph-mon.service
index 5a3cf753..5a3cf753 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/templates/ceph-mon.service
+++ b/deploy/adapters/ansible/roles/ceph-mon/templates/ceph-mon.service
diff --git a/deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml b/deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml
index 16b7989b..a792acad 100644
--- a/deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml
@@ -8,5 +8,5 @@
##############################################################################
---
-ceph_start_script: "start ceph-mon id={{ inventory_hostname }}"
-ceph_start_type: "upstart"
+ceph_start_script: "service ceph-mon start"
+ceph_start_type: "systemd"
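On Debian the monitor now starts through a shipped systemd unit instead of upstart, and the monitor directory is chowned to ceph. A quick manual check on a monitor node, assuming the unit landed in /lib/systemd/system/ceph-mon.service as the template task installs it:

    systemctl daemon-reload        # pick up the copied ceph-mon.service
    service ceph-mon start         # same command as the new ceph_start_script
    systemctl status ceph-mon      # monitor should be active
    ls -ld /var/lib/ceph/mon       # should now be owned by ceph:ceph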
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-openstack/tasks/ceph_openstack_post.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml
index 2097ca57..2097ca57 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ceph-openstack/tasks/ceph_openstack_post.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
index 8c9734d7..06c3acb6 100644
--- a/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
@@ -11,6 +11,7 @@
- ceph_deploy
- ceph_openstack_pre
- ceph_openstack_conf
+ - ceph_openstack_post
- ceph_openstack
- include: ceph_openstack_pre.yml
@@ -24,3 +25,9 @@
- ceph_deploy
- ceph_openstack_conf
- ceph_openstack
+
+- include: ceph_openstack_post.yml
+ tags:
+ - ceph_deploy
+ - ceph_openstack_post
+ - ceph_openstack
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml b/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml
index 1da42323..db10bd14 100755
--- a/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml
@@ -11,9 +11,9 @@ packages:
- ceph-deploy
- python-flask
- libgoogle-perftools4
- - libleveldb1
+ - libleveldb1v5
- liblttng-ust0
- - libsnappy1
+ - libsnappy1v5
- librbd1
- librados2
- python-ceph
diff --git a/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
index ff99d68a..363e5e6d 100644
--- a/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
+++ b/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
@@ -22,6 +22,10 @@
- name: prepare osd disk
shell: ceph-disk prepare --fs-type xfs /var/local/osd
+- name: change local/osd dir owner to ceph
+ shell: chown -R ceph:ceph /var/local/osd
+ when: ansible_os_family == "Debian"
+
- name: activate osd node
shell: ceph-disk activate /var/local/osd
diff --git a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
index 66d9948a..d428a078 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
+++ b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
@@ -1,3 +1,9 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
@@ -9,17 +15,13 @@ debug = {{ DEBUG }}
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
-notification_driver=cinder.openstack.common.notifier.rpc_notifier
+notification_driver = cinder.openstack.common.notifier.rpc_notifier
volumes_dir = /var/lib/cinder/volumes
-
-log_file=/var/log/cinder/cinder.log
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
+log_file = /var/log/cinder/cinder.log
control_exchange = cinder
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ storage_controller_host }}
glance_host = {{ internal_vip.ip }}
@@ -28,8 +30,8 @@ api_rate_limit = False
storage_availability_zone = nova
quota_volumes = 10
-quota_gigabytes=1000
-quota_driver=cinder.quota.DbQuotaDriver
+quota_gigabytes = 1000
+quota_driver = cinder.quota.DbQuotaDriver
osapi_volume_listen = {{ storage_controller_host }}
osapi_volume_listen_port = 8776
@@ -38,29 +40,46 @@ db_backend = sqlalchemy
volume_name_template = volume-%s
snapshot_name_template = snapshot-%s
-max_gigabytes=10000
+max_gigabytes = 10000
-volume_clear=zero
-volume_clear_size=10
+volume_clear = zero
+volume_clear_size = 10
-iscsi_ip_address={{ storage_controller_host }}
-iscsi_port=3260
-iscsi_helper=tgtadm
+iscsi_ip_address = {{ storage_controller_host }}
+iscsi_port = 3260
+iscsi_helper = tgtadm
-volumes_dir=/var/lib/cinder/volumes
+volumes_dir = /var/lib/cinder/volumes
+volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
-volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
+idle_timeout = 30
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v3
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = cinder
+password = {{ CINDER_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = cinder
admin_password = {{ CINDER_PASS }}
-[database]
-connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
-idle_timeout = 30
-
[keymgr]
encryption_auth_url=http://{{ internal_vip.ip }}:5000/v3
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[oslo_concurrency]
+lock_path = /var/lib/cinder/tmp
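The controller template now derives memcached_servers from haproxy_hosts, sets transport_url, and moves the rabbit options into [oslo_messaging_rabbit]. A spot check of the rendered file with crudini (already used by these roles) might look like:

    crudini --get /etc/cinder/cinder.conf DEFAULT transport_url
    crudini --get /etc/cinder/cinder.conf keystone_authtoken memcached_servers
    crudini --get /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_host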
diff --git a/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf b/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
index 0660cba9..e7946b5c 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
+++ b/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
@@ -10,25 +10,22 @@ state_path = /var/lib/cinder
lock_path = /var/lib/cinder/tmp
notification_driver=cinder.openstack.common.notifier.rpc_notifier
volumes_dir = /var/lib/cinder/volumes
-
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
log_file=/var/log/cinder/cinder.log
control_exchange = cinder
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ storage_controller_host }}
glance_host = {{ internal_vip.ip }}
glance_port = 9292
+glance_api_servers = http://{{ internal_vip.ip }}:9292
api_rate_limit = False
storage_availability_zone = nova
quota_volumes = 10
-quota_gigabytes=1000
-quota_driver=cinder.quota.DbQuotaDriver
+quota_gigabytes = 1000
+quota_driver = cinder.quota.DbQuotaDriver
osapi_volume_listen = {{ storage_controller_host }}
osapi_volume_listen_port = 8776
@@ -37,26 +34,42 @@ db_backend = sqlalchemy
volume_name_template = volume-%s
snapshot_name_template = snapshot-%s
-max_gigabytes=10000
+max_gigabytes = 10000
-volume_clear=zero
-volume_clear_size=10
+volume_clear = zero
+volume_clear_size = 10
-iscsi_ip_address={{ storage_controller_host }}
+iscsi_ip_address = {{ storage_controller_host }}
iscsi_port=3260
iscsi_helper=tgtadm
volumes_dir=/var/lib/cinder/volumes
-
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
+idle_timeout = 30
+
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v3
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = cinder
+password = {{ CINDER_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = cinder
admin_password = {{ CINDER_PASS }}
-[database]
-connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
-idle_timeout = 30
+[lvm]
+volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
+volume_group = cinder-volumes
+iscsi_protocol = iscsi
+iscsi_helper = tgtadm
+
+[oslo_concurrency]
+lock_path = /var/lib/cinder/tmp
diff --git a/deploy/adapters/ansible/roles/common/templates/pip.conf b/deploy/adapters/ansible/roles/common/templates/pip.conf
index 7bb3e43e..59981258 100644
--- a/deploy/adapters/ansible/roles/common/templates/pip.conf
+++ b/deploy/adapters/ansible/roles/common/templates/pip.conf
@@ -1,5 +1,5 @@
[global]
-find-links = http://{{ COMPASS_SERVER.stdout_lines[0] }}/pip
+find-links = http://{{ COMPASS_SERVER.stdout_lines[0] }}/pip-openstack
no-index = true
[install]
trusted-host={{ COMPASS_SERVER.stdout_lines[0] }}
diff --git a/deploy/adapters/ansible/roles/common/vars/Debian.yml b/deploy/adapters/ansible/roles/common/vars/Debian.yml
index 1d7972eb..46e0374f 100644
--- a/deploy/adapters/ansible/roles/common/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/common/vars/Debian.yml
@@ -10,11 +10,12 @@
packages:
- ubuntu-cloud-keyring
- python-dev
- - openvswitch-datapath-dkms
- openvswitch-switch
+ - openvswitch-switch-dpdk
- python-memcache
- python-iniparse
- python-lxml
+ - python-crypto
#- python-d* #TODO, need remove
pip_packages:
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/files/congress.service b/deploy/adapters/ansible/roles/congress/files/congress.service
index 4ec26c8c..4ec26c8c 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/files/congress.service
+++ b/deploy/adapters/ansible/roles/congress/files/congress.service
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/handlers/main.yml b/deploy/adapters/ansible/roles/congress/handlers/main.yml
index cf535a11..b4ea8e90 100755..100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/congress/handlers/main.yml
@@ -9,4 +9,4 @@
---
- name: restart congress services
service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+ with_items: "{{ services | union(services_noarch) }}"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_config.yml b/deploy/adapters/ansible/roles/congress/tasks/congress_config.yml
index f40d4c22..f40d4c22 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_config.yml
+++ b/deploy/adapters/ansible/roles/congress/tasks/congress_config.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_db.yml b/deploy/adapters/ansible/roles/congress/tasks/congress_db.yml
index 1883509b..1883509b 100755..100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/tasks/congress_db.yml
+++ b/deploy/adapters/ansible/roles/congress/tasks/congress_db.yml
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_install.yml b/deploy/adapters/ansible/roles/congress/tasks/congress_install.yml
index 19eed11c..1e620783 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/congress_install.yml
+++ b/deploy/adapters/ansible/roles/congress/tasks/congress_install.yml
@@ -11,7 +11,7 @@
- name: install congress packages
pip: name={{ item }} state=present
- with_items: packages
+ with_items: "{{ packages }}"
- name: create congress etc directory
file: path=/etc/congress state=directory
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/main.yml b/deploy/adapters/ansible/roles/congress/tasks/main.yml
index f8056d15..f8056d15 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/congress/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/congress/tasks/main.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/templates/api-paste.ini b/deploy/adapters/ansible/roles/congress/templates/api-paste.ini
index 39be570b..39be570b 100755..100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/templates/api-paste.ini
+++ b/deploy/adapters/ansible/roles/congress/templates/api-paste.ini
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/templates/congress.conf b/deploy/adapters/ansible/roles/congress/templates/congress.conf
index 0305b418..0305b418 100755..100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/templates/congress.conf
+++ b/deploy/adapters/ansible/roles/congress/templates/congress.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/templates/policy.json b/deploy/adapters/ansible/roles/congress/templates/policy.json
index 4476051d..4476051d 100755..100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/templates/policy.json
+++ b/deploy/adapters/ansible/roles/congress/templates/policy.json
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/vars/Debian.yml b/deploy/adapters/ansible/roles/congress/vars/Debian.yml
index 1cc4645e..1cc4645e 100755..100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/congress/vars/Debian.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/congress/vars/main.yml b/deploy/adapters/ansible/roles/congress/vars/main.yml
index f6fef749..f6fef749 100755..100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/congress/vars/main.yml
+++ b/deploy/adapters/ansible/roles/congress/vars/main.yml
diff --git a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
index 229e3cfe..9be6fd6c 100644
--- a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
@@ -71,19 +71,9 @@
- restart dashboard services
- name: update ubuntu horizon settings
- lineinfile:
- dest: /etc/openstack-dashboard/local_settings.py
- regexp: '{{ item.regexp }}'
- line: '{{ item.line }}'
- with_items:
- - regexp: '^WEBROOT[ \t]*=.*'
- line: 'WEBROOT = "/horizon"'
- - regexp: '^COMPRESS_OFFLINE[ \t]*=.*'
- line: 'COMPRESS_OFFLINE=True'
- - regexp: '^ALLOWED_HOSTS[ \t]*=.*'
- line: 'ALLOWED_HOSTS = ["*"]'
- - regexp: '^OPENSTACK_HOST[ \t]*=.*'
- line: 'OPENSTACK_HOST = "{{ internal_ip }}"'
+ template:
+ src: local_settings.py.j2
+ dest: "/etc/openstack-dashboard/local_settings.py"
when: ansible_os_family == 'Debian'
notify:
- restart dashboard services
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/local_settings.py.j2 b/deploy/adapters/ansible/roles/dashboard/templates/local_settings.py.j2
index 7278d5c2..7278d5c2 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/local_settings.py.j2
+++ b/deploy/adapters/ansible/roles/dashboard/templates/local_settings.py.j2
diff --git a/deploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2 b/deploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2
index 403fcc22..664af687 100755
--- a/deploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2
+++ b/deploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2
@@ -1,8 +1,11 @@
{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
+{% if work_threads > 10 %}
+{% set work_threads = 10 %}
+{% endif %}
<VirtualHost {{ internal_ip }}:80>
WSGIScriptAlias /horizon {{ horizon_dir }}/wsgi/django.wsgi
- WSGIDaemonProcess horizon user=horizon group=horizon processes={{ work_threads }} threads={{ work_threads }}
+ WSGIDaemonProcess horizon user=horizon group=horizon processes=4 threads={{ work_threads }}
WSGIProcessGroup horizon
Alias /static {{ horizon_dir }}/static/
Alias /horizon/static {{ horizon_dir }}/static/
diff --git a/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml b/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
index 5c9b032e..aaeb8cdb 100644
--- a/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
@@ -11,6 +11,7 @@ packages: []
services:
- memcached
+ - apache2
apache_config_dir: /etc/apache2
horizon_dir: /usr/share/openstack-dashboard/openstack_dashboard
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
index f083a40f..442cd18b 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
@@ -7,14 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- name: Register RECOVERY
- set_fact: RECOVERY_ENV={{RECOVERY_ENV | default('False')}}
-
-- name: killall mysqld processes
- shell: sudo killall -9 mysqld
- when: RECOVERY_ENV
- ignore_errors: True
-
- name: get cluster status
shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
register: cluster_status
@@ -22,11 +14,9 @@
- inventory_hostname == haproxy_hosts.keys()[0]
- name: start first node to create new cluster
- service:
- name: mysql
- state: restarted
- enabled: yes
- args: "--wsrep-new-cluster"
+ shell: >
+ service mysql bootstrap;
+ service mysql start;
when: |
inventory_hostname == haproxy_hosts.keys()[0]
and not cluster_status.stdout | search("OPERATIONAL")
@@ -47,19 +37,30 @@
register: cluster_nodes
changed_when: false
-- name: restart other nodes and join cluster
- service:
- name: mysql
- state: restarted
- enabled: yes
+- name: restart other nodes and join cluster1
+ shell: service mysql restart;
+ when: |
+ inventory_hostname != haproxy_hosts.keys()[0]
+ and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
+ ignore_errors: True
+
+- name: delay 60 seconds
+ shell: sleep 60
+
+- name: restart other nodes and join cluster2
+ shell: service mysql restart;
when: |
inventory_hostname != haproxy_hosts.keys()[0]
and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
+- name: chmod directory
+ shell: >
+ chmod 755 -R /var/lib/mysql/ ;
+ chmod 755 -R /var/log/mysql/ ;
+ chmod 755 -R /etc/mysql/conf.d/;
+
- name: restart first nodes
- service:
- name: mysql
- state: restarted
+ shell: service mysql restart
when: |
(inventory_hostname == haproxy_hosts.keys()[0]
and haproxy_hosts|length > 1
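Bootstrap of the first node is now done with "service mysql bootstrap", and the remaining nodes simply restart (twice, with a 60 second delay) until they join. Whether the Galera cluster actually formed can be checked on any node with the standard wsrep status variables, in the same spirit as the wsrep_evs_state query above (sketch):

    mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_cluster_size"'    # should equal the controller count
    mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_cluster_status"'  # expected: Primary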
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
index bf9f3464..1b08172d 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
@@ -14,16 +14,16 @@
mode: 0755
when: ansible_os_family == "Debian"
-- name: install python-mysqldb
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: maridb_packages | union(packages_noarch)
-
- name: change open file limit
copy:
content: "* - nofile 65536"
dest: "/etc/security/limits.conf"
mode: 0755
+- name: install python-mysqldb
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: maridb_packages | union(packages_noarch)
+
- name: create conf dir for wsrep
file: path=/etc/my.cnf.d state=directory mode=0755
when: ansible_os_family == "RedHat"
@@ -52,7 +52,7 @@
when: ansible_os_family == "Debian"
- name: set owner
- file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory
+ file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory mode=0755
- name: get logfile stat
stat: path='{{ mysql_data_dir }}/ib_logfile0'
@@ -67,3 +67,4 @@
when: |
logfile_stat.stat.exists
and logfile_stat.stat.size != 1073741824
+
diff --git a/deploy/adapters/ansible/roles/database/templates/data.j2 b/deploy/adapters/ansible/roles/database/templates/data.j2
index 109201ab..66c2fead 100644
--- a/deploy/adapters/ansible/roles/database/templates/data.j2
+++ b/deploy/adapters/ansible/roles/database/templates/data.j2
@@ -6,6 +6,7 @@ drop database if exists neutron;
drop database if exists nova;
drop database if exists cinder;
drop database if exists heat;
+drop database if exists aodh;
CREATE DATABASE keystone;
{% for host in ['%', 'localhost', inventory_hostname] %}
@@ -37,6 +38,11 @@ CREATE DATABASE heat;
GRANT ALL ON heat.* TO 'heat'@'{{ host }}' IDENTIFIED BY '{{ HEAT_DBPASS }}';
{% endfor %}
+CREATE DATABASE aodh;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON aodh.* TO 'aodh'@'{{ host }}' IDENTIFIED BY '{{ AODH_DBPASS }}';
+{% endfor %}
+
{% if WSREP_SST_USER is defined %}
{% for host in ['%', 'localhost', inventory_hostname] %}
GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
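data.j2 now drops and recreates an aodh database with per-host grants using AODH_DBPASS. After the database role has run, the grants can be verified from a controller, assuming root access over the local MariaDB socket (sketch):

    mysql -e "SHOW DATABASES LIKE 'aodh'"
    mysql -e "SELECT user, host FROM mysql.user WHERE user = 'aodh'"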
diff --git a/deploy/adapters/ansible/roles/database/vars/Debian.yml b/deploy/adapters/ansible/roles/database/vars/Debian.yml
index 621dc492..1021524d 100644
--- a/deploy/adapters/ansible/roles/database/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/database/vars/Debian.yml
@@ -17,11 +17,21 @@ mysql_packages:
- mysql-server
maridb_packages:
- - libaio1
- - libssl0.9.8
+ - apt-transport-https
+ - debconf-utils
+ - libaio1
+ - libc6
+ - libdbd-mysql-perl
+ - libgcc1
+ - libgcrypt20
+ - libstdc++6
+ - python-software-properties
+ - mariadb-client
+ - galera-3
+ - rsync
+ - socat
+ - mariadb-galera-server-10.0
- python-mysqldb
- - mysql-wsrep-server-5.5
- - galera-3
pip_packages: []
diff --git a/deploy/adapters/ansible/roles/database/vars/main.yml b/deploy/adapters/ansible/roles/database/vars/main.yml
index c0538899..a32897f0 100644
--- a/deploy/adapters/ansible/roles/database/vars/main.yml
+++ b/deploy/adapters/ansible/roles/database/vars/main.yml
@@ -23,6 +23,9 @@ credentials:
db: glance
password: "{{ GLANCE_DBPASS }}"
- user: nova
+ db: nova_api
+ password: "{{ NOVA_DBPASS }}"
+ - user: nova
db: nova
password: "{{ NOVA_DBPASS }}"
- user: cinder
@@ -31,4 +34,6 @@ credentials:
- user: heat
db: heat
password: "{{ HEAT_DBPASS }}"
-
+ - user: aodh
+ db: aodh
+ password: "{{ AODH_DBPASS }}"
diff --git a/deploy/adapters/ansible/roles/ext-network/handlers/main.yml b/deploy/adapters/ansible/roles/ext-network/handlers/main.yml
index a7945861..36e39072 100644
--- a/deploy/adapters/ansible/roles/ext-network/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/ext-network/handlers/main.yml
@@ -8,7 +8,7 @@
##############################################################################
---
- name: restart neutron-plugin-openvswitch-agent
- service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+ service: name=neutron-openvswitch-agent state=restarted enabled=yes
when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
- name: restart neutron-l3-agent
diff --git a/deploy/adapters/ansible/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/roles/ext-network/tasks/main.yml
index cb6cb2ce..f68105f1 100644
--- a/deploy/adapters/ansible/roles/ext-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ext-network/tasks/main.yml
@@ -7,40 +7,38 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+# FIXME: temporary workaround for openstack api access random failure
+- name: restart api server
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: api_services | union(api_services_noarch)
+ ignore_errors: True
+
- name: restart neutron server
service: name=neutron-server state=restarted enabled=yes
+- name: wait for neutron ready
+ wait_for: port=9696 delay=10 timeout=60 host={{ internal_ip }}
+
- name: create external net
- neutron_network:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.network }}"
- provider_network_type: "{{ public_net_info.type }}"
- provider_physical_network: "{{ public_net_info.provider_network }}"
- provider_segmentation_id: "{{ public_net_info.segment_id}}"
- shared: false
- router_external: yes
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
+ shell:
+ . /opt/admin-openrc.sh;
+ neutron net-create \
+ {{ public_net_info.network }} \
+ --provider:network_type {{ public_net_info.type }} \
+ --provider:physical_network {{ public_net_info.provider_network }} \
+ --router:external True
+ when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
- name: create external subnet
- neutron_subnet:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.subnet }}"
- network_name: "{{ public_net_info.network }}"
- cidr: "{{ public_net_info.floating_ip_cidr }}"
- enable_dhcp: "{{ public_net_info.enable_dhcp }}"
- no_gateway: "{{ public_net_info.no_gateway }}"
- gateway_ip: "{{ public_net_info.external_gw }}"
- allocation_pool_start: "{{ public_net_info.floating_ip_start }}"
- allocation_pool_end: "{{ public_net_info.floating_ip_end }}"
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
+ shell:
+ . /opt/admin-openrc.sh;
+ neutron subnet-create \
+ --name {{ public_net_info.subnet }} \
+ --gateway {{ public_net_info.external_gw }} \
+ --allocation-pool \
+ start={{ public_net_info.floating_ip_start }},end={{ public_net_info.floating_ip_end }} \
+ {{ public_net_info.network }} {{ public_net_info.floating_ip_cidr }}
+ when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
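The neutron_network/neutron_subnet modules are replaced by plain CLI calls executed once on the first controller. The result can be checked with the same client after sourcing the same rc file; the network and subnet names come from public_net_info in the deployment config (sketch):

    . /opt/admin-openrc.sh
    neutron net-show <public_net_info.network>    # router:external should be True
    neutron subnet-list                           # new subnet with its allocation pool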
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/vars/Debian.yml b/deploy/adapters/ansible/roles/ext-network/vars/Debian.yml
index 0b5c78b6..0b5c78b6 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ext-network/vars/Debian.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/vars/RedHat.yml b/deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml
index 886401fd..886401fd 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/vars/main.yml b/deploy/adapters/ansible/roles/ext-network/vars/main.yml
index b19b6ebf..b19b6ebf 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/vars/main.yml
+++ b/deploy/adapters/ansible/roles/ext-network/vars/main.yml
diff --git a/deploy/adapters/ansible/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
index 482be22e..39a49dc1 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
@@ -7,6 +7,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+- name: install nfs packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: nfs_packages
+
- name: install nfs
local_action: yum name={{ item }} state=present
with_items:
@@ -56,7 +60,7 @@
mkdir -p /var/lib/glance/images
mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images
sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- #echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
+ echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
when: mount_info.stdout.find('images') == -1
retries: 5
delay: 3
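The images share is now persisted in /etc/fstab instead of being left commented out. A quick check on a controller (sketch):

    grep /var/lib/glance/images /etc/fstab     # nfsvers=3 entry written by the task
    mount | grep /var/lib/glance/images        # share should currently be mounted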
diff --git a/deploy/adapters/ansible/roles/glance/templates/glance-api.conf b/deploy/adapters/ansible/roles/glance/templates/glance-api.conf
index 9be29f4f..241f04ce 100644
--- a/deploy/adapters/ansible/roles/glance/templates/glance-api.conf
+++ b/deploy/adapters/ansible/roles/glance/templates/glance-api.conf
@@ -36,31 +36,43 @@ scrub_time = 43200
image_cache_dir = /var/lib/glance/image-cache/
show_image_direct_url = True
-[task]
-task_executor = taskflow
-
[database]
backend = sqlalchemy
connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance?charset=utf8
idle_timeout = 30
+sqlite_db = /var/lib/glance/glance.sqlite
+
+[task]
+task_executor = taskflow
[glance_store]
default_store = file
stores = file,http,cinder,rbd
filesystem_store_datadir = /var/lib/glance/images/
+[image_format]
+disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,root-tar
+
[profiler]
enabled = True
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = glance
+password = {{ GLANCE_PASS }}
+token_cache_time = 300
+revocation_cache_time = 60
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = glance
admin_password = {{ GLANCE_PASS }}
-memcached_servers = {{ memcached_servers }}
-token_cache_time = 300
-revocation_cache_time = 60
[paste_deploy]
flavor= keystone
@@ -74,6 +86,8 @@ rabbit_use_ssl = false
rabbit_userid = {{ RABBIT_USER }}
rabbit_password = {{ RABBIT_PASS }}
rabbit_virtual_host = /
+default_notification_exchange = glance
+
rabbit_notification_exchange = glance
rabbit_notification_topic = notifications
rabbit_durable_queues = False
diff --git a/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf b/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf
index 8453b966..ccd8f1bb 100644
--- a/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf
+++ b/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf
@@ -30,12 +30,20 @@ idle_timeout = 30
enabled = True
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = glance
+password = {{ GLANCE_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = glance
admin_password = {{ GLANCE_PASS }}
-memcached_servers = {{ memcached_servers }}
token_cache_time = 300
revocation_cache_time = 60
diff --git a/deploy/adapters/ansible/roles/glance/vars/Debian.yml b/deploy/adapters/ansible/roles/glance/vars/Debian.yml
index b5b4b6c0..d1825012 100644
--- a/deploy/adapters/ansible/roles/glance/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/glance/vars/Debian.yml
@@ -11,6 +11,9 @@ packages:
- glance
- nfs-common
+nfs_packages:
+ - nfs-common
+
nfs_services: []
services:
diff --git a/deploy/adapters/ansible/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
index 517f347c..2987d0c4 100644
--- a/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
@@ -11,6 +11,10 @@ packages:
- openstack-glance
- rpcbind
+nfs_packages:
+ - nfs-utils
+ - rpcbind
+
nfs_services:
- rpcbind
diff --git a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
index a6876da7..5fbcc9d9 100644
--- a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
+++ b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
@@ -158,6 +158,16 @@ listen proxy-cinder_api_cluster
server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
{% endfor %}
+#listen proxy-swift-proxy
+# bind {{ internal_vip.ip }}:8080
+# bind {{ public_vip.ip }}:8080
+# balance source
+# option tcpka
+# option tcplog
+#{% for host,ip in haproxy_hosts.items() %}
+# server {{ host }} {{ ip }}:8080 weight 1 check inter 2000 rise 2 fall 5
+#{% endfor %}
+
listen proxy-ceilometer_api_cluster
bind {{ internal_vip.ip }}:8777
bind {{ public_vip.ip }}:8777
@@ -180,6 +190,17 @@ listen proxy-aodh_api_cluster
server {{ host }} {{ ip }}:8042 weight 1 check inter 2000 rise 2 fall 5
{% endfor %}
+listen proxy-congress_api_cluster
+ bind {{ internal_vip.ip }}:1789
+ bind {{ public_vip.ip }}:1789
+ mode tcp
+ option tcp-check
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:1789 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
listen proxy-dashboarad
bind {{ public_vip.ip }}:80
mode http
@@ -196,7 +217,7 @@ listen proxy-dashboarad
listen stats
mode http
- bind 0.0.0.0:9998
+ bind 0.0.0.0:9999
stats enable
stats refresh 30s
stats uri /
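Besides the new congress frontend on 1789 and the commented-out swift block, the stats listener moves from 9998 to 9999. After a reload the move can be confirmed locally; this sketch assumes the stats page needs no extra auth, which this hunk does not show:

    curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:9999/   # expect 200
    curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:9998/   # old port, connection should fail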
diff --git a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
index a6e76c74..6a0f1c73 100644
--- a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
+++ b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
@@ -21,13 +21,13 @@
- name: create heat user domain
shell: >
- . /opt/admin-openrc-v3.sh;
+ . /opt/admin-openrc.sh;
openstack domain create --description "Stack projects and users" heat;
openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
openstack role create heat_stack_owner;
openstack role add --project demo --user demo heat_stack_owner;
- when: inventory_hostname == groups['controller'][0] and ansible_os_family == "Debian"
+ when: inventory_hostname == groups['controller'][0]
- name: update heat conf
template: src=heat.j2
@@ -36,13 +36,4 @@
notify:
- restart heat service
- remove heat-sqlite-db
- when: ansible_os_family == "RedHat"
-- name: update heat conf
- template: src=heat_debian.j2
- dest=/etc/heat/heat.conf
- backup=yes
- notify:
- - restart heat service
- - remove heat-sqlite-db
- when: ansible_os_family == "Debian"
diff --git a/deploy/adapters/ansible/roles/heat/templates/heat.j2 b/deploy/adapters/ansible/roles/heat/templates/heat.j2
index aec6b2eb..72d4b61e 100644
--- a/deploy/adapters/ansible/roles/heat/templates/heat.j2
+++ b/deploy/adapters/ansible/roles/heat/templates/heat.j2
@@ -1,11 +1,17 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
[DEFAULT]
heat_metadata_server_url = http://{{ internal_vip.ip }}:8000
heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
log_dir = /var/log/heat
+stack_domain_admin = heat_domain_admin
+stack_domain_admin_password = {{ HEAT_PASS }}
+stack_user_domain_name = heat
[database]
connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat
@@ -14,12 +20,35 @@ use_db_reconnect = True
pool_timeout = 10
[ec2authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+
+[clients_keystone]
+auth_uri = http://{{ internal_vip.ip }}:35357
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = heat
+password = {{ HEAT_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = heat
admin_password = {{ HEAT_PASS }}
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[trustee]
+auth_type = password
+auth_url = http://{{ internal_vip.ip }}:35357
+username = heat
+password = {{ HEAT_PASS }}
+user_domain_name = default
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
index e7e9297e..ea211470 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
@@ -7,55 +7,90 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+- include_vars: "{{ ansible_os_family }}.yml"
+
- name: keystone-manage db-sync
- #keystone_manage: action=dbsync
shell: su -s /bin/sh -c 'keystone-manage db_sync' keystone
+- name: Check if fernet keys already exist
+ stat:
+ path: "/etc/keystone/fernet-keys/0"
+ register: fernet_keys_0
+
+- name: Create fernet keys for Keystone
+ command:
+ keystone-manage fernet_setup
+ --keystone-user keystone
+ --keystone-group keystone
+ when: not fernet_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Rotate fernet keys for Keystone
+ command:
+ keystone-manage fernet_rotate
+ --keystone-user keystone
+ --keystone-group keystone
+ when: fernet_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Distribute the fernet key repository
+ shell: rsync -e 'ssh -o StrictHostKeyChecking=no' \
+ -avz \
+ --delete \
+ /etc/keystone/fernet-keys \
+ root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
+ with_items: groups['controller'][1:]
+ notify:
+ - restart keystone services
+
+- name: Check if credential keys already exist
+ stat:
+ path: "/etc/keystone/credential-keys/0"
+ register: credential_keys_0
+
+- name: Create credential keys for Keystone
+ command:
+ keystone-manage credential_setup
+ --keystone-user keystone
+ --keystone-group keystone
+ when: not credential_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Rotate credential keys for Keystone
+ command:
+ keystone-manage credential_rotate
+ --keystone-user keystone
+ --keystone-group keystone
+ when: credential_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Distribute the credential key repository
+ shell: rsync -e 'ssh -o StrictHostKeyChecking=no' \
+ -avz \
+ --delete \
+ /etc/keystone/credential-keys \
+ root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
+ with_items: groups['controller'][1:]
+ notify:
+ - restart keystone services
+
+- name: Bootstrap the Identity service
+ shell:
+ keystone-manage bootstrap \
+ --bootstrap-password {{ ADMIN_PASS }} \
+ --bootstrap-admin-url http://{{ internal_ip }}:35357/v3/ \
+ --bootstrap-internal-url http://{{ internal_ip }}:35357/v3/ \
+ --bootstrap-public-url http://{{ internal_ip }}:5000/v3/ \
+ --bootstrap-region-id RegionOne
+ notify:
+ - restart keystone services
+
+- meta: flush_handlers
+
- name: wait for keystone ready
- wait_for: port=35357 delay=3 timeout=10 host={{ internal_ip }}
-
-- name: cron job to purge expired tokens hourly
- cron:
- name: 'purge expired tokens'
- special_time: hourly
- job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1'
-
-- name: add tenants
- keystone_user:
- token: "{{ ADMIN_TOKEN }}"
- endpoint: "http://{{ internal_ip }}:35357/v2.0"
- tenant: "{{ item.tenant }}"
- tenant_description: "{{ item.tenant_description }}"
- with_items: "{{ os_users }}"
-
-- name: add users
- keystone_user:
- token: "{{ ADMIN_TOKEN }}"
- endpoint: "http://{{ internal_ip }}:35357/v2.0"
- user: "{{ item.user }}"
- tenant: "{{ item.tenant }}"
- password: "{{ item.password }}"
- email: "{{ item.email }}"
- with_items: "{{ os_users }}"
-
-- name: grant roles
- keystone_user:
- token: "{{ ADMIN_TOKEN }}"
- endpoint: "http://{{ internal_ip }}:35357/v2.0"
- user: "{{ item.user }}"
- role: "{{ item.role }}"
- tenant: "{{ item.tenant }}"
- with_items: "{{ os_users }}"
-
-- name: add endpoints
- keystone_service:
- token: "{{ ADMIN_TOKEN }}"
- endpoint: "http://{{ internal_ip }}:35357/v2.0"
- name: "{{ item.name }}"
- type: "{{ item.type }}"
- region: "{{ item.region}}"
- description: "{{ item.description }}"
- publicurl: "{{ item.publicurl }}"
- internalurl: "{{ item.internalurl }}"
- adminurl: "{{ item.adminurl }}"
- with_items: "{{ os_services }}"
+ wait_for: port=35357 delay=15 timeout=60 host={{ internal_ip }}
+
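The config step now provisions fernet and credential key repositories, rsyncs them to the remaining controllers, and bootstraps the identity endpoints instead of seeding users and services with the admin token. On the first controller the outcome can be sanity-checked roughly like this (sketch):

    ls /etc/keystone/fernet-keys/ /etc/keystone/credential-keys/   # key 0 plus any rotated keys
    . /opt/admin-openrc.sh
    openstack token issue                                          # fernet token for the bootstrapped admin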
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_create.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml
index 53077776..53077776 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_create.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
index ea6926f4..757349c5 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
@@ -26,6 +26,16 @@
state=absent
when: ansible_os_family == "Debian"
+- name: disable boot auto start
+ file:
+ path={{ item }}
+ state=absent
+ with_items:
+ - /etc/init.d/keystone
+ - /etc/init/keystone.conf
+ - /lib/systemd/system/keystone.service
+ when: ansible_os_family == "Debian"
+
- name: generate keystone service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
@@ -56,7 +66,7 @@
- name: update apache2 configs
template:
src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
+ dest: '{{ apache_config_dir }}/sites-available/keystone.conf'
when: ansible_os_family == 'Debian'
notify:
- restart keystone services
@@ -64,15 +74,15 @@
- name: update apache2 configs
template:
src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/wsgi-keystone.conf'
+ dest: '{{ apache_config_dir }}/keystone.conf'
when: ansible_os_family == 'RedHat'
notify:
- restart keystone services
- name: enable keystone server
file:
- src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
- dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
+ src: "{{ apache_config_dir }}/sites-available/keystone.conf"
+ dest: "{{ apache_config_dir }}/sites-enabled/keystone.conf"
state: "link"
when: ansible_os_family == 'Debian'
notify:
@@ -82,7 +92,7 @@
template: src={{ item }} dest=/opt/{{ item }}
with_items:
- admin-openrc.sh
+ - admin-openrc-v2.sh
- demo-openrc.sh
- - admin-openrc-v3.sh
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/main.yml b/deploy/adapters/ansible/roles/keystone/tasks/main.yml
index 21939fa7..ad619d40 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/main.yml
@@ -20,4 +20,11 @@
- keystone_config
- keystone
+- include: keystone_create.yml
+ when: inventory_hostname == groups['controller'][0]
+ tags:
+ - config
+ - keystone_create
+ - keystone
+
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc-v2.sh b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v2.sh
index 6ba620ff..6ba620ff 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc-v2.sh
+++ b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v2.sh
diff --git a/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh
index 6ba620ff..94d5850f 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh
+++ b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh
@@ -7,9 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Verify the Identity Service installation
-export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_PROJECT_DOMAIN_NAME=default
+export OS_USER_DOMAIN_NAME=default
export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
+export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
-export OS_VOLUME_API_VERSION=2
-
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
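admin-openrc.sh now targets the v3 Identity API with explicit project and user domain variables. A minimal smoke test after sourcing it (sketch):

    . /opt/admin-openrc.sh
    openstack endpoint list --service identity    # URLs should end in /v3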
diff --git a/deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh b/deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh
index 5807e868..920f42ed 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh
+++ b/deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh
@@ -6,8 +6,12 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+export OS_PROJECT_DOMAIN_NAME=default
+export OS_USER_DOMAIN_NAME=default
+export OS_TENANT_NAME=demo
+export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD={{ DEMO_PASS }}
-export OS_TENANT_NAME=demo
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
-
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
diff --git a/deploy/adapters/ansible/roles/keystone/templates/keystone.conf b/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
index 649fc32c..919be344 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
+++ b/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
@@ -7,51 +7,52 @@
{% set memcached_servers = memcached_servers|join(',') %}
{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
[DEFAULT]
-admin_token={{ ADMIN_TOKEN }}
debug={{ DEBUG }}
log_dir = /var/log/keystone
[cache]
-backend=keystone.cache.memcache_pool
-memcache_servers={{ memcached_servers}}
+backend = keystone.cache.memcache_pool
+memcache_servers = {{ memcached_servers}}
enabled=true
[revoke]
-driver=sql
-expiration_buffer=3600
-caching=true
+driver = sql
+expiration_buffer = 3600
+caching = true
[database]
connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8
-idle_timeout=30
-min_pool_size=5
-max_pool_size=120
-pool_timeout=30
+idle_timeout = 30
+min_pool_size = 5
+max_pool_size = 120
+pool_timeout = 30
+[fernet_tokens]
+key_repository = /etc/keystone/fernet-keys/
[identity]
-default_domain_id=default
-driver=sql
+default_domain_id = default
+driver = sql
[assignment]
-driver=sql
+driver = sql
[resource]
-driver=sql
-caching=true
-cache_time=3600
-
+driver = sql
+caching = true
+cache_time = 3600
+
[token]
-enforce_token_bind=permissive
-expiration=43200
-provider=uuid
-driver=sql
-caching=true
-cache_time=3600
+enforce_token_bind = permissive
+expiration = 43200
+provider = fernet
+driver = sql
+caching = true
+cache_time = 3600
[eventlet_server]
-public_bind_host= {{ identity_host }}
-admin_bind_host= {{ identity_host }}
+public_bind_host = {{ identity_host }}
+admin_bind_host = {{ identity_host }}
[oslo_messaging_rabbit]
rabbit_userid = {{ RABBIT_USER }}
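With the token provider switched from uuid to fernet and [fernet_tokens] key_repository pointing at /etc/keystone/fernet-keys/, the key repository must exist before keystone can issue tokens. A hedged sketch of the usual setup commands (not shown in this patch; whether the deployment runs them elsewhere is an assumption):

    # create the fernet token key repository owned by the keystone user
    keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
    # Newton also encrypts stored credentials with fernet keys
    keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
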
diff --git a/deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
index 64d864af..55c89839 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
+++ b/deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
@@ -1,6 +1,10 @@
- {% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
+{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
+{% if work_threads > 10 %}
+{% set work_threads = 10 %}
+{% endif %}
+
<VirtualHost {{ internal_ip }}:5000>
- WSGIDaemonProcess keystone-public processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIDaemonProcess keystone-public processes=4 threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /usr/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
@@ -23,7 +27,7 @@
</VirtualHost>
<VirtualHost {{ internal_ip }}:35357>
- WSGIDaemonProcess keystone-admin processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIDaemonProcess keystone-admin processes=4 threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
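The template now caps the per-process thread count at 10 and pins WSGIDaemonProcess to 4 processes instead of scaling both with the vCPU count. As a worked example, on a 24-vCPU controller the old template would have asked for 12 processes with 12 threads each, while the new one runs 4 processes with at most 10 threads. Presumably this bounds Apache/mod_wsgi resource use on large hosts; the specific numbers are this patch's choice, not a general requirement.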
diff --git a/deploy/adapters/ansible/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
index b8d8e7c2..89bfbe0a 100644
--- a/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
@@ -11,8 +11,11 @@
cron_path: "/var/spool/cron/crontabs"
packages:
- - keystone
+ - apache2
+ - libapache2-mod-wsgi
+ - python-keystone
- python-openstackclient
+ - keystone
services:
- apache2
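Because keystone is now served through Apache mod_wsgi (see wsgi-keystone.conf.j2 above), apache2 and libapache2-mod-wsgi join the Debian package list alongside python-keystone.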
diff --git a/deploy/adapters/ansible/roles/keystone/vars/main.yml b/deploy/adapters/ansible/roles/keystone/vars/main.yml
index 655cd98d..ecaf7b51 100644
--- a/deploy/adapters/ansible/roles/keystone/vars/main.yml
+++ b/deploy/adapters/ansible/roles/keystone/vars/main.yml
@@ -9,6 +9,7 @@
---
packages_noarch:
- python-keystoneclient
+ - python3-keystoneclient
services_noarch: []
os_services:
@@ -16,9 +17,9 @@ os_services:
type: identity
region: RegionOne
description: "OpenStack Identity"
- publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
+ publicurl: "http://{{ public_vip.ip }}:5000/v3"
+ internalurl: "http://{{ internal_vip.ip }}:5000/v3"
+ adminurl: "http://{{ internal_vip.ip }}:35357/v3"
- name: glance
type: image
@@ -32,9 +33,9 @@ os_services:
type: compute
region: RegionOne
description: "OpenStack Compute"
- publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8774/v2.1/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8774/v2.1/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8774/v2.1/%\\(tenant_id\\)s"
- name: neutron
type: network
@@ -52,29 +53,37 @@ os_services:
internalurl: "http://{{ internal_vip.ip }}:8777"
adminurl: "http://{{ internal_vip.ip }}:8777"
+ - name: aodh
+ type: alarming
+ region: RegionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8042"
+ internalurl: "http://{{ internal_vip.ip }}:8042"
+ adminurl: "http://{{ internal_vip.ip }}:8042"
+
- name: cinder
type: volume
region: RegionOne
description: "OpenStack Block Storage"
- publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8776/v1/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8776/v1/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8776/v1/%\\(tenant_id\\)s"
- name: cinderv2
type: volumev2
region: RegionOne
description: "OpenStack Block Storage v2"
- publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8776/v2/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8776/v2/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8776/v2/%\\(tenant_id\\)s"
- name: heat
type: orchestration
region: RegionOne
description: "OpenStack Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8004/v1/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8004/v1/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8004/v1/%\\(tenant_id\\)s"
- name: heat-cfn
type: cloudformation
@@ -84,6 +93,22 @@ os_services:
internalurl: "http://{{ internal_vip.ip }}:8000/v1"
adminurl: "http://{{ internal_vip.ip }}:8000/v1"
+ - name: congress
+ type: policy
+ region: RegionOne
+ description: "OpenStack Policy Service"
+ publicurl: "http://{{ public_vip.ip }}:1789"
+ internalurl: "http://{{ internal_vip.ip }}:1789"
+ adminurl: "http://{{ internal_vip.ip }}:1789"
+
+# - name: swift
+# type: object-store
+# region: RegionOne
+# description: "OpenStack Object Storage"
+# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%\\(tenant_id\\)s"
+# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%\\(tenant_id\\)s"
+# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%\\(tenant_id\\)s"
+
os_users:
- user: admin
password: "{{ ADMIN_PASS }}"
@@ -134,6 +159,13 @@ os_users:
tenant: service
tenant_description: "Service Tenant"
+ - user: aodh
+ password: "{{ AODH_PASS }}"
+ email: aodh@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
- user: heat
password: "{{ HEAT_PASS }}"
email: heat@admin.com
@@ -141,9 +173,23 @@ os_users:
tenant: service
tenant_description: "Service Tenant"
+ - user: congress
+ password: "{{ CONGRESS_PASS }}"
+ email: congress@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
- user: demo
password: "{{ DEMO_PASS }}"
email: heat@demo.com
role: heat_stack_user
tenant: demo
tenant_description: "Demo Tenant"
+
+# - user: swift
+# password: "{{ CINDER_PASS }}"
+# email: swift@admin.com
+# role: admin
+# tenant: service
+# tenant_description: "Service Tenant"
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/controllers.py b/deploy/adapters/ansible/roles/moon/files/controllers.py
index fd107a5e..fd107a5e 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/controllers.py
+++ b/deploy/adapters/ansible/roles/moon/files/controllers.py
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/deb.conf b/deploy/adapters/ansible/roles/moon/files/deb.conf
index 6e1159a1..6e1159a1 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/deb.conf
+++ b/deploy/adapters/ansible/roles/moon/files/deb.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/get_deb_depends.py b/deploy/adapters/ansible/roles/moon/files/get_deb_depends.py
index d510bcf4..d510bcf4 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/files/get_deb_depends.py
+++ b/deploy/adapters/ansible/roles/moon/files/get_deb_depends.py
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/handlers/main.yml b/deploy/adapters/ansible/roles/moon/handlers/main.yml
index 608a8a09..608a8a09 100755
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/moon/handlers/main.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/main.yml b/deploy/adapters/ansible/roles/moon/tasks/main.yml
index a3511de7..a3511de7 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/moon/tasks/main.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/moon-compute.yml b/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml
index e4142b5f..e4142b5f 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/moon-compute.yml
+++ b/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/moon-controller.yml b/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml
index 95dd2e89..95dd2e89 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/moon-controller.yml
+++ b/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/moon.yml b/deploy/adapters/ansible/roles/moon/tasks/moon.yml
index 40e1c98c..40e1c98c 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/tasks/moon.yml
+++ b/deploy/adapters/ansible/roles/moon/tasks/moon.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/admin-openrc.sh b/deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh
index 6ba620ff..6ba620ff 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/admin-openrc.sh
+++ b/deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/api-paste.ini b/deploy/adapters/ansible/roles/moon/templates/api-paste.ini
index f99689b7..f99689b7 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/api-paste.ini
+++ b/deploy/adapters/ansible/roles/moon/templates/api-paste.ini
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/demo-openrc.sh b/deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh
index 5807e868..5807e868 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/demo-openrc.sh
+++ b/deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/keystone-paste.ini b/deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini
index cd9ebede..cd9ebede 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/keystone-paste.ini
+++ b/deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/keystone.conf b/deploy/adapters/ansible/roles/moon/templates/keystone.conf
index 649fc32c..649fc32c 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/keystone.conf
+++ b/deploy/adapters/ansible/roles/moon/templates/keystone.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/proxy-server.conf b/deploy/adapters/ansible/roles/moon/templates/proxy-server.conf
index 9bea7a8e..9bea7a8e 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/proxy-server.conf
+++ b/deploy/adapters/ansible/roles/moon/templates/proxy-server.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2
index 64d864af..64d864af 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/templates/wsgi-keystone.conf.j2
+++ b/deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/vars/Debian.yml b/deploy/adapters/ansible/roles/moon/vars/Debian.yml
index 0da81179..0da81179 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/moon/vars/Debian.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/vars/main.yml b/deploy/adapters/ansible/roles/moon/vars/main.yml
index cff8c7c2..cff8c7c2 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon/vars/main.yml
+++ b/deploy/adapters/ansible/roles/moon/vars/main.yml
diff --git a/deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml b/deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml
index d5444946..ca4e8088 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml
@@ -10,3 +10,6 @@
- name: restart neutron compute service
service: name={{ item }} state=restarted enabled=yes
with_items: services | union(services_noarch)
+
+- name: restart nova-compute services
+ service: name=nova-compute state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
index 3e4b24bc..375e325d 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
@@ -44,6 +44,12 @@
systemctl daemon-reload
when: ansible_os_family == 'RedHat'
+- name: fix openstack neutron plugin config file ubuntu
+ shell: |
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
+ when: ansible_os_family == "Debian"
+
- name: generate neutron compute service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
@@ -57,7 +63,7 @@
file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
- name: config neutron
- template: src=templates/neutron.conf
+ template: src=neutron.conf
dest=/etc/neutron/neutron.conf backup=yes
notify:
- restart neutron compute service
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/templates/neutron.conf b/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf
index a676e951..a676e951 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/templates/neutron.conf
+++ b/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf
diff --git a/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml b/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
index 8319e42c..83d7f323 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
@@ -11,9 +11,9 @@
packages:
- neutron-common
- neutron-plugin-ml2
- - openvswitch-datapath-dkms
+ - openvswitch-switch-dpdk
- openvswitch-switch
- neutron-plugin-openvswitch-agent
services:
- - neutron-plugin-openvswitch-agent
+ - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
index be64c41c..917a8356 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
@@ -31,7 +31,9 @@
with_items: services | union(services_noarch)
- name: get tenant id to fill neutron.conf
- shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}'
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack project show service | grep id | sed -n "2,1p" | awk '{print $4}'
register: NOVA_ADMIN_TENANT_ID
- name: update neutron conf
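The service-tenant lookup now goes through the OpenStack CLI with sourced credentials instead of the removed admin-token keystone client. The grep/sed/awk pipeline works, but for reference the same value can be read directly with the CLI's formatting options (illustrative alternative only, not what the playbook uses):

    . /opt/admin-openrc.sh
    openstack project show service -f value -c id
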
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
index 99b21135..31f7f17c 100644
--- a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
@@ -58,6 +58,12 @@
systemctl daemon-reload
when: ansible_os_family == 'RedHat'
+- name: fix openstack neutron plugin config file ubuntu
+ shell: |
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
+ when: ansible_os_family == "Debian"
+
- name: config l3 agent
template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
backup=yes
diff --git a/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
index 86d1af67..1a78ca8c 100644
--- a/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
@@ -9,7 +9,7 @@
---
packages:
- neutron-plugin-ml2
- - openvswitch-datapath-dkms
+ - openvswitch-switch-dpdk
- openvswitch-switch
- neutron-l3-agent
- neutron-dhcp-agent
@@ -17,7 +17,7 @@ packages:
services:
- openvswitch-switch
- - neutron-plugin-openvswitch-agent
+ - neutron-openvswitch-agent
openvswitch_agent: neutron-plugin-openvswitch-agent
diff --git a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
index 5b80f400..16315b36 100644
--- a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
@@ -20,22 +20,24 @@
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items: packages | union(packages_noarch)
+- name: restart virtlogd
+ service: name=virtlogd state=started enabled=yes
+ when: ansible_os_family == "Debian"
+
- name: enable auto start
file:
path=/usr/sbin/policy-rc.d
state=absent
when: ansible_os_family == "Debian"
-- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova.conf
- notify:
- - restart nova-compute services
+- name: get number of cpu support virtualization
+ shell: egrep -c '(vmx|svm)' /proc/cpuinfo
+ register: kvm_cpu_num
- name: update nova-compute conf
template: src={{ item }} dest=/etc/nova/{{ item }}
with_items:
+ - nova.conf
- nova-compute.conf
notify:
- restart nova-compute services
@@ -43,8 +45,13 @@
- name: generate neutron control service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
-
+#'
- name: remove nova sqlite db
shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
- meta: flush_handlers
+
+- name: restart nova-compute and libvirt-bin
+ shell: >
+ service nova-compute restart;
+ service libvirt-bin restart;
diff --git a/deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf b/deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf
index 1ac775b1..305d408b 100644
--- a/deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf
+++ b/deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf
@@ -2,7 +2,7 @@
compute_driver=libvirt.LibvirtDriver
force_raw_images = true
[libvirt]
-{% if deploy_type == 'virtual' %}
+{% if kvm_cpu_num.stdout_lines[0]|int == 0 %}
virt_type=qemu
{% else %}
virt_type=kvm
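virt_type is now chosen from the kvm_cpu_num fact registered in the compute tasks rather than from deploy_type, so hardware virtualization support is detected per host. The same check, run by hand on a compute node (sketch):

    egrep -c '(vmx|svm)' /proc/cpuinfo   # 0 means no VT-x/AMD-V exposed -> qemu; >0 -> kvm
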
diff --git a/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf b/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf
index e1e915c8..8d7e9a5f 100644
--- a/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf
+++ b/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf
@@ -1,77 +1,104 @@
[DEFAULT]
-block_device_allocate_retries=5
-block_device_allocate_retries_interval=300
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
+auth_strategy = keystone
+my_ip = {{ internal_ip }}
+use_neutron = True
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
+log-dir=/var/log/nova
state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
force_dhcp_release=True
+verbose={{ VERBOSE }}
+ec2_private_dns_show_ip=True
+enabled_apis=osapi_compute,metadata
+default_floating_pool={{ public_net_info.network }}
+metadata_listen={{ internal_ip }}
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
-enabled_apis=osapi_compute,metadata
-
-default_floating_pool={{ public_net_info.network }}
-auth_strategy = keystone
-
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
-
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
+[api_database]
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
+idle_timeout = 30
+pool_timeout = 10
+use_db_reconnect = True
+
[database]
-# The SQLAlchemy connection string used to connect to the database
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
idle_timeout = 30
-use_db_reconnect = True
pool_timeout = 10
+use_db_reconnect = True
+
+[glance]
+api_servers = http://{{ internal_vip.ip }}:9292
+host = {{ internal_vip.ip }}
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = nova
+password = {{ NOVA_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
-[glance]
-host = {{ internal_vip.ip }}
+[libvirt]
+use_virtio_for_bridges=True
[neutron]
url = http://{{ internal_vip.ip }}:9696
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+region_name = RegionOne
+project_name = service
+username = neutron
+password = {{ NEUTRON_PASS }}
+service_metadata_proxy = True
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
auth_strategy = keystone
admin_tenant_name = service
admin_username = neutron
admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
+
+[oslo_concurrency]
+lock_path=/var/lib/nova/tmp
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[vnc]
+enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+[wsgi]
+api_paste_config=/etc/nova/api-paste.ini
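The compute nova.conf is reorganized into Newton-style sections ([api_database], [glance], [keystone_authtoken], [neutron], [oslo_messaging_rabbit], [vnc], and so on), with transport_url and use_neutron=True in [DEFAULT]. A hedged smoke test once the template lands on a compute node:

    service nova-compute restart
    . /opt/admin-openrc.sh
    openstack compute service list --service nova-compute   # the host should report state "up"
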
diff --git a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
index bf1b0f6b..f332c97a 100644
--- a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
+++ b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
@@ -7,6 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+- name: nova api db sync
+ shell: su -s /bin/sh -c "nova-manage api_db sync" nova
+ ignore_errors: True
+ notify:
+ - restart nova service
+
- name: nova db sync
nova_manage: action=dbsync
notify:
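Newton splits the Nova API database out of the main database, so the new api_db sync step has to run before the existing dbsync task; ignore_errors presumably keeps re-runs idempotent. For manual runs the equivalent pair is (sketch):

    su -s /bin/sh -c "nova-manage api_db sync" nova
    su -s /bin/sh -c "nova-manage db sync" nova
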
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/files/opendaylight.service b/deploy/adapters/ansible/roles/odl_cluster/files/opendaylight.service
index 6c9e4c44..6c9e4c44 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/files/opendaylight.service
+++ b/deploy/adapters/ansible/roles/odl_cluster/files/opendaylight.service
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml
index da7356dc..efd359db 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml
@@ -20,10 +20,15 @@
- name: download oracle-jdk8 script file
get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-# "
+#"
- name: download odl package
get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/odl/{{ odl_pkg_url }}" dest=/opt/{{ odl_pkg_name }}
+# "
+- name: download odl pip package
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/pip/{{ networking_odl_pkg_name }}" dest=/opt/{{ networking_odl_pkg_name }}
+
+#"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
index 507d12ce..8d71606f 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
@@ -8,7 +8,7 @@
##############################################################################
---
- name: opendaylight system file
- template:
+ copy:
src: "{{ service_file.src }}"
dest: "{{ service_file.dst }}"
mode: 0755
@@ -44,3 +44,10 @@
template:
src: tomcat-server.xml
dest: "{{ odl_home }}/configuration/tomcat-server.xml"
+
+- name: create tomcat config
+ template:
+ src: jetty.xml
+ dest: "{{ odl_home }}/etc/jetty.xml"
+
+
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
index 85bb534a..869d264a 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
@@ -8,8 +8,19 @@
##############################################################################
---
-- name: install odl pip packages
- pip: name={{ item }} state=present
- with_items: odl_pip
-
+- name: patch odl pip package
+ shell: |
+ cd /opt
+ tar xf /opt/{{ networking_odl_pkg_name }}
+ rm -rf /opt/{{ networking_odl_pkg_name }}
+ sed -i 's/^neutron-lib.*/neutron-lib/' networking-odl-2.0.0/requirements.txt
+ tar zcf /opt/{{ networking_odl_pkg_name }} networking-odl-2.0.0
+ rm -rf networking-odl-2.0.0
+ cd -
+- name: odl pip package install
+ shell: |
+ cd /opt
+ pip install {{ networking_odl_pkg_name }}
+ rm -rf {{ networking_odl_pkg_name }}
+ cd -
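The role now repacks the bundled networking-odl-2.0.0 tarball to drop the neutron-lib version pin from requirements.txt before pip-installing it, presumably to keep pip from pulling in a neutron-lib newer than what the Newton packages ship.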
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
index 8dfaf4df..f44b373b 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
@@ -20,23 +20,3 @@
- name: turn off keepalived on control node
service: name=keepalived state=stopped
when: ansible_os_family == "Debian"
-
-################ l3 agent remove ###################
-
-- name: turn off neutron-l3-agent on control node
- service: name=neutron-l3-agent state=stopped
- when: odl_l3_agent == "Enable"
-
-- name: remove neutron-l3-agent daemon
- shell: >
- sed -i 'neutron-l3-agent/d' /opt/service ;
- mv /etc/init.d/neutron-l3-agent /home/ ;
- mv /etc/init/neutron-l3-agent.conf /home/ ;
- when: odl_l3_agent == "Enable" and ansible_os_family == "Debian"
-
-- name: remove neutron-l3-agent daemon
- shell: >
- sed -i 'neutron-l3-agent/d' /opt/service ;
- mv /lib/systemd/system/neutron-l3-agent.service /home/ ;
- when: odl_l3_agent == "Enable" and ansible_os_family == "RedHat"
-
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml
index 7ca38f17..d78a76e0 100755..100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml
@@ -29,6 +29,14 @@
- name: stop openstack services
include: 01_06_stop_openstack_services.yml
+- name: set opendaylight cluster
+ include: 05_set_opendaylight_cluster.yml
+ when: groups['odl']|length > 1
+
+- name: install moon
+ include: moon-odl.yml
+ when: moon == "Enable"
+
- name: start and check odl
include: 01_07_start_check_odl.yml
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
index c312490b..04f0ec61 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
@@ -8,6 +8,16 @@
##############################################################################
---
+- name: restart keepalived to recover external IP before check br-int
+ shell: service keepalived restart
+ when: inventory_hostname in groups['odl']
+ ignore_errors: True
+
+- name: restart opendaylight (for newton, opendaylight doesn't listen 6640 port, need restart)
+ shell: service opendaylight restart; sleep 60
+ when: inventory_hostname in groups['odl']
+ ignore_errors: True
+
- name: set opendaylight as the manager
command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
@@ -18,4 +28,3 @@
shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '};
#'
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
index 7eddf7fa..7eddf7fa 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
index 50f40db7..32952c51 100755..100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
@@ -22,7 +22,3 @@
- name: Provision ODL on Compute nodes
include: 02_odl_compute.yml
when: groups['odl']|length !=0 and inventory_hostname not in groups['odl']
-
-- name: Config nova
- include: 04_odl_l3_nova.yml
- when: groups['odl']|length !=0 and odl_l3_agent == "Enable"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/moon-odl.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml
index b89b2823..b89b2823 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/moon-odl.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml b/deploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml
index 3ee37509..50ac7c35 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml
@@ -34,7 +34,7 @@ DTD Configure//EN" "http://jetty.mortbay.org/configure.dtd">
<Arg>
<New class="org.eclipse.jetty.server.nio.SelectChannelConnector">
<Set name="host">
- <Property name="jetty.host" default="{{ internal_ip }}"/>
+ <Property name="jetty.host"/>
</Set>
<Set name="port">
<Property name="jetty.port" default="8181" />
@@ -48,24 +48,6 @@ DTD Configure//EN" "http://jetty.mortbay.org/configure.dtd">
</New>
</Arg>
</Call>
- <Call name="addConnector">
- <Arg>
- <New class="org.eclipse.jetty.server.nio.SelectChannelConnector">
- <Set name="host">
- <Property name="jetty.host" default="{{ internal_ip }}"/>
- </Set>
- <Set name="port">
- <Property name="jetty.port" default="8080" />
- </Set>
- <Set name="maxIdleTime">300000</Set>
- <Set name="Acceptors">2</Set>
- <Set name="statsOn">false</Set>
- <Set name="confidentialPort">8443</Set>
- <Set name="lowResourcesConnections">20000</Set>
- <Set name="lowResourcesMaxIdleTime">5000</Set>
- </New>
- </Arg>
- </Call>
<!-- =========================================================== -->
<!-- Configure Authentication Realms -->
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh
index 0d42e48b..5e3627bf 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh
@@ -10,5 +10,5 @@ cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2_odl]
password = admin
username = admin
-url = http://{{ internal_vip.ip }}:8080/controller/nb/v2/neutron
+url = http://{{ internal_vip.ip }}:8181/controller/nb/v2/neutron
EOT
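The ML2 ODL URL moves from 8080 to 8181, matching the jetty.xml cleanup above and the odl_api_port bump below, since the northbound REST API is served by Karaf's Jetty listener on 8181. A hedged connectivity check, using the admin/admin credentials this script configures and substituting the real internal VIP:

    curl -u admin:admin http://<internal_vip>:8181/controller/nb/v2/neutron/networks
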
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/moon-environment b/deploy/adapters/ansible/roles/odl_cluster/templates/moon-environment
index 9a13da8e..9a13da8e 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/moon-environment
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/moon-environment
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/settings.xml b/deploy/adapters/ansible/roles/odl_cluster/templates/settings.xml
index 5ba3b50c..5ba3b50c 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/templates/settings.xml
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/settings.xml
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
index b44107e2..640a264a 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
@@ -12,8 +12,10 @@ common_packages:
- crudini
service_ovs_name: openvswitch-switch
-service_ovs_agent_name: neutron-plugin-openvswitch-agent
+service_ovs_agent_name: neutron-openvswitch-agent
service_file:
- src: opendaylight.conf
- dst: /etc/init/opendaylight.conf
+ src: opendaylight.service
+ dst: /lib/systemd/system/opendaylight.service
+
+networking_odl_pkg_name: networking-odl-2.0.0.tar.gz
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
index 5b2676a8..e5f52b42 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
@@ -9,7 +9,7 @@
---
odl_username: admin
odl_password: admin
-odl_api_port: 8080
+odl_api_port: 8181
#odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.3.0-Lithium/distribution-karaf-0.3.0-Lithium.tar.gz
odl_pkg_url: karaf.tar.gz
@@ -18,7 +18,8 @@ odl_home: "/opt/opendaylight-0.3.0/"
odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'odl-restconf','odl-l2switch-switch','odl-openflowplugin-all','odl-mdsal-apidocs','odl-dlux-all','odl-adsal-northbound','odl-nsf-all','odl-ovsdb-openstack','odl-ovsdb-northbound','odl-dlux-core']
odl_extra_features: ['odl-restconf-all','odl-mdsal-clustering','odl-openflowplugin-flow-services','http','jolokia-osgi']
odl_features: "{{ odl_base_features + odl_extra_features }}"
-odl_api_port: 8080
+
+odl_aaa_moon: odl-aaa-moon.tar.gz
jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
jdk8_script_name: install_jdk8.tar
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
index 64fff472..c8ce1155 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
@@ -46,8 +46,6 @@
include: onos_controller.yml
when: inventory_hostname in groups['onos']
-- name: Install ONOS Cluster on Compute
+- name: Config ONOS Cluster
include: openvswitch.yml
when: groups['onos']|length !=0
-# when: groups['onos']|length !=0 and inventory_hostname not in groups['onos']
-
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
index 6d62a2e9..d51151a9 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
@@ -7,25 +7,41 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- name: upload onos driver package
- unarchive: src=networking-onos.tar dest=/opt/
+
+- name: get image http server
+ shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
+ register: http_server
+
+- name: download onos driver packages
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
+
+- name: upload onos sfc driver package
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}" dest=/opt/
+
+- name: unarchive onos driver package
+ command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
+
+- name: upload onos sfc driver package
+ command: su -s /bin/sh -c "tar xvf /opt/networking-sfc.tar -C /opt/"
- name: install onos driver
command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh"
+- name: install onos sfc driver
+ command: su -s /bin/sh -c "/opt/networking-sfc/install_driver.sh"
+
- name: install onos required packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items: packages
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
- name: download oracle-jdk8 package file
get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-- name: upload install_jdk8 scripts
- unarchive: src=install_jdk8.tar dest=/opt/
+
+- name: download oracle-jdk8 script file
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
+
+- name: unarchive onos driver package
+ command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
- name: install install_jdk8 package
command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
@@ -58,7 +74,7 @@
ignore_errors: True
- name: download jar repository
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/repository.tar" dest=~/.m2/
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
- name: extract jar repository
command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
@@ -75,47 +91,19 @@
sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-# notify:
-# - restart onos service
-
+
- name: configure onos boot feature
shell: >
sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
-#- name: create cluster json
-# template:
-# src: cluster.json
-# dest: "{{ onos_home }}/config/cluster.json"
-# notify:
-# - restart onos service
-
-#- name: create tablets json
-# template:
-# src: tablets.json
-# dest: "{{ onos_home }}/config/tablets.json"
-# notify:
-# - restart onos service
-
- name: wait for config time
shell: "sleep 10"
- name: start onos service
service: name=onos state=started enabled=yes
-- name: wait for restart time
- shell: "sleep 60"
-
-- name: start onos service
- service: name=onos state=restarted enabled=yes
-
- name: wait for onos start time
- shell: "sleep 60"
-
-- name: start onos service
- service: name=onos state=restarted enabled=yes
-
-- name: wait for onos start time
- shell: "sleep 100"
+ shell: "sleep 200"
- name: add onos auto start
shell: >
@@ -124,12 +112,9 @@
##########################################################################################################
################################ ONOS connect with OpenStack ################################
##########################################################################################################
-#- name: Run OpenVSwitch Script
-# include: openvswitch.yml
-
- name: Configure Neutron1
shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins onos_router;
+ crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_sfc.services.sfc.plugin.SfcPlugin, networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin, onos_router;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
@@ -143,13 +128,13 @@
- name: Configure Neutron2
command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-
- name: Configure Neutron3
shell: >
mysql -e "drop database if exists neutron_ml2;";
mysql -e "create database neutron_ml2 character set utf8;";
mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
+ su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
- name: Restart neutron-server
service: name=neutron-server state=restarted
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
index 47f0f6e8..aac787ea 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
@@ -7,38 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-#- name: remove neutron-plugin-openvswitch-agent auto start
-# shell: >
-# update-rc.d neutron-plugin-openvswitch-agent remove;
-# sed -i /neutron-plugin-openvswitch-agent/d /opt/service
-#- name: shut down and disable Neutron's agent services
-# service: name=neutron-plugin-openvswitch-agent state=stopped
-
-#- name: Stop the Open vSwitch service and clear existing OVSDB
-# shell: >
-# ovs-vsctl del-br br-int ;
-# ovs-vsctl del-br br-tun ;
-# ovs-vsctl del-manager ;
-
-#- name: get image http server
-# shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
-# register: http_server
-#
-#- name: download ovs
-# get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/openvswitch.tar" dest=/opt/openvswitch.tar
-#
-#- name: extract ovs
-# command: su -s /bin/sh -c "tar xvf /opt/openvswitch.tar -C /opt/"
-#
-#- name: update ovs
-# shell: >
-# cd /opt/openvswitch;
-# dpkg -i openvswitch-common_2.3.0-1_amd64.deb;
-# dpkg -i openvswitch-switch_2.3.0-1_amd64.deb;
-
-#- name: start up onos-external nic
-# command: su -s /bin/sh -c "ifconfig eth2 0 up"
- name: set veth port
shell: >
ip link add onos_port1 type veth peer name onos_port2;
@@ -46,26 +15,28 @@
ifconfig onos_port2 up;
ignore_errors: True
-- name: set veth to ovs
- shell: >
- export externamMac=`ifconfig eth1 | grep -Eo '\<[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'`;
- ifconfig onos_port2 hw ether $externamMac;
- ovs-vsctl add-port br-prv onos_port1;
- ignore_errors: True
+- name: add openflow-base feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
+ when: inventory_hostname in groups['onos']
-#- name: wait for onos start time
-# shell: "sleep 200"
+- name: add openflow feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
+ when: inventory_hostname in groups['onos']
- name: add ovsdatabase feature
command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
- when: inventory_hostname == groups['onos'][0]
+ when: inventory_hostname in groups['onos']
-- name: add openflow-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
+- name: add ovsdb-base feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'";
when: inventory_hostname in groups['onos']
-- name: add openflow feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
+- name: add onos driver ovsdb feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'";
+ when: inventory_hostname in groups['onos']
+
+- name: add ovsdb provider host feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'";
when: inventory_hostname in groups['onos']
- name: add vtn feature
@@ -79,23 +50,6 @@
- name: Set ONOS as the manager
command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
-- name: create public network
- shell: >
- export OS_PASSWORD=console;
- export OS_TENANT_NAME=admin;
- export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0;
- export OS_USERNAME=ADMIN;
- neutron net-create ext-net --shared --router:external=True;
- neutron subnet-create ext-net {{ public_net_info.floating_ip_cidr }} --name ext-subnet --allocation-pool start={{ public_net_info.floating_ip_start }},end={{ public_net_info.floating_ip_end }};
- when: inventory_hostname == groups['controller'][0]
-
-- name: set gateway mac address
- shell: >
- ping -c 1 {{ ansible_default_ipv4.gateway }};
- gatewayMac=`arp -a {{ ansible_default_ipv4.gateway }} | awk '{print $4}'`;
- /opt/onos/bin/onos "externalgateway-update -m $gatewayMac";
- when: inventory_hostname in groups['onos']
-
- name: delete default gateway
shell: >
route delete default;
diff --git a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
index 1cbc070d..f11f1102 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
@@ -6,9 +6,14 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-onos_pkg_url: http://downloads.onosproject.org/release/onos-1.3.0.tar.gz
-onos_pkg_name: onos-1.3.0.tar.gz
+onos_pkg_name: onos-1.6.0.tar.gz
onos_home: /opt/onos/
-karaf_dist: apache-karaf-3.0.3
+karaf_dist: apache-karaf-3.0.5
jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
-onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base,onos-openflow,onos-ovsdatabase, onos-app-vtn-onosfw
+jdk8_script_name: install_jdk8.tar
+onos_driver: networking-onos.tar
+onos_sfc_driver: networking-sfc.tar
+repository: repository.tar
+onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base, onos-openflow, onos-ovsdatabase, onos-ovsdb-base, onos-drivers-ovsdb, onos-ovsdb-provider-host, onos-app-vtn-onosfw
+
+
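The ONOS artifacts are bumped to onos-1.6.0 on apache-karaf-3.0.5, and the boot feature list grows the OVSDB-related features that the openvswitch tasks above now also install explicitly. Once the service is up, the installed features can be checked from the ONOS shell (sketch, using the same client path as elsewhere in this role):

    /opt/onos/bin/onos "feature:list -i" | grep -E 'ovsdb|openflow|vtn'
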
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml
index 0714d2e9..836cb78b 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml
@@ -4,17 +4,17 @@
- name: remove ovs and ovs-plugin daeman
shell: >
- sed -i '/neutron-plugin-openvswitch-agent/d' /opt/service ;
+ sed -i '/neutron-openvswitch-agent/d' /opt/service ;
sed -i '/openvswitch-switch/d' /opt/service ;
- name: stop ovs and ovs-plugin
- shell: service openvswitch-switch stop; service neutron-plugin-openvswitch-agent stop;
+ shell: service openvswitch-switch stop; service neutron-openvswitch-agent stop;
- name: remove ovs and ovs-plugin files
shell: >
- update-rc.d -f neutron-plugin-openvswitch-agent remove;
- mv /etc/init.d/neutron-plugin-openvswitch-agent /home/neutron-plugin-openvswitch-agent;
- mv /etc/init/neutron-plugin-openvswitch-agent.conf /home/neutron-plugin-openvswitch-agent.conf;
+ update-rc.d -f neutron-openvswitch-agent remove;
+ mv /etc/init.d/neutron-openvswitch-agent /home/neutron-openvswitch-agent;
+ mv /etc/init/neutron-openvswitch-agent.conf /home/neutron-openvswitch-agent.conf;
update-rc.d -f openvswitch-switch remove ;
mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ;
mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ;
diff --git a/deploy/adapters/ansible/roles/secgroup/templates/neutron.j2 b/deploy/adapters/ansible/roles/secgroup/templates/neutron.j2
index 9f3652c4..e7107660 100644
--- a/deploy/adapters/ansible/roles/secgroup/templates/neutron.j2
+++ b/deploy/adapters/ansible/roles/secgroup/templates/neutron.j2
@@ -1,6 +1,6 @@
[securitygroup]
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = False
+firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+enable_security_group = True
[agent]
prevent_arp_spoofing = False
diff --git a/deploy/adapters/ansible/roles/secgroup/templates/nova.j2 b/deploy/adapters/ansible/roles/secgroup/templates/nova.j2
index 91fa6cd2..7dbc216a 100644
--- a/deploy/adapters/ansible/roles/secgroup/templates/nova.j2
+++ b/deploy/adapters/ansible/roles/secgroup/templates/nova.j2
@@ -1,3 +1,3 @@
[DEFAULT]
firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = nova
+security_group_api = neutron
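Taken together with the neutron.j2 change above, security-group handling is delegated to Neutron (security_group_api = neutron, enable_security_group = True) while both Nova and the Neutron agent keep Noop firewall drivers, presumably because filtering is expected to be enforced in the OVS/SDN datapath rather than through hybrid iptables bridges.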
diff --git a/deploy/adapters/ansible/roles/secgroup/vars/Debian.yml b/deploy/adapters/ansible/roles/secgroup/vars/Debian.yml
index a6669088..221a3d92 100644
--- a/deploy/adapters/ansible/roles/secgroup/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/secgroup/vars/Debian.yml
@@ -25,11 +25,11 @@ controller_services:
- nova-novncproxy
- nova-scheduler
- neutron-server
- - neutron-plugin-openvswitch-agent
+ - neutron-openvswitch-agent
- neutron-l3-agent
- neutron-dhcp-agent
- neutron-metadata-agent
compute_services:
- nova-compute
- - neutron-plugin-openvswitch-agent
+ - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init
index c27a8bf8..41ccb988 100755
--- a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init
+++ b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init
@@ -1,13 +1,17 @@
-#!/bin/bash
-## BEGIN INIT INFO
-# Provides: anamon.init
-# Default-Start: 3 5
-# Default-Stop: 0 1 2 4 6
-# Required-Start: $network
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: anamon.init
+# Required-Start: $network
+# Required-Stop:
+# Should-Start:
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
# Short-Description: Starts the cobbler anamon boot notification program
-# Description: anamon runs the first time a machine is booted after
-# installation.
-## END INIT INFO
+# Description: anamon runs the first time a machine is booted after installation.
+### END INIT INFO
+
+
#
# anamon.init: Starts the cobbler post-install boot notification program
diff --git a/deploy/adapters/ansible/roles/storage/files/storage b/deploy/adapters/ansible/roles/storage/files/storage
index 775e8fd7..3acc6115 100755
--- a/deploy/adapters/ansible/roles/storage/files/storage
+++ b/deploy/adapters/ansible/roles/storage/files/storage
@@ -1,2 +1,10 @@
#! /bin/bash
+### BEGIN INIT INFO
+# Provides: Storage
+# Required-Start: $remote_fs $network
+# Required-Stop: $remote_fs $network
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Description: Storage
+### END INIT INFO
loop_dev=`sh /opt/setup_storage/losetup.sh`
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/main.yml b/deploy/adapters/ansible/roles/swift/tasks/main.yml
index 0f083146..0f083146 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/swift/tasks/main.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-compute1.yml b/deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml
index be00484b..be00484b 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-compute1.yml
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller1.yml b/deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml
index 36d05040..36d05040 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller1.yml
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller2.yml b/deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml
index 92d4ab22..92d4ab22 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller2.yml
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift.yml b/deploy/adapters/ansible/roles/swift/tasks/swift.yml
index 4e2651a7..4e2651a7 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift.yml
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift.yml
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/account-server.conf b/deploy/adapters/ansible/roles/swift/templates/account-server.conf
index ea84799f..ea84799f 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/account-server.conf
+++ b/deploy/adapters/ansible/roles/swift/templates/account-server.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/container-server.conf b/deploy/adapters/ansible/roles/swift/templates/container-server.conf
index 88cd2ebb..88cd2ebb 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/container-server.conf
+++ b/deploy/adapters/ansible/roles/swift/templates/container-server.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/object-server.conf b/deploy/adapters/ansible/roles/swift/templates/object-server.conf
index effd4f22..effd4f22 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/object-server.conf
+++ b/deploy/adapters/ansible/roles/swift/templates/object-server.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/proxy-server.conf b/deploy/adapters/ansible/roles/swift/templates/proxy-server.conf
index b76796cf..b76796cf 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/proxy-server.conf
+++ b/deploy/adapters/ansible/roles/swift/templates/proxy-server.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/rsyncd.conf b/deploy/adapters/ansible/roles/swift/templates/rsyncd.conf
index 703c55eb..703c55eb 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/rsyncd.conf
+++ b/deploy/adapters/ansible/roles/swift/templates/rsyncd.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/swift.conf b/deploy/adapters/ansible/roles/swift/templates/swift.conf
index 9a31501b..9a31501b 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/swift.conf
+++ b/deploy/adapters/ansible/roles/swift/templates/swift.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/Debian.yml b/deploy/adapters/ansible/roles/swift/vars/Debian.yml
index 39aea32d..39aea32d 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/swift/vars/Debian.yml
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/main.yml b/deploy/adapters/ansible/roles/swift/vars/main.yml
index 540068da..540068da 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/main.yml
+++ b/deploy/adapters/ansible/roles/swift/vars/main.yml
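The swift entries above are pure renames out of the release-specific openstack_mitaka_xenial and openstack_newton_xenial directories into deploy/adapters/ansible/roles: the index lines show identical blob hashes on both sides (for example 0f083146..0f083146), so no file content changed. A quick local check, sketched with a <commit> placeholder for this change:

    # list only the 100%-similarity renames touching the swift role
    git show --stat --find-renames=100% <commit> -- deploy/adapters/ansible/roles/swift
    # follow one of the moved files across the rename
    git log --follow --oneline <commit> -- deploy/adapters/ansible/roles/swift/tasks/main.yml
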
diff --git a/deploy/adapters/ansible/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/roles/tacker/templates/tacker.j2
index d7311f62..ae0f644a 100644
--- a/deploy/adapters/ansible/roles/tacker/templates/tacker.j2
+++ b/deploy/adapters/ansible/roles/tacker/templates/tacker.j2
@@ -1,29 +1,426 @@
[DEFAULT]
-bind_host = {{ internal_ip }}
-bind_port = 8888
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = True
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = True
+
+# Where to store Tacker state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/tacker
+
+# Where to store lock files
+lock_path = $state_path/lock
+
auth_strategy = keystone
policy_file = /usr/local/etc/tacker/policy.json
-debug = True
-verbose = True
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
use_syslog = False
-state_path = /var/lib/tacker
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ internal_ip }}
+
+# Port to bind the API server to
+bind_port = 8888
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of tacker.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Tacker core plugin entrypoint to be loaded from the
+# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the tacker source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+# core_plugin =
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the tacker source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+service_plugins = vnfm,nfvo
+
+# Paste configuration file
+# api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone' (default), 'noauth'.
+# auth_strategy = keystone
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Tacker is
+# being used in conjunction with nova security groups
+# allow_overlapping_ips = False
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = tacker.openstack.common.rpc.impl_kombu
+# Size of RPC thread pool
+# rpc_thread_pool_size = 64
+# Size of RPC connection pool
+# rpc_conn_pool_size = 30
+# Seconds to wait for a response from call or multicall
+# rpc_response_timeout = 60
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+# rpc_cast_timeout = 30
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = tacker
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# IP address of the RabbitMQ installation
+# rabbit_host = localhost
+# Password of the RabbitMQ server
+# rabbit_password = guest
+# Port where RabbitMQ server is running/listening
+# rabbit_port = 5672
+# RabbitMQ single node or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts defaults to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+# rabbit_userid = guest
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+
+# QPID
+# rpc_backend=tacker.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single node or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts defaults to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=tacker.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = tacker.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = tacker.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = tacker.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+# default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+# notification_topics = notifications
+
+# Default maximum number of items returned in a single response.
+# A value of 'infinite' or any negative value means no limit; otherwise the
+# value must be greater than 0. If the number of items requested is greater
+# than pagination_max_limit, the server returns at most pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+# agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# Allow auto-scheduling of networks to DHCP agents. Non-hosted networks are
+# scheduled to the first DHCP agent that sends a get_active_networks message
+# to the tacker server.
+# network_auto_schedule = True
+
+# Allow auto-scheduling of routers to L3 agents. Non-hosted routers are
+# scheduled to the first L3 agent that sends a sync_routers message to the tacker server.
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+# api_workers = 0
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+# rpc_workers = 0
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== tacker nova interactions ==========
+# Send notification to nova when port status is active.
+# notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+# notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+# nova_url = http://127.0.0.1:8774/v3
+
+# Name of nova region to use. Useful if keystone manages more than one region
+# nova_region_name =
+
+# Username for connection to nova in admin context
+# nova_admin_username =
+
+# The uuid of the admin nova tenant
+# nova_admin_tenant_id =
+
+# Password for connection to nova in admin context.
+# nova_admin_password =
+
+# Authorization URL for connection to nova in admin context.
+# nova_admin_auth_url =
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova url
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of tacker nova interactions ==========
+
+[agent]
+# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
[keystone_authtoken]
+signing_dir = /var/cache/tacker
+#cafile = /opt/stack/data/ca-bundle.pem
+#project_domain_id = default
+project_name = service
+#user_domain_id = default
password = console
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+username = tacker
auth_url = http://{{ internal_vip.ip }}:35357
-project_name = service
+auth_plugin = password
+identity_uri = http://{{ internal_vip.ip }}:5000
+auth_uri = http://{{ internal_vip.ip }}:5000
-[agent]
-root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/tacker
connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main tacker server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# setting it to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
-[servicevm_nova]
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[tacker]
+# Specify drivers for hosting device
+# infra_driver = heat,nova,noop
+
+# Specify drivers for mgmt
+# mgmt_driver = noop,openwrt
+
+# Specify drivers for monitoring
+# monitor_driver = ping, http_ping
+
+[nfvo_vim]
+# Supported VIM drivers: resource orchestration controllers such as OpenStack, kvm.
+# Default VIM driver is OpenStack.
+# vim_drivers = openstack
+# Default VIM placement if a vim id is not provided:
+default_vim = VIM0
+
+[vim_keys]
+#openstack = /etc/tacker/vim/fernet_keys
+[tacker_nova]
+# parameters for novaclient to talk to nova
+region_name = RegionOne
+#project_domain_id = default
+project_name = service
+#user_domain_id = default
password = console
+username = nova
auth_url = http://{{ internal_vip.ip }}:35357
+auth_plugin = password
-[servicevm_heat]
+[tacker_heat]
heat_uri = http://{{ internal_vip.ip }}:8004/v1
-
+stack_retries = 60
+stack_retry_wait = 5
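The expanded tacker.j2 above consists mostly of commented-out defaults; the lines that actually drive this deployment are service_plugins = vnfm,nfvo, the keystone_authtoken block, the MySQL connection string, and default_vim = VIM0. A rough post-deploy sanity check is sketched below, assuming the template is rendered to /usr/local/etc/tacker/tacker.conf (the destination path is not shown in this diff) and that the crudini utility is available on the controller:

    # confirm the values rendered from the template
    crudini --get /usr/local/etc/tacker/tacker.conf DEFAULT service_plugins   # expect: vnfm,nfvo
    crudini --get /usr/local/etc/tacker/tacker.conf nfvo_vim default_vim      # expect: VIM0
    # apply the schema and start the API against the rendered file
    tacker-db-manage --config-file /usr/local/etc/tacker/tacker.conf upgrade head
    tacker-server --config-file /usr/local/etc/tacker/tacker.conf --log-file /var/log/tacker/tacker.log
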