Diffstat (limited to 'compass/deploy/ansible/roles')
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/handlers/main.yml | 6
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml | 20
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml | 20
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/tasks/main.yml | 13
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini | 71
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf | 63
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh | 6
-rw-r--r--  compass/deploy/ansible/roles/cinder-volume/files/loop.yml | 1
-rw-r--r--  compass/deploy/ansible/roles/cinder-volume/handlers/main.yml | 3
-rw-r--r--  compass/deploy/ansible/roles/cinder-volume/tasks/main.yml | 55
-rw-r--r--  compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf | 62
-rw-r--r--  compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list | 1
-rw-r--r--  compass/deploy/ansible/roles/common/tasks/main.yml | 28
-rw-r--r--  compass/deploy/ansible/roles/common/templates/hosts | 22
-rw-r--r--  compass/deploy/ansible/roles/common/templates/ntp.conf | 56
-rw-r--r--  compass/deploy/ansible/roles/dashboard/tasks/main.yml | 30
-rw-r--r--  compass/deploy/ansible/roles/dashboard/templates/local_settings.py | 511
-rw-r--r--  compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf | 14
-rw-r--r--  compass/deploy/ansible/roles/database/files/my.cnf | 131
-rw-r--r--  compass/deploy/ansible/roles/database/tasks/main.yml | 12
-rw-r--r--  compass/deploy/ansible/roles/database/tasks/mariadb.yml | 46
-rw-r--r--  compass/deploy/ansible/roles/database/tasks/mysql.yml | 22
-rw-r--r--  compass/deploy/ansible/roles/database/templates/data.j2 | 39
-rw-r--r--  compass/deploy/ansible/roles/database/templates/my.cnf | 134
-rw-r--r--  compass/deploy/ansible/roles/database/templates/wsrep.cnf | 126
-rw-r--r--  compass/deploy/ansible/roles/glance/handlers/main.yml | 6
-rw-r--r--  compass/deploy/ansible/roles/glance/tasks/glance_config.yml | 29
-rw-r--r--  compass/deploy/ansible/roles/glance/tasks/glance_install.yml | 26
-rw-r--r--  compass/deploy/ansible/roles/glance/tasks/main.yml | 18
-rw-r--r--  compass/deploy/ansible/roles/glance/tasks/nfs.yml | 41
-rw-r--r--  compass/deploy/ansible/roles/glance/templates/glance-api.conf | 677
-rw-r--r--  compass/deploy/ansible/roles/glance/templates/glance-registry.conf | 190
-rw-r--r--  compass/deploy/ansible/roles/glance/templates/image_upload.sh | 2
-rw-r--r--  compass/deploy/ansible/roles/ha/files/galera_chk | 10
-rw-r--r--  compass/deploy/ansible/roles/ha/files/mysqlchk | 15
-rw-r--r--  compass/deploy/ansible/roles/ha/files/notify.sh | 4
-rw-r--r--  compass/deploy/ansible/roles/ha/handlers/main.yml | 9
-rw-r--r--  compass/deploy/ansible/roles/ha/tasks/main.yml | 94
-rw-r--r--  compass/deploy/ansible/roles/ha/templates/failover.j2 | 65
-rw-r--r--  compass/deploy/ansible/roles/ha/templates/haproxy.cfg | 133
-rw-r--r--  compass/deploy/ansible/roles/ha/templates/keepalived.conf | 42
-rw-r--r--  compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml | 16
-rw-r--r--  compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml | 29
-rw-r--r--  compass/deploy/ansible/roles/keystone/tasks/main.yml | 13
-rw-r--r--  compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh | 6
-rw-r--r--  compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh | 5
-rw-r--r--  compass/deploy/ansible/roles/keystone/templates/keystone.conf | 1317
-rw-r--r--  compass/deploy/ansible/roles/keystone/templates/keystone_init | 43
-rw-r--r--  compass/deploy/ansible/roles/monitor/files/check_service.sh | 7
-rw-r--r--  compass/deploy/ansible/roles/monitor/files/root | 1
-rw-r--r--  compass/deploy/ansible/roles/monitor/tasks/main.yml | 11
-rw-r--r--  compass/deploy/ansible/roles/mq/tasks/main.yml | 5
-rw-r--r--  compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml | 45
-rw-r--r--  compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml | 27
-rw-r--r--  compass/deploy/ansible/roles/mq/templates/.erlang.cookie | 1
-rw-r--r--  compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf | 1
-rw-r--r--  compass/deploy/ansible/roles/neutron-common/handlers/main.yml | 13
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/defaults/main.yml | 2
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/handlers/main.yml | 13
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/tasks/main.yml | 55
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini | 90
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf | 2
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot | 25
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini | 81
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini | 46
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini | 108
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf | 465
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf | 466
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh | 4
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/nova.conf | 73
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/handlers/main.yml | 24
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/tasks/main.yml | 13
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml | 10
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml | 29
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini | 90
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf | 2
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot | 25
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini | 81
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini | 46
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini | 108
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf | 465
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf | 466
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh | 4
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/nova.conf | 69
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/handlers/main.yml | 21
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml | 20
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/tasks/main.yml | 114
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/tasks/odl.yml | 13
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini | 90
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf | 2
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot | 25
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini | 81
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini | 46
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini | 108
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf | 465
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/neutron.conf | 466
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh | 4
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/nova.conf | 69
-rw-r--r--  compass/deploy/ansible/roles/nova-compute/handlers/main.yml | 3
-rw-r--r--  compass/deploy/ansible/roles/nova-compute/tasks/main.yml | 21
-rw-r--r--  compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf | 7
-rw-r--r--  compass/deploy/ansible/roles/nova-compute/templates/nova.conf | 73
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/handlers/main.yml | 24
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/tasks/main.yml | 13
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml | 16
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml | 35
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini | 90
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf | 2
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot | 25
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini | 81
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini | 46
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini | 108
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf | 465
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/neutron.conf | 466
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh | 4
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/nova.conf | 72
-rw-r--r--  compass/deploy/ansible/roles/repo/tasks/main.yml | 6
-rw-r--r--  compass/deploy/ansible/roles/repo/templates/sources.list | 1
118 files changed, 0 insertions, 10337 deletions
diff --git a/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml b/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml
deleted file mode 100644
index ef671dd..0000000
--- a/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart cinder-scheduler
- service: name=cinder-scheduler state=restarted enabled=yes
-- name: restart cinder-api
- service: name=cinder-api state=restarted enabled=yes
-
diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml
deleted file mode 100644
index 7796cf7..0000000
--- a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: sync cinder db
- shell: su -s /bin/sh -c "cinder-manage db sync" cinder
- register: result
- until: result.rc == 0
- retries: 5
- delay: 3
- notify:
- - restart cinder-scheduler
- - restart cinder-api
-
-- meta: flush_handlers
-
-- name: upload cinder keystone register script
- template: src=cinder_init.sh dest=/opt/cinder_init.sh mode=0744
-
-- name: run cinder register script
- shell: for i in {0..5}; do /opt/cinder_init.sh && touch cinder_init_complete; if [ $? != 0 ]; then sleep 5; else break; fi; done
- args:
- creates: cinder_init_complete
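
Note: the removed task drives its retries with a hand-rolled shell for-loop and a relative marker file (cinder_init_complete lands in whatever directory the task runs from). The "sync cinder db" task above already uses Ansible's native retry keywords; a minimal sketch of the register step in the same style, assuming an absolute marker path of /opt/cinder_init_complete:

    - name: run cinder register script
      shell: /opt/cinder_init.sh && touch /opt/cinder_init_complete
      args:
        creates: /opt/cinder_init_complete
      register: result
      until: result.rc == 0
      retries: 5
      delay: 5
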
diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml
deleted file mode 100644
index 03ad432..0000000
--- a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: install cinder packages
- apt: name={{ item }} state=present force=yes
- with_items:
- - cinder-api
- - cinder-scheduler
- - python-cinderclient
-
-- name: generate cinder service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - cinder-api
- - cinder-scheduler
-
-- name: upload cinder conf
- template: src=cinder.conf dest=/etc/cinder/cinder.conf
- notify:
- - restart cinder-scheduler
- - restart cinder-api
-
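Note: shell echo {{ item }} >> /opt/service appends a line on every run, so repeated plays duplicate entries in /opt/service. A sketch of an idempotent equivalent using the lineinfile module:

    - name: generate cinder service list
      lineinfile: dest=/opt/service line={{ item }} create=yes
      with_items:
        - cinder-api
        - cinder-scheduler
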
diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml
deleted file mode 100644
index 1dbe91f..0000000
--- a/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- include: cinder_install.yml
- tags:
- - install
- - cinder-install
- - cinder
-
-- include: cinder_config.yml
- when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
- tags:
- - config
- - cinder-config
- - cinder
diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini b/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini
deleted file mode 100644
index b568a17..0000000
--- a/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini
+++ /dev/null
@@ -1,71 +0,0 @@
-#############
-# OpenStack #
-#############
-
-[composite:osapi_volume]
-use = call:cinder.api:root_app_factory
-/: apiversions
-/v1: openstack_volume_api_v1
-/v2: openstack_volume_api_v2
-
-[composite:openstack_volume_api_v1]
-use = call:cinder.api.middleware.auth:pipeline_factory
-noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
-keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
-keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
-
-[composite:openstack_volume_api_v2]
-use = call:cinder.api.middleware.auth:pipeline_factory
-noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
-keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
-keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
-
-[filter:request_id]
-paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
-
-[filter:faultwrap]
-paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
-
-[filter:osprofiler]
-paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
-hmac_keys = SECRET_KEY
-enabled = yes
-
-[filter:noauth]
-paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
-
-[filter:sizelimit]
-paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
-
-[app:apiv1]
-paste.app_factory = cinder.api.v1.router:APIRouter.factory
-
-[app:apiv2]
-paste.app_factory = cinder.api.v2.router:APIRouter.factory
-
-[pipeline:apiversions]
-pipeline = faultwrap osvolumeversionapp
-
-[app:osvolumeversionapp]
-paste.app_factory = cinder.api.versions:Versions.factory
-
-[filter:authtoken]
-paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
-# auth_host = 127.0.0.1
-# auth_port = 35357
-# auth_protocol = http
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = cinder
-admin_password = {{ CINDER_PASS }}
-
-##########
-# Shared #
-##########
-
-[filter:keystonecontext]
-paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf b/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf
deleted file mode 100644
index e34fd2f..0000000
--- a/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf
+++ /dev/null
@@ -1,63 +0,0 @@
-[DEFAULT]
-rootwrap_config = /etc/cinder/rootwrap.conf
-api_paste_config = /etc/cinder/api-paste.ini
-iscsi_helper = tgtadm
-volume_name_template = volume-%s
-volume_group = cinder-volumes
-verbose = {{ VERBOSE }}
-debug = {{ DEBUG }}
-auth_strategy = keystone
-state_path = /var/lib/cinder
-lock_path = /var/lock/cinder
-notification_driver=cinder.openstack.common.notifier.rpc_notifier
-volumes_dir = /var/lib/cinder/volumes
-
-log_file=/var/log/cinder/cinder.log
-
-control_exchange = cinder
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-my_ip = {{ storage_controller_host }}
-
-glance_host = {{ HA_VIP }}
-glance_port = 9292
-api_rate_limit = False
-storage_availability_zone = nova
-
-quota_volumes = 10
-quota_gigabytes=1000
-quota_driver=cinder.quota.DbQuotaDriver
-
-osapi_volume_listen = {{ storage_controller_host }}
-osapi_volume_listen_port = 8776
-
-db_backend = sqlalchemy
-volume_name_template = volume-%s
-snapshot_name_template = snapshot-%s
-
-max_gigabytes=10000
-volume_group=cinder-volumes
-
-volume_clear=zero
-volume_clear_size=10
-
-iscsi_ip_address={{ storage_controller_host }}
-iscsi_port=3260
-iscsi_helper=tgtadm
-
-volumes_dir=/var/lib/cinder/volumes
-
-volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = cinder
-admin_password = {{ CINDER_PASS }}
-
-[database]
-connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh b/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh
deleted file mode 100644
index 0ec61b6..0000000
--- a/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin
-
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s
-
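Note: these keystone CLI calls are not idempotent — user-create and service-create fail once the objects exist, which is why cinder_config.yml wraps the script in a retry loop and a marker file. One way to guard an individual call, sketched with the same CLI (the user-list grep check is illustrative, not part of the original script):

    - name: create cinder service user if absent
      shell: >
        keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-list
        | grep -q ' cinder '
        || keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0
        user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com
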
diff --git a/compass/deploy/ansible/roles/cinder-volume/files/loop.yml b/compass/deploy/ansible/roles/cinder-volume/files/loop.yml
deleted file mode 100644
index e872652..0000000
--- a/compass/deploy/ansible/roles/cinder-volume/files/loop.yml
+++ /dev/null
@@ -1 +0,0 @@
-physical_device: /dev/loop0
diff --git a/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml b/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml
deleted file mode 100644
index ad917ce..0000000
--- a/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart cinder-volume
- service: name=cinder-volume state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml b/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml
deleted file mode 100644
index 8c0e626..0000000
--- a/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml
+++ /dev/null
@@ -1,55 +0,0 @@
----
-- name: install cinder-volume and lvm2 packages
- apt: name={{ item }} state=present force=yes
- with_items:
- - cinder-volume
- - lvm2
-
-- name: generate cinder volume service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - cinder-volume
-
-- name: check if physical device exists
- stat: path={{ physical_device }}
- register: st
-
-- name: replace physical_device if st is false
- local_action: copy src=loop.yml dest=/tmp/loop.yml
- when: st.stat.exists == False
-
-- name: load loop.yml
- include_vars: /tmp/loop.yml
- when: st.stat.exists == False
-
-- name: check if cinder-volumes is mounted
- shell: ls /mnt
- register: cindervolumes
-
-- name: get available partition size
- shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }'
- register: partition_size
-
-- name: if not mounted, mount it
- shell: dd if=/dev/zero of=/mnt/cinder-volumes
- bs=1 count=0 seek={{ partition_size.stdout }}
- when: cindervolumes.stdout != 'cinder-volumes'
-
-- name: get first lo device
- shell: ls /dev/loop* | egrep 'loop[0-9]+'|sed -n 1p
- register: first_lo
- when: cindervolumes.stdout != 'cinder-volumes'
-
-- name: do a losetup on /mnt/cinder-volumes
- shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes
- when: cindervolumes.stdout != 'cinder-volumes'
-
-- name: create physical and group volumes
- lvg: vg=cinder-volumes pvs={{ physical_device }}
- vg_options=--force
-
-- name: upload cinder-volume configuration
- template: src=cinder.conf dest=/etc/cinder/cinder.conf
- backup=yes
- notify:
- - restart cinder-volume
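
Note: when {{ physical_device }} is missing, this play falls back to a sparse file sized from df /, attaches it to whatever ls /dev/loop* lists first, and then builds the volume group on the loop.yml value of /dev/loop0 — two lookups that can disagree if loop0 is already in use. util-linux's losetup -f both finds and claims the first free device; a sketch under that assumption:

    - name: attach backing file to the first free loop device
      shell: losetup -f /mnt/cinder-volumes && losetup -j /mnt/cinder-volumes | cut -d: -f1
      register: loop_dev
      when: cindervolumes.stdout != 'cinder-volumes'
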
diff --git a/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf b/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf
deleted file mode 100644
index aa3b8cc..0000000
--- a/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf
+++ /dev/null
@@ -1,62 +0,0 @@
-[DEFAULT]
-rootwrap_config = /etc/cinder/rootwrap.conf
-api_paste_config = /etc/cinder/api-paste.ini
-iscsi_helper = tgtadm
-volume_name_template = volume-%s
-volume_group = cinder-volumes
-verbose = True
-auth_strategy = keystone
-state_path = /var/lib/cinder
-lock_path = /var/lock/cinder
-notification_driver=cinder.openstack.common.notifier.rpc_notifier
-volumes_dir = /var/lib/cinder/volumes
-
-log_file=/var/log/cinder/cinder.log
-
-control_exchange = cinder
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-my_ip = {{ storage_controller_host }}
-
-glance_host = {{ HA_VIP }}
-glance_port = 9292
-api_rate_limit = False
-storage_availability_zone = nova
-
-quota_volumes = 10
-quota_gigabytes=1000
-quota_driver=cinder.quota.DbQuotaDriver
-
-osapi_volume_listen = {{ storage_controller_host }}
-osapi_volume_listen_port = 8776
-
-db_backend = sqlalchemy
-volume_name_template = volume-%s
-snapshot_name_template = snapshot-%s
-
-max_gigabytes=10000
-volume_group=cinder-volumes
-
-volume_clear=zero
-volume_clear_size=10
-
-iscsi_ip_address={{ storage_controller_host }}
-iscsi_port=3260
-iscsi_helper=tgtadm
-
-volumes_dir=/var/lib/cinder/volumes
-
-volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = cinder
-admin_password = {{ CINDER_PASS }}
-
-[database]
-connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
diff --git a/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list b/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list
deleted file mode 100644
index 920f3d2..0000000
--- a/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list
+++ /dev/null
@@ -1 +0,0 @@
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
diff --git a/compass/deploy/ansible/roles/common/tasks/main.yml b/compass/deploy/ansible/roles/common/tasks/main.yml
deleted file mode 100644
index ce595f5..0000000
--- a/compass/deploy/ansible/roles/common/tasks/main.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: install ubuntu-cloud-keyring(ubuntu)
- apt: name={{ item }} state=latest
- with_items:
- - ubuntu-cloud-keyring
-
-- name: update hosts files to all hosts
- template: src=hosts
- dest=/etc/hosts
- backup=yes
-
-- name: install common packages
- apt: name={{ item }} state=latest
- with_items:
- - python-pip
- - python-dev
- - python-mysqldb
- - ntp
-
-- name: restart ntp
- command: su -s /bin/sh -c "service ntp stop; ntpd -gq; hwclock --systohc"
- ignore_errors: True
-
-- name: update ntp conf
- template: src=ntp.conf dest=/etc/ntp.conf backup=yes
-
-- name: restart ntp
- service: name=ntp state=restarted enabled=yes
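
Note: two tasks in this file share the name "restart ntp": the first forces a one-shot resync (ntpd -gq) before the config is templated, the second bounces the service afterwards. Because the final restart runs unconditionally, ntp restarts on every play even when ntp.conf is unchanged; a handler notified by the template would keep it conditional. A sketch:

    - name: update ntp conf
      template: src=ntp.conf dest=/etc/ntp.conf backup=yes
      notify:
        - restart ntp

    # handlers/main.yml
    - name: restart ntp
      service: name=ntp state=restarted enabled=yes
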
diff --git a/compass/deploy/ansible/roles/common/templates/hosts b/compass/deploy/ansible/roles/common/templates/hosts
deleted file mode 100644
index 9d27c0a..0000000
--- a/compass/deploy/ansible/roles/common/templates/hosts
+++ /dev/null
@@ -1,22 +0,0 @@
-# compute-controller
-10.145.89.136 host-136
-# database
-10.145.89.136 host-136
-# messaging
-10.145.89.136 host-136
-# storage-controller
-10.145.89.138 host-138
-# image
-10.145.89.138 host-138
-# identity
-10.145.89.136 host-136
-# network-server
-10.145.89.138 host-138
-# dashboard
-10.145.89.136 host-136
-# storage-volume
-10.145.89.139 host-139
-# network-worker
-10.145.89.139 host-139
-# compute-worker
-10.145.89.137 host-137
diff --git a/compass/deploy/ansible/roles/common/templates/ntp.conf b/compass/deploy/ansible/roles/common/templates/ntp.conf
deleted file mode 100644
index c613809..0000000
--- a/compass/deploy/ansible/roles/common/templates/ntp.conf
+++ /dev/null
@@ -1,56 +0,0 @@
-# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
-
-driftfile /var/lib/ntp/ntp.drift
-
-
-# Enable this if you want statistics to be logged.
-#statsdir /var/log/ntpstats/
-
-statistics loopstats peerstats clockstats
-filegen loopstats file loopstats type day enable
-filegen peerstats file peerstats type day enable
-filegen clockstats file clockstats type day enable
-
-# Specify one or more NTP servers.
-
-# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
-# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
-# more information.
-server {{ NTP_SERVER_LOCAL }}
-server 0.ubuntu.pool.ntp.org
-server 1.ubuntu.pool.ntp.org
-server 2.ubuntu.pool.ntp.org
-server 3.ubuntu.pool.ntp.org
-
-# Use Ubuntu's ntp server as a fallback.
-server ntp.ubuntu.com
-
-# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
-# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
-# might also be helpful.
-#
-# Note that "restrict" applies to both servers and clients, so a configuration
-# that might be intended to block requests from certain clients could also end
-# up blocking replies from your own upstream servers.
-
-# By default, exchange time with everybody, but don't allow configuration.
-restrict -4 default kod notrap nomodify nopeer noquery
-restrict -6 default kod notrap nomodify nopeer noquery
-
-# Local users may interrogate the ntp server more closely.
-restrict 127.0.0.1
-restrict ::1
-
-# Clients from this (example!) subnet have unlimited access, but only if
-# cryptographically authenticated.
-#restrict 192.168.123.0 mask 255.255.255.0 notrust
-
-
-# If you want to provide time to your local subnet, change the next line.
-# (Again, the address is an example only.)
-#broadcast 192.168.123.255
-
-# If you want to listen to time broadcasts on your local subnet, de-comment the
-# next lines. Please do this only if you trust everybody on the network!
-#disable auth
-#broadcastclient
diff --git a/compass/deploy/ansible/roles/dashboard/tasks/main.yml b/compass/deploy/ansible/roles/dashboard/tasks/main.yml
deleted file mode 100644
index 465b996..0000000
--- a/compass/deploy/ansible/roles/dashboard/tasks/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: install dashboard packages
- apt: name={{ item }} state=present force=yes
- with_items:
- - apache2
- - memcached
- - libapache2-mod-wsgi
- - openstack-dashboard
-
-- name: remove ubuntu theme
- apt: name=openstack-dashboard-ubuntu-theme
- state=absent
-
-## horizon configuration is already enabled in apache2/conf-enabled
-## by openstack-dashboard package deploy script.
-#- name: update dashboard conf
-# template: src=openstack-dashboard.conf
-# dest=/etc/apache2/sites-available/openstack-dashboard.conf
-# backup=yes
-
-- name: update horizon settings
- template: src=local_settings.py
- dest=/etc/openstack-dashboard/local_settings.py
- backup=yes
-
-- name: restart apache2
- service: name=apache2 state=restarted enabled=yes
-
-- name: restart memcached
- service: name=memcached state=restarted enabled=yes
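
Note: apache2 and memcached are likewise restarted unconditionally at the end of every run, and this role ships no handlers file. One option is to notify the restarts only when local_settings.py actually changes; a sketch:

    - name: update horizon settings
      template: src=local_settings.py
                dest=/etc/openstack-dashboard/local_settings.py
                backup=yes
      notify:
        - restart apache2
        - restart memcached
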
diff --git a/compass/deploy/ansible/roles/dashboard/templates/local_settings.py b/compass/deploy/ansible/roles/dashboard/templates/local_settings.py
deleted file mode 100644
index 87e06e3..0000000
--- a/compass/deploy/ansible/roles/dashboard/templates/local_settings.py
+++ /dev/null
@@ -1,511 +0,0 @@
-import os
-
-from django.utils.translation import ugettext_lazy as _
-
-from openstack_dashboard import exceptions
-
-DEBUG = True
-TEMPLATE_DEBUG = DEBUG
-
-# Required for Django 1.5.
-# If horizon is running in production (DEBUG is False), set this
-# with the list of host/domain names that the application can serve.
-# For more information see:
-# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
-#ALLOWED_HOSTS = ['horizon.example.com', ]
-
-# Set SSL proxy settings:
-# For Django 1.4+ pass this header from the proxy after terminating the SSL,
-# and don't forget to strip it from the client's request.
-# For more information see:
-# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
-# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
-
-# If Horizon is being served through SSL, then uncomment the following two
-# settings to better secure the cookies from security exploits
-#CSRF_COOKIE_SECURE = True
-#SESSION_COOKIE_SECURE = True
-
-# Overrides for OpenStack API versions. Use this setting to force the
-# OpenStack dashboard to use a specific API version for a given service API.
-# NOTE: The version should be formatted as it appears in the URL for the
-# service API. For example, The identity service APIs have inconsistent
-# use of the decimal point, so valid options would be "2.0" or "3".
-# OPENSTACK_API_VERSIONS = {
-# "identity": 3,
-# "volume": 2
-# }
-
-# Set this to True if running on multi-domain model. When this is enabled, it
-# will require user to enter the Domain name in addition to username for login.
-# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
-
-# Overrides the default domain used when running on single-domain model
-# with Keystone V3. All entities will be created in the default domain.
-# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
-
-# Set Console type:
-# valid options would be "AUTO", "VNC", "SPICE" or "RDP"
-# CONSOLE_TYPE = "AUTO"
-
-# Default OpenStack Dashboard configuration.
-HORIZON_CONFIG = {
- 'dashboards': ('project', 'admin', 'settings',),
- 'default_dashboard': 'project',
- 'user_home': 'openstack_dashboard.views.get_user_home',
- 'ajax_queue_limit': 10,
- 'auto_fade_alerts': {
- 'delay': 3000,
- 'fade_duration': 1500,
- 'types': ['alert-success', 'alert-info']
- },
- 'help_url': "http://docs.openstack.org",
- 'exceptions': {'recoverable': exceptions.RECOVERABLE,
- 'not_found': exceptions.NOT_FOUND,
- 'unauthorized': exceptions.UNAUTHORIZED},
-}
-
-# Specify a regular expression to validate user passwords.
-# HORIZON_CONFIG["password_validator"] = {
-# "regex": '.*',
-# "help_text": _("Your password does not meet the requirements.")
-# }
-
-# Disable simplified floating IP address management for deployments with
-# multiple floating IP pools or complex network requirements.
-# HORIZON_CONFIG["simple_ip_management"] = False
-
-# Turn off browser autocompletion for the login form if so desired.
-# HORIZON_CONFIG["password_autocomplete"] = "off"
-
-LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
-
-# Set custom secret key:
-# You can either set it to a specific value or you can let horizon generate a
-# default secret key that is unique on this machine, i.e. regardless of the
-# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there
-# may be situations where you would want to set this explicitly, e.g. when
-# multiple dashboard instances are distributed on different machines (usually
-# behind a load-balancer). Either you have to make sure that a session gets all
-# requests routed to the same dashboard instance or you set the same SECRET_KEY
-# for all of them.
-from horizon.utils import secret_key
-SECRET_KEY = 'AJDSKLAJDKASJDKASJDKSAJDKSJAKDSA'
-# We recommend you use memcached for development; otherwise after every reload
-# of the django development server, you will have to login again. To use
-# memcached set CACHES to something like
-CACHES = {
- 'default': {
- 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
- 'LOCATION' : '127.0.0.1:11211',
- }
-}
-
-#CACHES = {
-# 'default': {
-# 'BACKEND' : 'django.core.cache.backends.locmem.LocMemCache'
-# }
-#}
-
-# Enable the Ubuntu theme if it is present.
-try:
- from ubuntu_theme import *
-except ImportError:
- pass
-
-# Default Ubuntu apache configuration uses /horizon as the application root.
-# Configure auth redirects here accordingly.
-LOGIN_URL='/horizon/auth/login/'
-LOGOUT_URL='/horizon/auth/logout/'
-LOGIN_REDIRECT_URL='/horizon'
-
-# The Ubuntu package includes pre-compressed JS and compiled CSS to allow
-# offline compression by default. To enable online compression, install
-# the node-less package and enable the following option.
-COMPRESS_OFFLINE = True
-
-# By default, validation of the HTTP Host header is disabled. Production
-# installations should have this set accordingly. For more information
-# see https://docs.djangoproject.com/en/dev/ref/settings/.
-ALLOWED_HOSTS = ['{{ dashboard_host }}']
-
-# Send email to the console by default
-EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
-# Or send them to /dev/null
-#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
-
-# Configure these for your outgoing email host
-# EMAIL_HOST = 'smtp.my-company.com'
-# EMAIL_PORT = 25
-# EMAIL_HOST_USER = 'djangomail'
-# EMAIL_HOST_PASSWORD = 'top-secret!'
-
-# For multiple regions uncomment this configuration, and add (endpoint, title).
-# AVAILABLE_REGIONS = [
-# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
-# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
-# ]
-
-OPENSTACK_HOST = "{{ HA_VIP }}"
-OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
-
-# Disable SSL certificate checks (useful for self-signed certificates):
-# OPENSTACK_SSL_NO_VERIFY = True
-
-# The CA certificate to use to verify SSL connections
-# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
-
-# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
-# capabilities of the auth backend for Keystone.
-# If Keystone has been configured to use LDAP as the auth backend then set
-# can_edit_user to False and name to 'ldap'.
-#
-# TODO(tres): Remove these once Keystone has an API to identify auth backend.
-OPENSTACK_KEYSTONE_BACKEND = {
- 'name': 'native',
- 'can_edit_user': True,
- 'can_edit_group': True,
- 'can_edit_project': True,
- 'can_edit_domain': True,
- 'can_edit_role': True
-}
-
-#Setting this to True, will add a new "Retrieve Password" action on instance,
-#allowing Admin session password retrieval/decryption.
-#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
-
-# The Xen Hypervisor has the ability to set the mount point for volumes
-# attached to instances (other Hypervisors currently do not). Setting
-# can_set_mount_point to True will add the option to set the mount point
-# from the UI.
-OPENSTACK_HYPERVISOR_FEATURES = {
- 'can_set_mount_point': False,
- 'can_set_password': False,
-}
-
-# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
-# services provided by neutron. Options currently available are load
-# balancer service, security groups, quotas, VPN service.
-OPENSTACK_NEUTRON_NETWORK = {
- 'enable_lb': False,
- 'enable_firewall': False,
- 'enable_quotas': True,
- 'enable_vpn': False,
- # The profile_support option is used to detect if an external router can be
- # configured via the dashboard. When using specific plugins the
- # profile_support can be turned on if needed.
- 'profile_support': None,
- #'profile_support': 'cisco',
-}
-
-# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
-# in the OpenStack Dashboard related to the Image service, such as the list
-# of supported image formats.
-# OPENSTACK_IMAGE_BACKEND = {
-# 'image_formats': [
-# ('', ''),
-# ('aki', _('AKI - Amazon Kernel Image')),
-# ('ami', _('AMI - Amazon Machine Image')),
-# ('ari', _('ARI - Amazon Ramdisk Image')),
-# ('iso', _('ISO - Optical Disk Image')),
-# ('qcow2', _('QCOW2 - QEMU Emulator')),
-# ('raw', _('Raw')),
-# ('vdi', _('VDI')),
-# ('vhd', _('VHD')),
-# ('vmdk', _('VMDK'))
-# ]
-# }
-
-# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
-# image custom property attributes that appear on image detail pages.
-IMAGE_CUSTOM_PROPERTY_TITLES = {
- "architecture": _("Architecture"),
- "kernel_id": _("Kernel ID"),
- "ramdisk_id": _("Ramdisk ID"),
- "image_state": _("Euca2ools state"),
- "project_id": _("Project ID"),
- "image_type": _("Image Type")
-}
-
-# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is 'publicURL'.
-#OPENSTACK_ENDPOINT_TYPE = "publicURL"
-
-# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
-# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is None. This
-# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
-#SECONDARY_ENDPOINT_TYPE = "publicURL"
-
-# The number of objects (Swift containers/objects or images) to display
-# on a single page before providing a paging element (a "more" link)
-# to paginate results.
-API_RESULT_LIMIT = 1000
-API_RESULT_PAGE_SIZE = 20
-
-# The timezone of the server. This should correspond with the timezone
-# of your entire OpenStack installation, and hopefully be in UTC.
-TIME_ZONE = "UTC"
-
-# When launching an instance, the menu of available flavors is
-# sorted by RAM usage, ascending. If you would like a different sort order,
-# you can provide another flavor attribute as sorting key. Alternatively, you
-# can provide a custom callback method to use for sorting. You can also provide
-# a flag for reverse sort. For more info, see
-# http://docs.python.org/2/library/functions.html#sorted
-# CREATE_INSTANCE_FLAVOR_SORT = {
-# 'key': 'name',
-# # or
-# 'key': my_awesome_callback_method,
-# 'reverse': False,
-# }
-
-# The Horizon Policy Enforcement engine uses these values to load per service
-# policy rule files. The content of these files should match the files the
-# OpenStack services are using to determine role based access control in the
-# target installation.
-
-# Path to directory containing policy.json files
-#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
-# Map of local copy of service policy files
-#POLICY_FILES = {
-# 'identity': 'keystone_policy.json',
-# 'compute': 'nova_policy.json',
-# 'volume': 'cinder_policy.json',
-# 'image': 'glance_policy.json',
-#}
-
-# Trove user and database extension support. By default support for
-# creating users and databases on database instances is turned on.
-# To disable these extensions set the permission here to something
-# unusable such as ["!"].
-# TROVE_ADD_USER_PERMS = []
-# TROVE_ADD_DATABASE_PERMS = []
-
-LOGGING = {
- 'version': 1,
- # When set to True this will disable all logging except
- # for loggers specified in this configuration dictionary. Note that
- # if nothing is specified here and disable_existing_loggers is True,
- # django.db.backends will still log unless it is disabled explicitly.
- 'disable_existing_loggers': False,
- 'handlers': {
- 'null': {
- 'level': 'DEBUG',
- 'class': 'django.utils.log.NullHandler',
- },
- 'console': {
- # Set the level to "DEBUG" for verbose output logging.
- 'level': 'INFO',
- 'class': 'logging.StreamHandler',
- },
- },
- 'loggers': {
- # Logging from django.db.backends is VERY verbose, send to null
- # by default.
- 'django.db.backends': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'requests': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'horizon': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'openstack_dashboard': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'novaclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'cinderclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'keystoneclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'glanceclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'neutronclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'heatclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'ceilometerclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'troveclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'swiftclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'openstack_auth': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'nose.plugins.manager': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'django': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'iso8601': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- }
-}
-
-# 'direction' should not be specified for all_tcp/udp/icmp.
-# It is specified in the form.
-SECURITY_GROUP_RULES = {
- 'all_tcp': {
- 'name': 'ALL TCP',
- 'ip_protocol': 'tcp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_udp': {
- 'name': 'ALL UDP',
- 'ip_protocol': 'udp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_icmp': {
- 'name': 'ALL ICMP',
- 'ip_protocol': 'icmp',
- 'from_port': '-1',
- 'to_port': '-1',
- },
- 'ssh': {
- 'name': 'SSH',
- 'ip_protocol': 'tcp',
- 'from_port': '22',
- 'to_port': '22',
- },
- 'smtp': {
- 'name': 'SMTP',
- 'ip_protocol': 'tcp',
- 'from_port': '25',
- 'to_port': '25',
- },
- 'dns': {
- 'name': 'DNS',
- 'ip_protocol': 'tcp',
- 'from_port': '53',
- 'to_port': '53',
- },
- 'http': {
- 'name': 'HTTP',
- 'ip_protocol': 'tcp',
- 'from_port': '80',
- 'to_port': '80',
- },
- 'pop3': {
- 'name': 'POP3',
- 'ip_protocol': 'tcp',
- 'from_port': '110',
- 'to_port': '110',
- },
- 'imap': {
- 'name': 'IMAP',
- 'ip_protocol': 'tcp',
- 'from_port': '143',
- 'to_port': '143',
- },
- 'ldap': {
- 'name': 'LDAP',
- 'ip_protocol': 'tcp',
- 'from_port': '389',
- 'to_port': '389',
- },
- 'https': {
- 'name': 'HTTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '443',
- 'to_port': '443',
- },
- 'smtps': {
- 'name': 'SMTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '465',
- 'to_port': '465',
- },
- 'imaps': {
- 'name': 'IMAPS',
- 'ip_protocol': 'tcp',
- 'from_port': '993',
- 'to_port': '993',
- },
- 'pop3s': {
- 'name': 'POP3S',
- 'ip_protocol': 'tcp',
- 'from_port': '995',
- 'to_port': '995',
- },
- 'ms_sql': {
- 'name': 'MS SQL',
- 'ip_protocol': 'tcp',
- 'from_port': '1433',
- 'to_port': '1433',
- },
- 'mysql': {
- 'name': 'MYSQL',
- 'ip_protocol': 'tcp',
- 'from_port': '3306',
- 'to_port': '3306',
- },
- 'rdp': {
- 'name': 'RDP',
- 'ip_protocol': 'tcp',
- 'from_port': '3389',
- 'to_port': '3389',
- },
-}
-
-FLAVOR_EXTRA_KEYS = {
- 'flavor_keys': [
- ('quota:read_bytes_sec', _('Quota: Read bytes')),
- ('quota:write_bytes_sec', _('Quota: Write bytes')),
- ('quota:cpu_quota', _('Quota: CPU')),
- ('quota:cpu_period', _('Quota: CPU period')),
- ('quota:inbound_average', _('Quota: Inbound average')),
- ('quota:outbound_average', _('Quota: Outbound average')),
- ]
-}
-
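Note: this template pins one literal SECRET_KEY for every deployment, which satisfies the multi-instance advice in the comment above it but leaves the key in version control. A sketch that sources it from an inventory variable instead (DASHBOARD_SECRET_KEY is hypothetical, not a variable defined by these roles):

    - name: set per-deployment horizon secret key
      lineinfile: dest=/etc/openstack-dashboard/local_settings.py
                  regexp="^SECRET_KEY ="
                  line="SECRET_KEY = '{{ DASHBOARD_SECRET_KEY }}'"
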
diff --git a/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf b/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf
deleted file mode 100644
index a5a791a..0000000
--- a/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-<VirtualHost *:80>
-
-WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
-WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10
-Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/
-
-<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
-Order allow,deny
-Allow from all
-</Directory>
-
-
-</VirtualHost>
-
diff --git a/compass/deploy/ansible/roles/database/files/my.cnf b/compass/deploy/ansible/roles/database/files/my.cnf
deleted file mode 100644
index d61f947..0000000
--- a/compass/deploy/ansible/roles/database/files/my.cnf
+++ /dev/null
@@ -1,131 +0,0 @@
-#
-# The MySQL database server configuration file.
-#
-# You can copy this to one of:
-# - "/etc/mysql/my.cnf" to set global options,
-# - "~/.my.cnf" to set user-specific options.
-#
-# One can use all long options that the program supports.
-# Run program with --help to get a list of available options and with
-# --print-defaults to see which it would actually understand and use.
-#
-# For explanations see
-# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
-
-# This will be passed to all mysql clients
-# It has been reported that passwords should be enclosed with ticks/quotes
-# especially if they contain "#" chars...
-# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
-[client]
-port = 3306
-socket = /var/run/mysqld/mysqld.sock
-
-# Here are entries for some specific programs
-# The following values assume you have at least 32M ram
-
-# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
-[mysqld_safe]
-socket = /var/run/mysqld/mysqld.sock
-nice = 0
-
-[mysqld]
-#
-# * Basic Settings
-#
-user = mysql
-pid-file = /var/run/mysqld/mysqld.pid
-socket = /var/run/mysqld/mysqld.sock
-port = 3306
-basedir = /usr
-datadir = /var/lib/mysql
-tmpdir = /tmp
-lc-messages-dir = /usr/share/mysql
-skip-external-locking
-#
-# Instead of skip-networking the default is now to listen only on
-# localhost which is more compatible and is not less secure.
-bind-address = 0.0.0.0
-#
-# * Fine Tuning
-#
-key_buffer = 16M
-max_allowed_packet = 16M
-thread_stack = 192K
-thread_cache_size = 8
-# This replaces the startup script and checks MyISAM tables if needed
-# the first time they are touched
-myisam-recover = BACKUP
-#max_connections = 100
-#table_cache = 64
-#thread_concurrency = 10
-#
-# * Query Cache Configuration
-#
-query_cache_limit = 1M
-query_cache_size = 16M
-#
-# * Logging and Replication
-#
-# Both locations get rotated by the cronjob.
-# Be aware that this log type is a performance killer.
-# As of 5.1 you can enable the log at runtime!
-#general_log_file = /var/log/mysql/mysql.log
-#general_log = 1
-#
-# Error log - should be very few entries.
-#
-log_error = /var/log/mysql/error.log
-#
-# Here you can see queries with especially long duration
-#log_slow_queries = /var/log/mysql/mysql-slow.log
-#long_query_time = 2
-#log-queries-not-using-indexes
-#
-# The following can be used as easy to replay backup logs or for replication.
-# note: if you are setting up a replication slave, see README.Debian about
-# other settings you may need to change.
-#server-id = 1
-#log_bin = /var/log/mysql/mysql-bin.log
-expire_logs_days = 10
-max_binlog_size = 100M
-#binlog_do_db = include_database_name
-#binlog_ignore_db = include_database_name
-#
-# * InnoDB
-#
-# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
-# Read the manual for more InnoDB related options. There are many!
-#
-# * Security Features
-#
-# Read the manual, too, if you want chroot!
-# chroot = /var/lib/mysql/
-#
-# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
-#
-# ssl-ca=/etc/mysql/cacert.pem
-# ssl-cert=/etc/mysql/server-cert.pem
-# ssl-key=/etc/mysql/server-key.pem
-default-storage-engine = innodb
-innodb_file_per_table
-collation-server = utf8_general_ci
-init-connect = 'SET NAMES utf8'
-character-set-server = utf8
-
-[mysqldump]
-quick
-quote-names
-max_allowed_packet = 16M
-
-[mysql]
-#no-auto-rehash # faster start of mysql but no tab completion
-
-[isamchk]
-key_buffer = 16M
-
-#
-# * IMPORTANT: Additional settings that can override those from this file!
-# The files must end with '.cnf', otherwise they'll be ignored.
-#
-!includedir /etc/mysql/conf.d/
-
diff --git a/compass/deploy/ansible/roles/database/tasks/main.yml b/compass/deploy/ansible/roles/database/tasks/main.yml
deleted file mode 100644
index e66f0cd..0000000
--- a/compass/deploy/ansible/roles/database/tasks/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: copy data.sh
- template: src=data.j2 dest=/opt/data.sh mode=777
- tags:
- - mysql_user
-
-- include: mysql.yml
- when: HA_CLUSTER is not defined
-
-- include: mariadb.yml
- when: HA_CLUSTER is defined
-
diff --git a/compass/deploy/ansible/roles/database/tasks/mariadb.yml b/compass/deploy/ansible/roles/database/tasks/mariadb.yml
deleted file mode 100644
index 093dfd1..0000000
--- a/compass/deploy/ansible/roles/database/tasks/mariadb.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: install python-mysqldb
- apt: name={{ item }} state=present force=yes
- with_items:
- - libaio1
- - libssl0.9.8
- #- mariadb-client-5.5
- - mysql-client-5.5
- - python-mysqldb
- - mysql-server-wsrep
- - galera
-
-- name: create mysql log directory
- file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
-
-- name: update mariadb my.cnf
- template: src=my.cnf dest=/etc/mysql/my.cnf backup=yes
-
-- name: update galera wsrep.cnf
- template: src=wsrep.cnf dest=/etc/mysql/conf.d/wsrep.cnf backup=yes
-
-- name: update wsrep_sst_rsync uid
- lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*uid = \$MYUID$" backup=yes
-
-- name: update wsrep_sst_rsync gid
- lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*gid = \$MYGID$" backup=yes
-
-- name: manually restart mysql server
- service: name=mysql state=restarted enabled=yes
- register: result
- until: result|success
- retries: 5
- delay: 5
- tags:
- - mysql_restart
-
-- name: generate mysql service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - mysql
-
-- name: create database/user
- shell: /opt/data.sh
- when: HA_CLUSTER[inventory_hostname] == ''
- tags:
- - mysql_user
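
Note: HA_CLUSTER[inventory_hostname] == '' marks the bootstrap node — its wsrep_cluster_address renders as a bare gcomm:// in wsrep.cnf, so it forms a new cluster, and it is also the only node that seeds databases via /opt/data.sh. A quick post-join check, sketched as a task (assumes passwordless root login as configured by these roles):

    - name: verify galera cluster size
      shell: mysql -uroot -e "SHOW STATUS LIKE 'wsrep_cluster_size';"
      register: wsrep_size
      changed_when: false
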
diff --git a/compass/deploy/ansible/roles/database/tasks/mysql.yml b/compass/deploy/ansible/roles/database/tasks/mysql.yml
deleted file mode 100644
index 327b656..0000000
--- a/compass/deploy/ansible/roles/database/tasks/mysql.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: install mysql client and server packages
- apt: name={{ item }} state=present
- with_items:
- - python-mysqldb
- - mysql-server
-
-- name: create mysql log directory
- file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
-
-- name: update mysql my.cnf
- copy: src=my.cnf
- dest=/etc/mysql/my.cnf
- backup=yes
-
-- name: manually restart mysql server
- shell: service mysql restart
-
-- name: create database/user
- shell: /opt/data.sh
- tags:
- - mysql_user
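
Note: shell: service mysql restart bypasses Ansible's service handling; mariadb.yml above uses the service module with retries for the same step. A sketch in that style:

    - name: restart mysql server
      service: name=mysql state=restarted enabled=yes
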
diff --git a/compass/deploy/ansible/roles/database/templates/data.j2 b/compass/deploy/ansible/roles/database/templates/data.j2
deleted file mode 100644
index c894b32..0000000
--- a/compass/deploy/ansible/roles/database/templates/data.j2
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-mysql -uroot -Dmysql <<EOF
-drop database if exists keystone;
-drop database if exists glance;
-drop database if exists neutron;
-drop database if exists nova;
-drop database if exists cinder;
-
-CREATE DATABASE keystone;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON keystone.* TO 'keystone'@'{{ host }}' IDENTIFIED BY '{{ KEYSTONE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE glance;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON glance.* TO 'glance'@'{{ host }}' IDENTIFIED BY '{{ GLANCE_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE neutron;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON neutron.* TO 'neutron'@'{{ host }}' IDENTIFIED BY '{{ NEUTRON_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE nova;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON nova.* TO 'nova'@'{{ host }}' IDENTIFIED BY '{{ NOVA_DBPASS }}';
-{% endfor %}
-
-CREATE DATABASE cinder;
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON cinder.* TO 'cinder'@'{{ host }}' IDENTIFIED BY '{{ CINDER_DBPASS }}';
-{% endfor %}
-
-{% if WSREP_SST_USER is defined %}
-{% for host in ['%', 'localhost', inventory_hostname] %}
-GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
-{% endfor %}
-{% endif %}
-EOF
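Note: this heredoc drops and recreates every database on each invocation, which is why its callers guard it with tags and host conditions. Ansible's mysql_db and mysql_user modules (backed by the python-mysqldb package the common role installs) are the idempotent equivalent; a sketch for one service:

    - name: create keystone database
      mysql_db: name=keystone state=present

    - name: grant keystone user
      mysql_user: name=keystone password={{ KEYSTONE_DBPASS }}
                  priv=keystone.*:ALL host={{ item }} state=present
      with_items:
        - "%"
        - localhost
        - "{{ inventory_hostname }}"
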
diff --git a/compass/deploy/ansible/roles/database/templates/my.cnf b/compass/deploy/ansible/roles/database/templates/my.cnf
deleted file mode 100644
index 165d619..0000000
--- a/compass/deploy/ansible/roles/database/templates/my.cnf
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-# The MySQL database server configuration file.
-#
-# You can copy this to one of:
-# - "/etc/mysql/my.cnf" to set global options,
-# - "~/.my.cnf" to set user-specific options.
-#
-# One can use all long options that the program supports.
-# Run program with --help to get a list of available options and with
-# --print-defaults to see which it would actually understand and use.
-#
-# For explanations see
-# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
-
-# This will be passed to all mysql clients
-# It has been reported that passwords should be enclosed with ticks/quotes
-# especially if they contain "#" chars...
-# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
-[client]
-port = 3306
-socket = /var/run/mysqld/mysqld.sock
-
-# Here are entries for some specific programs
-# The following values assume you have at least 32M ram
-
-# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
-[mysqld_safe]
-socket = /var/run/mysqld/mysqld.sock
-nice = 0
-
-[mysqld]
-#
-# * Basic Settings
-#
-user = mysql
-pid-file = /var/run/mysqld/mysqld.pid
-socket = /var/run/mysqld/mysqld.sock
-port = 3306
-basedir = /usr
-datadir = /var/lib/mysql
-tmpdir = /tmp
-lc-messages-dir = /usr/share/mysql
-skip-external-locking
-skip-name-resolve
-#
-# Instead of skip-networking the default is now to listen only on
-# localhost which is more compatible and is not less secure.
-#bind-address = {{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
-bind-address = {{ HA_VIP }}
-#
-# * Fine Tuning
-#
-key_buffer = 16M
-max_allowed_packet = 16M
-thread_stack = 192K
-thread_cache_size = 8
-# This replaces the startup script and checks MyISAM tables if needed
-# the first time they are touched
-myisam-recover = BACKUP
-max_connections = 2000
-max_connect_errors = 8000
-#table_cache = 64
-#thread_concurrency = 10
-#
-# * Query Cache Configuration
-#
-query_cache_limit = 1M
-query_cache_size = 16M
-#
-# * Logging and Replication
-#
-# Both locations get rotated by the cronjob.
-# Be aware that this log type is a performance killer.
-# As of 5.1 you can enable the log at runtime!
-general_log_file = /var/log/mysql/mysql.log
-#general_log = 1
-#
-# Error log - should be very few entries.
-#
-log_error = /var/log/mysql/error.log
-#
-# Here you can see queries with especially long duration
-#log_slow_queries = /var/log/mysql/mysql-slow.log
-#long_query_time = 2
-#log-queries-not-using-indexes
-#
-# The following can be used as easy to replay backup logs or for replication.
-# note: if you are setting up a replication slave, see README.Debian about
-# other settings you may need to change.
-#server-id = 1
-#log_bin = /var/log/mysql/mysql-bin.log
-expire_logs_days = 10
-max_binlog_size = 100M
-#binlog_do_db = include_database_name
-#binlog_ignore_db = include_database_name
-#
-# * InnoDB
-#
-# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
-# Read the manual for more InnoDB related options. There are many!
-#
-# * Security Features
-#
-# Read the manual, too, if you want chroot!
-# chroot = /var/lib/mysql/
-#
-# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
-#
-# ssl-ca=/etc/mysql/cacert.pem
-# ssl-cert=/etc/mysql/server-cert.pem
-# ssl-key=/etc/mysql/server-key.pem
-default-storage-engine = innodb
-innodb_file_per_table
-collation-server = utf8_general_ci
-init-connect = 'SET NAMES utf8'
-character-set-server = utf8
-
-[mysqldump]
-quick
-quote-names
-max_allowed_packet = 16M
-
-[mysql]
-#no-auto-rehash # faster start of mysql but no tab completion
-
-[isamchk]
-key_buffer = 16M
-
-#
-# * IMPORTANT: Additional settings that can override those from this file!
-# The files must end with '.cnf', otherwise they'll be ignored.
-#
-!includedir /etc/mysql/conf.d/
-
diff --git a/compass/deploy/ansible/roles/database/templates/wsrep.cnf b/compass/deploy/ansible/roles/database/templates/wsrep.cnf
deleted file mode 100644
index b9e9424..0000000
--- a/compass/deploy/ansible/roles/database/templates/wsrep.cnf
+++ /dev/null
@@ -1,126 +0,0 @@
-# This file contains wsrep-related mysqld options. It should be included
-# in the main MySQL configuration file.
-#
-# Options that need to be customized:
-# - wsrep_provider
-# - wsrep_cluster_address
-# - wsrep_sst_auth
-# The rest of defaults should work out of the box.
-
-##
-## mysqld options _MANDATORY_ for correct operation of the cluster
-##
-[mysqld]
-
-# (This must be substituted by wsrep_format)
-binlog_format=ROW
-
-# Currently only InnoDB storage engine is supported
-default-storage-engine=innodb
-
-# to avoid issues with 'bulk mode inserts' using autoinc
-innodb_autoinc_lock_mode=2
-
-# This is a must for parallel applying
-innodb_locks_unsafe_for_binlog=1
-
-# Query Cache is not supported with wsrep
-query_cache_size=0
-query_cache_type=0
-
-# Override bind-address
-# In some systems bind-address defaults to 127.0.0.1, and with mysqldump SST
-# it will have (most likely) disastrous consequences on the donor node
-#bind-address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
-bind-address={{ HA_VIP }}
-
-##
-## WSREP options
-##
-
-# Full path to wsrep provider library or 'none'
-wsrep_provider=/usr/lib/galera/libgalera_smm.so
-
-# Provider specific configuration options
-#wsrep_provider_options=
-
-# Logical cluster name. Should be the same for all nodes.
-wsrep_cluster_name="my_wsrep_cluster"
-
-# Group communication system handle
-wsrep_cluster_address=gcomm://{{ HA_CLUSTER[inventory_hostname] }}
-
-# Human-readable node name (non-unique). Hostname by default.
-#wsrep_node_name=
-
-# Base replication <address|hostname>[:port] of the node.
-# The values supplied will be used as defaults for state transfer receiving,
-# listening ports and so on. Default: address of the first network interface.
-wsrep_node_address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
-
-# Address for incoming client connections. Autodetect by default.
-#wsrep_node_incoming_address=
-
-# How many threads will process writesets from other nodes
-wsrep_slave_threads=1
-
-# DBUG options for wsrep provider
-#wsrep_dbug_option
-
-# Generate fake primary keys for non-PK tables (required for multi-master
-# and parallel applying operation)
-wsrep_certify_nonPK=1
-
-# Maximum number of rows in write set
-wsrep_max_ws_rows=131072
-
-# Maximum size of write set
-wsrep_max_ws_size=1073741824
-
-# to enable debug level logging, set this to 1
-wsrep_debug=1
-
-# convert locking sessions into transactions
-wsrep_convert_LOCK_to_trx=0
-
-# how many times to retry deadlocked autocommits
-wsrep_retry_autocommit=1
-
-# change auto_increment_increment and auto_increment_offset automatically
-wsrep_auto_increment_control=1
-
-# retry autoinc insert, which failed for duplicate key error
-wsrep_drupal_282555_workaround=0
-
-# enable "strictly synchronous" semantics for read operations
-wsrep_causal_reads=0
-
-# Command to call when node status or cluster membership changes.
-# Will be passed all or some of the following options:
-# --status - new status of this node
-# --uuid - UUID of the cluster
-# --primary - whether the component is primary or not ("yes"/"no")
-# --members - comma-separated list of members
-# --index - index of this node in the list
-wsrep_notify_cmd=
-
-##
-## WSREP State Transfer options
-##
-
-# State Snapshot Transfer method
-wsrep_sst_method=rsync
-
-# Address on THIS node to receive SST at. DON'T SET IT TO DONOR ADDRESS!!!
-# (SST method dependent. Defaults to the first IP of the first interface)
-#wsrep_sst_receive_address=
-
-# SST authentication string. This will be used to send SST to joining nodes.
-# Depends on SST method. For mysqldump method it is root:<root password>
-wsrep_sst_auth={{ WSREP_SST_USER }}:{{ WSREP_SST_PASS }}
-
-# Desired SST donor name.
-#wsrep_sst_donor=
-
-# Protocol version to use
-# wsrep_protocol_version=
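
Once every controller has joined, the wsrep settings above can be sanity-checked from any node. A minimal sketch, assuming passwordless root access to mysql as the other checks in this repo do; the task name is hypothetical:

- name: check galera cluster size
  shell: mysql -uroot -e "SHOW STATUS LIKE 'wsrep_cluster_size'"
  register: wsrep_status
  until: wsrep_status.rc == 0
  retries: 5
  delay: 3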
diff --git a/compass/deploy/ansible/roles/glance/handlers/main.yml b/compass/deploy/ansible/roles/glance/handlers/main.yml
deleted file mode 100644
index d8eaa44..0000000
--- a/compass/deploy/ansible/roles/glance/handlers/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart glance-api
- service: name=glance-api state=restarted enabled=yes
-
-- name: restart glance-registry
- service: name=glance-registry state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/glance/tasks/glance_config.yml b/compass/deploy/ansible/roles/glance/tasks/glance_config.yml
deleted file mode 100644
index 28392a3..0000000
--- a/compass/deploy/ansible/roles/glance/tasks/glance_config.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: init glance db version
- shell: glance-manage db_version_control 0
-
-- name: sync glance db
- shell: sleep 15; su -s /bin/sh -c "glance-manage db_sync" glance
- register: result
- until: result.rc == 0
- retries: 5
- delay: 3
- notify:
- - restart glance-registry
- - restart glance-api
-
-- meta: flush_handlers
-
-- name: place image upload script
- template: src=image_upload.sh dest=/opt/image_upload.sh mode=0744
-
-- name: download cirros image file
- get_url: url={{ build_in_image }} dest=/opt/{{ build_in_image_name }}
-
-- name: wait for 9292 port to become available
- wait_for: host={{ image_host }} port=9292 delay=5
-
-- name: run image upload
- shell: for i in {0..5}; do /opt/image_upload.sh && touch image_upload_completed; if [ $? != 0 ] ;then sleep 5; else break;fi;done
- args:
- creates: image_upload_completed
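
The 'run image upload' task above hand-rolls its retry loop inside the shell command. An equivalent sketch using Ansible's own until/retries mechanism, keeping the same script path and completion marker (when the marker exists, creates: skips the task entirely):

- name: run image upload
  shell: /opt/image_upload.sh && touch image_upload_completed
  args:
    creates: image_upload_completed
  register: upload_result
  until: upload_result.rc == 0
  retries: 5
  delay: 5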
diff --git a/compass/deploy/ansible/roles/glance/tasks/glance_install.yml b/compass/deploy/ansible/roles/glance/tasks/glance_install.yml
deleted file mode 100644
index 505b3b0..0000000
--- a/compass/deploy/ansible/roles/glance/tasks/glance_install.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: install glance packages
- apt: name={{ item }} state=latest force=yes
- with_items:
- - glance
- - python-glanceclient
-
-- name: generate glance service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - glance-registry
- - glance-api
-
-- name: update glance conf
- template: src={{ item }} dest=/etc/glance/{{ item }}
- backup=yes
- with_items:
- - glance-api.conf
- - glance-registry.conf
- notify:
- - restart glance-registry
- - restart glance-api
-
-- name: remove default sqlite db
- shell: rm /var/lib/glance/glance.sqlite || touch glance.sqlite.db.removed
-
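
The 'remove default sqlite db' task above uses rm || touch only to stay green on re-runs. A sketch of the same intent with the file module, which is idempotent by itself:

- name: remove default sqlite db
  file: path=/var/lib/glance/glance.sqlite state=absent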
diff --git a/compass/deploy/ansible/roles/glance/tasks/main.yml b/compass/deploy/ansible/roles/glance/tasks/main.yml
deleted file mode 100644
index 296f0dc..0000000
--- a/compass/deploy/ansible/roles/glance/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: glance_install.yml
- tags:
- - install
- - glance_install
- - glance
-
-- include: nfs.yml
- tags:
- - nfs
-
-- include: glance_config.yml
- when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
- tags:
- - config
- - glance_config
- - glance
-
diff --git a/compass/deploy/ansible/roles/glance/tasks/nfs.yml b/compass/deploy/ansible/roles/glance/tasks/nfs.yml
deleted file mode 100644
index c03ab4d..0000000
--- a/compass/deploy/ansible/roles/glance/tasks/nfs.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: get nfs server
- local_action: shell /sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6| grep "10" -m 1 |awk '{print $2}'|tr -d "addr:"
- register: ip_info
- run_once: True
-
-- name: install nfs
- local_action: yum name=nfs-utils state=present
- run_once: True
-
-- name: create image directory
- local_action: file path=/opt/images state=directory mode=0777
- run_once: True
-
-- name: update nfs config
- local_action: lineinfile dest=/etc/exports state=present
- regexp="/opt/images *(rw,insecure,sync,all_squash)"
- line="/opt/images *(rw,insecure,sync,all_squash)"
- run_once: True
-
-- name: restart nfs service
- local_action: service name=nfs state=restarted enabled=yes
- run_once: True
-
-- name: install nfs comm
- apt: name=nfs-common state=present
-
-- name: get mount info
- command: mount
- register: mount_info
-
-- name: mount image directory
- shell: |
- mount -t nfs -onfsvers=3 {{ item }}:/opt/images /var/lib/glance/images
- sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- echo {{ item }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
- when: mount_info.stdout.find('images') == -1
- with_items:
- ip_info.stdout_lines
- retries: 5
- delay: 3
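
In the 'mount image directory' task above, retries and delay only take effect together with an until condition, so as written they are inert. A sketch of the mount step alone with an explicit retry condition, reusing the play's own variables:

- name: mount image directory
  shell: mount -t nfs -onfsvers=3 {{ item }}:/opt/images /var/lib/glance/images
  register: mount_result
  until: mount_result.rc == 0
  retries: 5
  delay: 3
  when: mount_info.stdout.find('images') == -1
  with_items: ip_info.stdout_lines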
diff --git a/compass/deploy/ansible/roles/glance/templates/glance-api.conf b/compass/deploy/ansible/roles/glance/templates/glance-api.conf
deleted file mode 100644
index 763539e..0000000
--- a/compass/deploy/ansible/roles/glance/templates/glance-api.conf
+++ /dev/null
@@ -1,677 +0,0 @@
-[DEFAULT]
-# Show more verbose log output (sets INFO log level output)
-#verbose = False
-
-# Show debugging output in logs (sets DEBUG log level output)
-#debug = False
-
-# Which backend scheme should Glance use by default if one is not specified
-# in a request to add a new image to Glance? Known schemes are determined
-# by the known_stores option below.
-# Default: 'file'
-# "default_store" option has been moved to [glance_store] section in
-# Juno release
-
-# List of which store classes and store class locations are
-# currently known to glance at startup.
-# Existing but disabled stores:
-# glance.store.rbd.Store,
-# glance.store.s3.Store,
-# glance.store.swift.Store,
-# glance.store.sheepdog.Store,
-# glance.store.cinder.Store,
-# glance.store.gridfs.Store,
-# glance.store.vmware_datastore.Store,
-#known_stores = glance.store.filesystem.Store,
-# glance.store.http.Store
-
-
-# Maximum image size (in bytes) that may be uploaded through the
-# Glance API server. Defaults to 1 TB.
-# WARNING: this value should only be increased after careful consideration
-# and must be set to a value under 8 EB (9223372036854775808).
-#image_size_cap = 1099511627776
-
-# Address to bind the API server
-bind_host = {{ image_host }}
-
-# Port to bind the API server to
-bind_port = 9292
-
-# Log to this file. Make sure you do not set the same log file for both the API
-# and registry servers!
-#
-# If `log_file` is omitted and `use_syslog` is false, then log messages are
-# sent to stdout as a fallback.
-log_file = /var/log/glance/api.log
-
-# Backlog requests when creating socket
-backlog = 4096
-
-# TCP_KEEPIDLE value in seconds when creating socket.
-# Not supported on OS X.
-#tcp_keepidle = 600
-
-# API to use for accessing data. Default value points to sqlalchemy
-# package, it is also possible to use: glance.db.registry.api
-# data_api = glance.db.sqlalchemy.api
-
-# Number of Glance API worker processes to start.
-# On machines with more than one CPU increasing this value
-# may improve performance (especially if using SSL with
-# compression turned on). It is typically recommended to set
-# this value to the number of CPUs present on your machine.
-workers = 1
-
-# Maximum line size of message headers to be accepted.
-# max_header_line may need to be increased when using large tokens
-# (typically those generated by the Keystone v3 API with big service
-# catalogs)
-# max_header_line = 16384
-
-# Role used to identify an authenticated user as administrator
-#admin_role = admin
-
-# Allow unauthenticated users to access the API with read-only
-# privileges. This only applies when using ContextMiddleware.
-#allow_anonymous_access = False
-
-# Allow access to version 1 of glance api
-#enable_v1_api = True
-
-# Allow access to version 2 of glance api
-#enable_v2_api = True
-
-# Return the URL that references where the data is stored on
-# the backend storage system. For example, if using the
-# file system store a URL of 'file:///path/to/image' will
-# be returned to the user in the 'direct_url' meta-data field.
-# The default value is false.
-#show_image_direct_url = False
-
-# Send headers containing user and tenant information when making requests to
-# the v1 glance registry. This allows the registry to function as if a user is
-# authenticated without the need to authenticate a user itself using the
-# auth_token middleware.
-# The default value is false.
-#send_identity_headers = False
-
-# Supported values for the 'container_format' image attribute
-#container_formats=ami,ari,aki,bare,ovf,ova
-
-# Supported values for the 'disk_format' image attribute
-#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
-
-# Directory to use for lock files. Default to a temp directory
-# (string value). This setting needs to be the same for both
-# glance-scrubber and glance-api.
-#lock_path=<None>
-
-# Property Protections config file
-# This file contains the rules for property protections and the roles/policies
-# associated with it.
-# If this config value is not specified, by default, property protections
-# won't be enforced.
-# If a value is specified and the file is not found, then the glance-api
-# service will not start.
-#property_protection_file =
-
-# Specify whether 'roles' or 'policies' are used in the
-# property_protection_file.
-# The default value for property_protection_rule_format is 'roles'.
-#property_protection_rule_format = roles
-
-# Specifies how long (in hours) a task is supposed to live in the tasks DB
-# after succeeding or failing before getting soft-deleted.
-# The default value for task_time_to_live is 48 hours.
-# task_time_to_live = 48
-
-# This value sets what strategy will be used to determine the image location
-# order. Currently two strategies are packaged with Glance 'location_order'
-# and 'store_type'.
-#location_strategy = location_order
-
-# ================= Syslog Options ============================
-
-# Send logs to syslog (/dev/log) instead of to file specified
-# by `log_file`
-#use_syslog = False
-
-# Facility to use. If unset defaults to LOG_USER.
-#syslog_log_facility = LOG_LOCAL0
-
-# ================= SSL Options ===============================
-
-# Certificate file to use when starting API server securely
-#cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-#key_file = /path/to/keyfile
-
-# CA certificate file to use to verify connecting clients
-#ca_file = /path/to/cafile
-
-# ================= Security Options ==========================
-
-# AES key for encrypting store 'location' metadata, including
-# -- if used -- Swift or S3 credentials
-# Should be set to a random string of length 16, 24 or 32 bytes
-#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
-
-# ============ Registry Options ===============================
-
-# Address to find the registry server
-registry_host = {{ internal_ip }}
-
-# Port the registry server is listening on
-registry_port = 9191
-
-# What protocol to use when connecting to the registry server?
-# Set to https for secure HTTP communication
-registry_client_protocol = http
-
-# The path to the key file to use in SSL connections to the
-# registry server, if any. Alternately, you may set the
-# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
-#registry_client_key_file = /path/to/key/file
-
-# The path to the cert file to use in SSL connections to the
-# registry server, if any. Alternately, you may set the
-# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
-#registry_client_cert_file = /path/to/cert/file
-
-# The path to the certifying authority cert file to use in SSL connections
-# to the registry server, if any. Alternately, you may set the
-# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
-#registry_client_ca_file = /path/to/ca/file
-
-# When using SSL in connections to the registry server, do not require
-# validation via a certifying authority. This is the registry's equivalent of
-# specifying --insecure on the command line using glanceclient for the API
-# Default: False
-#registry_client_insecure = False
-
-# The period of time, in seconds, that the API server will wait for a registry
-# request to complete. A value of '0' implies no timeout.
-# Default: 600
-#registry_client_timeout = 600
-
-# Whether to automatically create the database tables.
-# Default: False
-#db_auto_create = False
-
-# Enable DEBUG log messages from sqlalchemy which prints every database
-# query and response.
-# Default: False
-#sqlalchemy_debug = True
-
-# Pass the user's token through for API requests to the registry.
-# Default: True
-#use_user_token = True
-
-# If 'use_user_token' is not in effect then admin credentials
-# can be specified. Requests to the registry on behalf of
-# the API will use these credentials.
-# Admin user name
-#admin_user = None
-# Admin password
-#admin_password = None
-# Admin tenant name
-#admin_tenant_name = None
-# Keystone endpoint
-#auth_url = None
-# Keystone region
-#auth_region = None
-# Auth strategy
-#auth_strategy = keystone
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when images are created, updated or deleted.
-# There are several methods of sending notifications: logging (via the
-# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
-# message queue), or noop (no notifications sent, the default)
-# NOTE: THIS CONFIGURATION OPTION HAS BEEN DEPRECATED IN FAVOR OF `notification_driver`
-# notifier_strategy = default
-
-# Driver or drivers to handle sending notifications
-# notification_driver = noop
-
-# Default publisher_id for outgoing notifications.
-# default_publisher_id = image.localhost
-
-# Configuration options if sending notifications via rabbitmq (these are
-# the defaults)
-rabbit_host = localhost
-rabbit_port = 5672
-rabbit_use_ssl = false
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_virtual_host = /
-rabbit_notification_exchange = glance
-rabbit_notification_topic = notifications
-rabbit_durable_queues = False
-
-# Configuration options if sending notifications via Qpid (these are
-# the defaults)
-qpid_notification_exchange = glance
-qpid_notification_topic = notifications
-qpid_hostname = localhost
-qpid_port = 5672
-qpid_username =
-qpid_password =
-qpid_sasl_mechanisms =
-qpid_reconnect_timeout = 0
-qpid_reconnect_limit = 0
-qpid_reconnect_interval_min = 0
-qpid_reconnect_interval_max = 0
-qpid_reconnect_interval = 0
-qpid_heartbeat = 5
-# Set to 'ssl' to enable SSL
-qpid_protocol = tcp
-qpid_tcp_nodelay = True
-
-# ============ Filesystem Store Options ========================
-
-# Directory that the Filesystem backend store
-# writes image data to
-# this option has been moved to [glance_store] for Juno release
-# filesystem_store_datadir = /var/lib/glance/images/
-
-# A list of directories where image data can be stored.
-# This option may be specified multiple times for specifying multiple store
-# directories. Either one of filesystem_store_datadirs or
-# filesystem_store_datadir option is required. A priority number may be given
-# after each directory entry, separated by a ":".
-# When adding an image, the highest priority directory will be selected, unless
-# there is not enough space available in cases where the image size is already
-# known. If no priority is given, it is assumed to be zero and the directory
-# will be considered for selection last. If multiple directories have the same
-# priority, then the one with the most free space available is selected.
-# If same store is specified multiple times then BadStoreConfiguration
-# exception will be raised.
-#filesystem_store_datadirs = /var/lib/glance/images/:1
-
-# A path to a JSON file that contains metadata describing the storage
-# system. When show_multiple_locations is True the information in this
-# file will be returned with any location that is contained in this
-# store.
-#filesystem_store_metadata_file = None
-
-# ============ Swift Store Options =============================
-
-# Version of the authentication service to use
-# Valid versions are '2' for keystone and '1' for swauth and rackspace
-swift_store_auth_version = 2
-
-# Address where the Swift authentication service lives
-# Valid schemes are 'http://' and 'https://'
-# If no scheme specified, default to 'https://'
-# For swauth, use something like '127.0.0.1:8080/v1.0/'
-swift_store_auth_address = 127.0.0.1:5000/v2.0/
-
-# User to authenticate against the Swift authentication service
-# If you use Swift authentication service, set it to 'account':'user'
-# where 'account' is a Swift storage account and 'user'
-# is a user in that account
-swift_store_user = jdoe:jdoe
-
-# Auth key for the user authenticating against the
-# Swift authentication service
-swift_store_key = a86850deb2742ec3cb41518e26aa2d89
-
-# Container within the account that the account should use
-# for storing images in Swift
-swift_store_container = glance
-
-# Do we create the container if it does not exist?
-swift_store_create_container_on_put = False
-
-# What size, in MB, should Glance start chunking image files
-# and do a large object manifest in Swift? By default, this is
-# the maximum object size in Swift, which is 5GB
-swift_store_large_object_size = 5120
-
-# When doing a large object manifest, what size, in MB, should
-# Glance write chunks to Swift? This amount of data is written
-# to a temporary disk buffer during the process of chunking
-# the image file, and the default is 200MB
-swift_store_large_object_chunk_size = 200
-
-# Whether to use ServiceNET to communicate with the Swift storage servers.
-# (If you aren't RACKSPACE, leave this False!)
-#
-# To use ServiceNET for authentication, prefix hostname of
-# `swift_store_auth_address` with 'snet-'.
-# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
-swift_enable_snet = False
-
-# If set to True enables multi-tenant storage mode which causes Glance images
-# to be stored in tenant specific Swift accounts.
-#swift_store_multi_tenant = False
-
-# A list of swift ACL strings that will be applied as both read and
-# write ACLs to the containers created by Glance in multi-tenant
-# mode. This grants the specified tenants/users read and write access
-# to all newly created image objects. The standard swift ACL string
-# formats are allowed, including:
-# <tenant_id>:<username>
-# <tenant_name>:<username>
-# *:<username>
-# Multiple ACLs can be combined using a comma separated list, for
-# example: swift_store_admin_tenants = service:glance,*:admin
-#swift_store_admin_tenants =
-
-# The region of the swift endpoint to be used for single tenant. This setting
-# is only necessary if the tenant has multiple swift endpoints.
-#swift_store_region =
-
-# If set to False, disables SSL layer compression of https swift requests.
-# Setting to 'False' may improve performance for images which are already
-# in a compressed format, eg qcow2. If set to True, enables SSL layer
-# compression (provided it is supported by the target swift proxy).
-#swift_store_ssl_compression = True
-
-# The number of times a Swift download will be retried before the
-# request fails
-#swift_store_retry_get_count = 0
-
-# ============ S3 Store Options =============================
-
-# Address where the S3 authentication service lives
-# Valid schemes are 'http://' and 'https://'
-# If no scheme specified, default to 'http://'
-s3_store_host = 127.0.0.1:8080/v1.0/
-
-# User to authenticate against the S3 authentication service
-s3_store_access_key = <20-char AWS access key>
-
-# Auth key for the user authenticating against the
-# S3 authentication service
-s3_store_secret_key = <40-char AWS secret key>
-
-# Container within the account that the account should use
-# for storing images in S3. Note that S3 has a flat namespace,
-# so you need a unique bucket name for your glance images. An
-# easy way to do this is append your AWS access key to "glance".
-# S3 buckets in AWS *must* be lowercased, so remember to lowercase
-# your AWS access key if you use it in your bucket name below!
-s3_store_bucket = <lowercased 20-char aws access key>glance
-
-# Do we create the bucket if it does not exist?
-s3_store_create_bucket_on_put = False
-
-# When sending images to S3, the data will first be written to a
-# temporary buffer on disk. By default the platform's temporary directory
-# will be used. If required, an alternative directory can be specified here.
-#s3_store_object_buffer_dir = /path/to/dir
-
-# When forming a bucket url, boto will either set the bucket name as the
-# subdomain or as the first token of the path. Amazon's S3 service will
-# accept it as the subdomain, but Swift's S3 middleware requires it be
-# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
-#s3_store_bucket_url_format = subdomain
-
-# ============ RBD Store Options =============================
-
-# Ceph configuration file path
-# If using cephx authentication, this file should
-# include a reference to the right keyring
-# in a client.<USER> section
-#rbd_store_ceph_conf = /etc/ceph/ceph.conf
-
-# RADOS user to authenticate as (only applicable if using cephx)
-# If <None>, a default will be chosen based on the client. section
-# in rbd_store_ceph_conf
-#rbd_store_user = <None>
-
-# RADOS pool in which images are stored
-#rbd_store_pool = images
-
-# RADOS images will be chunked into objects of this size (in megabytes).
-# For best performance, this should be a power of two
-#rbd_store_chunk_size = 8
-
-# ============ Sheepdog Store Options =============================
-
-sheepdog_store_address = localhost
-
-sheepdog_store_port = 7000
-
-# Images will be chunked into objects of this size (in megabytes).
-# For best performance, this should be a power of two
-sheepdog_store_chunk_size = 64
-
-# ============ Cinder Store Options ===============================
-
-# Info to match when looking for cinder in the service catalog
-# Format is : separated values of the form:
-# <service_type>:<service_name>:<endpoint_type> (string value)
-#cinder_catalog_info = volume:cinder:publicURL
-
-# Override service catalog lookup with template for cinder endpoint
-# e.g. http://localhost:8776/v1/%(project_id)s (string value)
-#cinder_endpoint_template = <None>
-
-# Region name of this node (string value)
-#os_region_name = <None>
-
-# Location of ca certificates file to use for cinder client requests
-# (string value)
-#cinder_ca_certificates_file = <None>
-
-# Number of cinderclient retries on failed http calls (integer value)
-#cinder_http_retries = 3
-
-# Allow to perform insecure SSL requests to cinder (boolean value)
-#cinder_api_insecure = False
-
-# ============ VMware Datastore Store Options =====================
-
-# ESX/ESXi or vCenter Server target system.
-# The server value can be an IP address or a DNS name
-# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
-#vmware_server_host = <None>
-
-# Server username (string value)
-#vmware_server_username = <None>
-
-# Server password (string value)
-#vmware_server_password = <None>
-
-# Inventory path to a datacenter (string value)
-# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
-# should be `ha-datacenter`.
-#vmware_datacenter_path = <None>
-
-# Datastore associated with the datacenter (string value)
-#vmware_datastore_name = <None>
-
-# The number of times we retry on failures
-# e.g., socket error, etc (integer value)
-#vmware_api_retry_count = 10
-
-# The interval used for polling remote tasks
-# invoked on VMware ESX/VC server in seconds (integer value)
-#vmware_task_poll_interval = 5
-
-# Absolute path of the folder containing the images in the datastore
-# (string value)
-#vmware_store_image_dir = /openstack_glance
-
-# Allow to perform insecure SSL requests to the target system (boolean value)
-#vmware_api_insecure = False
-
-# ============ Delayed Delete Options =============================
-
-# Turn on/off delayed delete
-delayed_delete = False
-
-# Delayed delete time in seconds
-scrub_time = 43200
-
-# Directory that the scrubber will use to remind itself of what to delete
-# Make sure this is also set in glance-scrubber.conf
-scrubber_datadir = /var/lib/glance/scrubber
-
-# =============== Quota Options ==================================
-
-# The maximum number of image members allowed per image
-#image_member_quota = 128
-
-# The maximum number of image properties allowed per image
-#image_property_quota = 128
-
-# The maximum number of tags allowed per image
-#image_tag_quota = 128
-
-# The maximum number of locations allowed per image
-#image_location_quota = 10
-
-# Set a system wide quota for every user. This value is the total number
-# of bytes that a user can use across all storage systems. A value of
-# 0 means unlimited.
-#user_storage_quota = 0
-
-# =============== Image Cache Options =============================
-
-# Base directory that the Image Cache uses
-image_cache_dir = /var/lib/glance/image-cache/
-
-# =============== Manager Options =================================
-
-# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE.
-# Whether or not to enforce that all DB tables have charset utf8.
-# If your database tables do not have charset utf8 you will
-# need to convert before this option is removed. This option is
-# only relevant if your database engine is MySQL.
-#db_enforce_mysql_charset = True
-
-# =============== Glance Store ====================================
-[glance_store]
-# Moved from [DEFAULT], for Juno release
-default_store = file
-filesystem_store_datadir = /var/lib/glance/images/
-
-# =============== Database Options =================================
-
-[database]
-# The file name to use with SQLite (string value)
-sqlite_db = /var/lib/glance/glance.sqlite
-
-# If True, SQLite uses synchronous mode (boolean value)
-#sqlite_synchronous = True
-
-# The backend to use for db (string value)
-# Deprecated group/name - [DEFAULT]/db_backend
-backend = sqlalchemy
-
-# The SQLAlchemy connection string used to connect to the
-# database (string value)
-# Deprecated group/name - [DEFAULT]/sql_connection
-# Deprecated group/name - [DATABASE]/sql_connection
-# Deprecated group/name - [sql]/connection
-#connection = <None>
-connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
-
-# The SQL mode to be used for MySQL sessions. This option,
-# including the default, overrides any server-set SQL mode. To
-# use whatever SQL mode is set by the server configuration,
-# set this to no value. Example: mysql_sql_mode= (string
-# value)
-#mysql_sql_mode = TRADITIONAL
-
-# Timeout before idle sql connections are reaped (integer
-# value)
-# Deprecated group/name - [DEFAULT]/sql_idle_timeout
-# Deprecated group/name - [DATABASE]/sql_idle_timeout
-# Deprecated group/name - [sql]/idle_timeout
-#idle_timeout = 3600
-
-# Minimum number of SQL connections to keep open in a pool
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_min_pool_size
-# Deprecated group/name - [DATABASE]/sql_min_pool_size
-#min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_pool_size
-# Deprecated group/name - [DATABASE]/sql_max_pool_size
-#max_pool_size = <None>
-
-# Maximum db connection retries during startup. (setting -1
-# implies an infinite retry count) (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_retries
-# Deprecated group/name - [DATABASE]/sql_max_retries
-#max_retries = 10
-
-# Interval between retries of opening a sql connection
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_retry_interval
-# Deprecated group/name - [DATABASE]/reconnect_interval
-#retry_interval = 10
-
-# If set, use this value for max_overflow with sqlalchemy
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_overflow
-# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
-#max_overflow = <None>
-
-# Verbosity of SQL debugging information. 0=None,
-# 100=Everything (integer value)
-# Deprecated group/name - [DEFAULT]/sql_connection_debug
-#connection_debug = 0
-
-# Add python stack traces to SQL as comment strings (boolean
-# value)
-# Deprecated group/name - [DEFAULT]/sql_connection_trace
-#connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-# (integer value)
-# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
-#pool_timeout = <None>
-
-# Enable the experimental use of database reconnect on
-# connection lost (boolean value)
-#use_db_reconnect = False
-
-# seconds between db connection retries (integer value)
-#db_retry_interval = 1
-
-# Whether to increase interval between db connection retries,
-# up to db_max_retry_interval (boolean value)
-#db_inc_retry_interval = True
-
-# max seconds between db connection retries, if
-# db_inc_retry_interval is enabled (integer value)
-#db_max_retry_interval = 10
-
-# maximum db connection retries before error is raised.
-# (setting -1 implies an infinite retry count) (integer value)
-#db_max_retries = 20
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = glance
-admin_password = {{ GLANCE_PASS }}
-
-[paste_deploy]
-# Name of the paste configuration file that defines the available pipelines
-#config_file = glance-api-paste.ini
-
-# Partial name of a pipeline in your paste configuration file with the
-# service name removed. For example, if your paste section name is
-# [pipeline:glance-api-keystone], you would configure the flavor below
-# as 'keystone'.
-flavor = keystone
-
-[store_type_location_strategy]
-# The scheme list to use to get store preference order. The scheme must be
-# registered by one of the stores defined by the 'known_stores' config option.
-# This option is applied when you use the 'store_type' option as the image
-# location strategy defined by the 'location_strategy' config option.
-#store_type_preference =
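
One thing worth checking in the template above: the database and keystone endpoints point at HA_VIP, while rabbit_host stays at localhost; whether that is intended depends on where rabbitmq runs in this deployment. A minimal smoke-test sketch for the rendered service, reusing the image_host variable the template already uses:

- name: smoke-test glance api
  wait_for: host={{ image_host }} port=9292 timeout=60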
diff --git a/compass/deploy/ansible/roles/glance/templates/glance-registry.conf b/compass/deploy/ansible/roles/glance/templates/glance-registry.conf
deleted file mode 100644
index 8d731a2..0000000
--- a/compass/deploy/ansible/roles/glance/templates/glance-registry.conf
+++ /dev/null
@@ -1,190 +0,0 @@
-[DEFAULT]
-# Show more verbose log output (sets INFO log level output)
-#verbose = False
-
-# Show debugging output in logs (sets DEBUG log level output)
-#debug = False
-
-# Address to bind the registry server
-bind_host = {{ internal_ip }}
-
-# Port to bind the registry server to
-bind_port = 9191
-
-# Log to this file. Make sure you do not set the same log file for both the API
-# and registry servers!
-#
-# If `log_file` is omitted and `use_syslog` is false, then log messages are
-# sent to stdout as a fallback.
-log_file = /var/log/glance/registry.log
-
-# Backlog requests when creating socket
-backlog = 4096
-
-# TCP_KEEPIDLE value in seconds when creating socket.
-# Not supported on OS X.
-#tcp_keepidle = 600
-
-# API to use for accessing data. Default value points to sqlalchemy
-# package.
-#data_api = glance.db.sqlalchemy.api
-
-# Enable Registry API versions individually or simultaneously
-#enable_v1_registry = True
-#enable_v2_registry = True
-
-# Limit the api to return `api_limit_max` items in a call to a container. If
-# a larger `limit` query param is provided, it will be reduced to this value.
-api_limit_max = 1000
-
-# If a `limit` query param is not provided in an api request, it will
-# default to `limit_param_default`
-limit_param_default = 25
-
-# Role used to identify an authenticated user as administrator
-#admin_role = admin
-
-# Whether to automatically create the database tables.
-# Default: False
-#db_auto_create = False
-
-# Enable DEBUG log messages from sqlalchemy which prints every database
-# query and response.
-# Default: False
-#sqlalchemy_debug = True
-
-# ================= Syslog Options ============================
-
-# Send logs to syslog (/dev/log) instead of to file specified
-# by `log_file`
-#use_syslog = False
-
-# Facility to use. If unset defaults to LOG_USER.
-#syslog_log_facility = LOG_LOCAL1
-
-# ================= SSL Options ===============================
-
-# Certificate file to use when starting registry server securely
-#cert_file = /path/to/certfile
-
-# Private key file to use when starting registry server securely
-#key_file = /path/to/keyfile
-
-# CA certificate file to use to verify connecting clients
-#ca_file = /path/to/cafile
-
-# ================= Database Options ==========================
-
-[database]
-# The file name to use with SQLite (string value)
-sqlite_db = /var/lib/glance/glance.sqlite
-
-# If True, SQLite uses synchronous mode (boolean value)
-#sqlite_synchronous = True
-
-# The backend to use for db (string value)
-# Deprecated group/name - [DEFAULT]/db_backend
-backend = sqlalchemy
-
-# The SQLAlchemy connection string used to connect to the
-# database (string value)
-# Deprecated group/name - [DEFAULT]/sql_connection
-# Deprecated group/name - [DATABASE]/sql_connection
-# Deprecated group/name - [sql]/connection
-#connection = <None>
-connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
-
-# The SQL mode to be used for MySQL sessions. This option,
-# including the default, overrides any server-set SQL mode. To
-# use whatever SQL mode is set by the server configuration,
-# set this to no value. Example: mysql_sql_mode= (string
-# value)
-#mysql_sql_mode = TRADITIONAL
-
-# Timeout before idle sql connections are reaped (integer
-# value)
-# Deprecated group/name - [DEFAULT]/sql_idle_timeout
-# Deprecated group/name - [DATABASE]/sql_idle_timeout
-# Deprecated group/name - [sql]/idle_timeout
-#idle_timeout = 3600
-
-# Minimum number of SQL connections to keep open in a pool
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_min_pool_size
-# Deprecated group/name - [DATABASE]/sql_min_pool_size
-#min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_pool_size
-# Deprecated group/name - [DATABASE]/sql_max_pool_size
-#max_pool_size = <None>
-
-# Maximum db connection retries during startup. (setting -1
-# implies an infinite retry count) (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_retries
-# Deprecated group/name - [DATABASE]/sql_max_retries
-#max_retries = 10
-
-# Interval between retries of opening a sql connection
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_retry_interval
-# Deprecated group/name - [DATABASE]/reconnect_interval
-#retry_interval = 10
-
-# If set, use this value for max_overflow with sqlalchemy
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_overflow
-# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
-#max_overflow = <None>
-
-# Verbosity of SQL debugging information. 0=None,
-# 100=Everything (integer value)
-# Deprecated group/name - [DEFAULT]/sql_connection_debug
-#connection_debug = 0
-
-# Add python stack traces to SQL as comment strings (boolean
-# value)
-# Deprecated group/name - [DEFAULT]/sql_connection_trace
-#connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-# (integer value)
-# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
-#pool_timeout = <None>
-
-# Enable the experimental use of database reconnect on
-# connection lost (boolean value)
-#use_db_reconnect = False
-
-# seconds between db connection retries (integer value)
-#db_retry_interval = 1
-
-# Whether to increase interval between db connection retries,
-# up to db_max_retry_interval (boolean value)
-#db_inc_retry_interval = True
-
-# max seconds between db connection retries, if
-# db_inc_retry_interval is enabled (integer value)
-#db_max_retry_interval = 10
-
-# maximum db connection retries before error is raised.
-# (setting -1 implies an infinite retry count) (integer value)
-#db_max_retries = 20
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = glance
-admin_password = {{ GLANCE_PASS }}
-
-[paste_deploy]
-# Name of the paste configuration file that defines the available pipelines
-#config_file = glance-registry-paste.ini
-
-# Partial name of a pipeline in your paste configuration file with the
-# service name removed. For example, if your paste section name is
-# [pipeline:glance-registry-keystone], you would configure the flavor below
-# as 'keystone'.
-flavor = keystone
diff --git a/compass/deploy/ansible/roles/glance/templates/image_upload.sh b/compass/deploy/ansible/roles/glance/templates/image_upload.sh
deleted file mode 100644
index 9dd1fa8..0000000
--- a/compass/deploy/ansible/roles/glance/templates/image_upload.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-sleep 10
-glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ HA_VIP }}:35357/v2.0 image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /opt/{{ build_in_image_name }} && touch glance.import.completed
diff --git a/compass/deploy/ansible/roles/ha/files/galera_chk b/compass/deploy/ansible/roles/ha/files/galera_chk
deleted file mode 100644
index 9fd165c..0000000
--- a/compass/deploy/ansible/roles/ha/files/galera_chk
+++ /dev/null
@@ -1,10 +0,0 @@
-#! /bin/sh
-
-code=`mysql -uroot -e "show status" | awk '/Threads_running/{print $2}'`
-
-if [ "$code" = "1" ]
-then
- echo "HTTP/1.1 200 OK\r\n"
-else
- echo "HTTP/1.1 503 Service Unavailable\r\n"
-fi
diff --git a/compass/deploy/ansible/roles/ha/files/mysqlchk b/compass/deploy/ansible/roles/ha/files/mysqlchk
deleted file mode 100644
index 2c03f19..0000000
--- a/compass/deploy/ansible/roles/ha/files/mysqlchk
+++ /dev/null
@@ -1,15 +0,0 @@
-# default: off
-# description: An xinetd service that runs galera_chk to report
-# MySQL/Galera health back to connecting clients.
-# This is the tcp version.
-service mysqlchk
-{
- disable = no
- flags = REUSE
- socket_type = stream
- protocol = tcp
- user = root
- wait = no
- server = /usr/local/bin/galera_chk
- port = 9200
-}
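
galera_chk emits a bare HTTP status line that xinetd serves on port 9200, which is all HAProxy's option httpchk needs. A quick probe sketch; curl generally tolerates the header-less reply, and internal_ip is the variable used elsewhere in these templates:

- name: probe mysqlchk health endpoint
  shell: curl -s -o /dev/null -w "%{http_code}" http://{{ internal_ip }}:9200/
  register: chk
  changed_when: false
  failed_when: chk.stdout != "200"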
diff --git a/compass/deploy/ansible/roles/ha/files/notify.sh b/compass/deploy/ansible/roles/ha/files/notify.sh
deleted file mode 100644
index 5edffe8..0000000
--- a/compass/deploy/ansible/roles/ha/files/notify.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-python /usr/local/bin/failover.py $1
-mysql -uroot -e"flush hosts"
-service mysql restart
diff --git a/compass/deploy/ansible/roles/ha/handlers/main.yml b/compass/deploy/ansible/roles/ha/handlers/main.yml
deleted file mode 100644
index a02c686..0000000
--- a/compass/deploy/ansible/roles/ha/handlers/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: restart haproxy
- service: name=haproxy state=restarted enabled=yes
-
-- name: restart xinetd
- service: name=xinetd state=restarted enabled=yes
-
-- name: restart keepalived
- service: name=keepalived state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/ha/tasks/main.yml b/compass/deploy/ansible/roles/ha/tasks/main.yml
deleted file mode 100644
index a00c21a..0000000
--- a/compass/deploy/ansible/roles/ha/tasks/main.yml
+++ /dev/null
@@ -1,94 +0,0 @@
----
-- name: install keepalived xinet haproxy
- apt: name={{ item }} state=present
- with_items:
- - keepalived
- - xinetd
- - haproxy
-
-- name: generate ha service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - keepalived
- - xinetd
- - haproxy
-
-- name: install pexpect
- pip: name=pexpect state=present
-
-- name: activate ip_nonlocal_bind
- sysctl: name=net.ipv4.ip_nonlocal_bind value=1
- state=present reload=yes
-
-- name: set net.ipv4.tcp_keepalive_intvl
- sysctl: name=net.ipv4.tcp_keepalive_intvl value=1
- state=present reload=yes
-
-- name: set net.ipv4.tcp_keepalive_probes
- sysctl: name=net.ipv4.tcp_keepalive_probes value=5
- state=present reload=yes
-
-- name: set net.ipv4.tcp_keepalive_time
- sysctl: name=net.ipv4.tcp_keepalive_time value=5
- state=present reload=yes
-
-- name: update haproxy cfg
- template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg
- notify: restart haproxy
-
-- name: set haproxy enable flag
- lineinfile: dest=/etc/default/haproxy state=present
- regexp="ENABLED=*"
- line="ENABLED=1"
- notify: restart haproxy
-
-- name: set haproxy log
- lineinfile: dest=/etc/rsyslog.conf state=present
- regexp="local0.* /var/log/haproxy.log"
- line="local0.* /var/log/haproxy.log"
-
-- name: set rsyslog udp module
- lineinfile: dest=/etc/rsyslog.conf state=present
- regexp="^#$ModLoad imudp"
- line="$ModLoad imudp"
-
-- name: set rsyslog udp port
- lineinfile: dest=/etc/rsyslog.conf state=present
- regexp="^#$UDPServerRun 514"
- line="$UDPServerRun 514"
-
-- name: copy galera_chk file
- copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777
-
-- name: copy notify file
- copy: src=notify.sh dest=/usr/local/bin/notify.sh mode=0777
-
-- name: copy notify template file
- template: src=failover.j2 dest=/usr/local/bin/failover.py mode=0777
-
-- name: add network service
- lineinfile: dest=/etc/services state=present
- line="mysqlchk 9200/tcp"
- insertafter="Local services"
- notify: restart xinetd
-
-- name: copy mysqlchk file
- copy: src=mysqlchk dest=/etc/xinetd.d/mysqlchk mode=0777
- notify: restart xinetd
-
-- name: set keepalived start param
- lineinfile: dest=/etc/default/keepalived state=present
- regexp="^DAEMON_ARGS=*"
- line="DAEMON_ARGS=\"-D -d -S 1\""
-
-- name: set keepalived log
- lineinfile: dest=/etc/rsyslog.conf state=present
- regexp="local1.* /var/log/keepalived.log"
- line="local1.* /var/log/keepalived.log"
-
-- name: update keepalived info
- template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf
- notify: restart keepalived
-
-- name: restart rsyslog
- shell: service rsyslog restart
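
After the role above runs on all controllers, exactly one of them should hold the VIP. A quick check sketch using the same INTERNAL_INTERFACE and HA_VIP variables; failed_when: false keeps it informational:

- name: report whether this node holds the VIP
  shell: ip addr show {{ INTERNAL_INTERFACE }} | grep -c "{{ HA_VIP }}"
  register: vip_count
  changed_when: false
  failed_when: false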
diff --git a/compass/deploy/ansible/roles/ha/templates/failover.j2 b/compass/deploy/ansible/roles/ha/templates/failover.j2
deleted file mode 100644
index b03c737..0000000
--- a/compass/deploy/ansible/roles/ha/templates/failover.j2
+++ /dev/null
@@ -1,65 +0,0 @@
-import ConfigParser, os, socket
-import logging as LOG
-import pxssh
-import sys
-import re
-
-LOG_FILE="/var/log/mysql_failover"
-try:
- os.remove(LOG_FILE)
-except OSError:
- pass
-
-LOG.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE,level=LOG.DEBUG)
-ha_vip = "{{ HA_VIP }}"
-LOG.info("ha_vip: %s" % ha_vip)
-
-#ha_vip = "10.1.0.50"
-galera_path = '/etc/mysql/conf.d/wsrep.cnf'
-pattern = re.compile(r"gcomm://(?P<prev_ip>.*)")
-
-def ssh_get_hostname(ip):
- try:
- s = pxssh.pxssh()
- s.login("%s" % ip, "root", "root")
- s.sendline('hostname') # run a command
- s.prompt() # match the prompt
- result = s.before.strip() # everything before the prompt
- return result.split(os.linesep)[1]
- except pxssh.ExceptionPxssh as e:
- LOG.error("pxssh failed on login.")
- raise
-
-def failover(mode):
- config = ConfigParser.ConfigParser()
- config.optionxform = str
- config.readfp(open(galera_path))
- wsrep_cluster_address = config.get("mysqld", "wsrep_cluster_address")
- wsrep_cluster_address = pattern.match(wsrep_cluster_address).groupdict()["prev_ip"]
-
- LOG.info("old wsrep_cluster_address = %s" % wsrep_cluster_address)
-
- if mode == "master":
- # refresh wsrep_cluster_address to null
- LOG.info("I'm being master, set wsrep_cluster_address to null")
- wsrep_cluster_address = ""
-
- elif mode == "backup":
- # refresh wsrep_cluster_address to master int ip
- hostname = ssh_get_hostname(ha_vip)
- wsrep_cluster_address = socket.gethostbyname(hostname)
- LOG.info("I'm being slave, set wsrep_cluster_address to master internal ip")
-
- LOG.info("new wsrep_cluster_address = %s" % wsrep_cluster_address)
- wsrep_cluster_address = "gcomm://%s" % wsrep_cluster_address
- config.set("mysqld", "wsrep_cluster_address", wsrep_cluster_address)
- with open(galera_path, 'wb') as fp:
- #config.write(sys.stdout)
- config.write(fp)
-
- os.system("service mysql restart")
- LOG.info("failover success!!!")
-
-if __name__ == "__main__":
- LOG.debug("call me: %s" % sys.argv)
- failover(sys.argv[1])
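
Because failover.j2 renders to Python, a bad expansion (for example a value that lands outside string quotes) only surfaces when keepalived first invokes notify.sh. A cheap guard sketch, using the destination path from the ha role's template task:

- name: syntax-check the rendered failover script
  shell: python -m py_compile /usr/local/bin/failover.py
  changed_when: false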
diff --git a/compass/deploy/ansible/roles/ha/templates/haproxy.cfg b/compass/deploy/ansible/roles/ha/templates/haproxy.cfg
deleted file mode 100644
index 4ed528a..0000000
--- a/compass/deploy/ansible/roles/ha/templates/haproxy.cfg
+++ /dev/null
@@ -1,133 +0,0 @@
-
-global
- #chroot /var/run/haproxy
- daemon
- user haproxy
- group haproxy
- maxconn 4000
- pidfile /var/run/haproxy/haproxy.pid
- #log 127.0.0.1 local0
- tune.bufsize 1000000
- stats socket /var/run/haproxy.sock
- stats timeout 2m
-
-defaults
- log global
- maxconn 8000
- option redispatch
- option dontlognull
- option splice-auto
- timeout http-request 10s
- timeout queue 1m
- timeout connect 10s
- timeout client 6m
- timeout server 6m
- timeout check 10s
- retries 5
-
-listen proxy-glance_registry_cluster
- bind {{ HA_VIP }}:9191
- option tcpka
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9191 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-glance_api_cluster
- bind {{ HA_VIP }}:9292
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9292 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova-novncproxy
- bind {{ HA_VIP }}:6080
- option tcpka
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:6080 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-network
- bind {{ HA_VIP }}:9696
- option tcpka
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9696 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-volume
- bind {{ HA_VIP }}:8776
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8776 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-keystone_admin_cluster
- bind {{ HA_VIP }}:35357
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:35357 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-keystone_public_internal_cluster
- bind {{ HA_VIP }}:5000
- option tcpka
- option httpchk
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:5000 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova_compute_api_cluster
- bind {{ HA_VIP }}:8774
- mode tcp
- option httpchk
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8774 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-nova_metadata_api_cluster
- bind {{ HA_VIP }}:8775
- option tcpka
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8775 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen proxy-cinder_api_cluster
- bind {{ HA_VIP }}:8776
- mode tcp
- option httpchk
- option tcplog
- balance source
-{% for host in groups['controller'] %}
- server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8776 weight 1 check inter 2000 rise 2 fall 5
-{% endfor %}
-
-listen stats
- mode http
- bind 0.0.0.0:8888
- stats enable
- stats refresh 30s
- stats uri /
- stats realm Global\ statistics
- stats auth admin:admin
-
-
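
Note that proxy-volume and proxy-cinder_api_cluster above both bind {{ HA_VIP }}:8776, which will collide when the service starts. A validation sketch for the rendered file; haproxy -c catches syntax errors, though duplicate binds may only surface on an actual restart:

- name: validate haproxy configuration
  shell: haproxy -c -f /etc/haproxy/haproxy.cfg
  changed_when: false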
diff --git a/compass/deploy/ansible/roles/ha/templates/keepalived.conf b/compass/deploy/ansible/roles/ha/templates/keepalived.conf
deleted file mode 100644
index 0b49137..0000000
--- a/compass/deploy/ansible/roles/ha/templates/keepalived.conf
+++ /dev/null
@@ -1,42 +0,0 @@
-global_defs {
-
- notification_email {
- root@huawei.com
- }
-
- notification_email_from keepalived@huawei.com
-
- smtp_server localhost
-
- smtp_connect_timeout 30
-
- router_id NodeA
-
-}
-
-vrrp_instance VI_1 {
-
- interface {{ INTERNAL_INTERFACE }}
- virtual_router_id 51
- state BACKUP
- nopreempt
- advert_int 1
-{% for host in groups['controller'] %}
-{% if host == inventory_hostname %}
- priority {{ 100 - loop.index0 * 5 }}
-{% endif %}
-{% endfor %}
-
- authentication {
- auth_type PASS
- auth_pass 1111
- }
-
- virtual_ipaddress {
- {{ HA_VIP }} dev {{ INTERNAL_INTERFACE }}
- }
-
- notify_master "/usr/local/bin/notify.sh master"
- notify_backup "/usr/local/bin/notify.sh backup"
-}
-
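
The Jinja loop above derives each node's VRRP priority from its position in groups['controller'], so three controllers render as 100, 95 and 90; together with nopreempt, a recovered node does not take the VIP back automatically. A render sanity-check sketch with a hypothetical task name:

- name: show computed keepalived priority
  debug: msg="priority {{ 100 - groups['controller'].index(inventory_hostname) * 5 }}"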
diff --git a/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml b/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml
deleted file mode 100644
index 3203b26..0000000
--- a/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: keystone-manage db-sync
- shell: su -s /bin/sh -c "keystone-manage db_sync"
- register: result
- until: result.rc == 0
- retries: 5
- delay: 3
-
-- name: place keystone init script under /opt/
- template: src=keystone_init dest=/opt/keystone_init mode=0744
-
-- name: run keystone_init
- shell: /opt/keystone_init && touch keystone_init_complete || touch keystone_init_failed
- args:
- creates: keystone_init_complete
-
diff --git a/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml b/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml
deleted file mode 100644
index e69c069..0000000
--- a/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: install keystone packages
- apt: name=keystone state=present force=yes
-
-- name: generate keystone service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - keystone
-
-- name: update keystone conf
- template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
-
-- name: delete sqlite database
- shell: rm /var/lib/keystone/keystone.db || echo sqlite database already removed
-
-- name: cron job to purge expired tokens hourly
- shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone
-
-- name: modify keystone cron rights
- file: path=/var/spool/cron/crontabs/keystone mode=0600
-
-- name: keystone source files
- template: src={{ item }} dest=/opt/{{ item }}
- with_items:
- - admin-openrc.sh
- - demo-openrc.sh
-
-- name: manually start keystone
- service: name=keystone state=restarted enabled=yes
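
The token_flush entry above is appended to the crontab file by hand, guarded by a grep. A sketch of the same schedule with Ansible's cron module, which is idempotent without the guard:

- name: cron job to purge expired tokens hourly
  cron: name="keystone token flush" special_time=hourly user=keystone
        job="/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1"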
diff --git a/compass/deploy/ansible/roles/keystone/tasks/main.yml b/compass/deploy/ansible/roles/keystone/tasks/main.yml
deleted file mode 100644
index 2f36e91..0000000
--- a/compass/deploy/ansible/roles/keystone/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- include: keystone_install.yml
- tags:
- - install
- - keystone_install
- - keystone
-
-- include: keystone_config.yml
- when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
- tags:
- - config
- - keystone_config
- - keystone
diff --git a/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh b/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh
deleted file mode 100644
index f2e0d61..0000000
--- a/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-# Verify the Identity Service installation
-export OS_PASSWORD={{ ADMIN_PASS }}
-export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0
-export OS_USERNAME=admin
-
diff --git a/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh b/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh
deleted file mode 100644
index 8bdc51b..0000000
--- a/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-export OS_USERNAME=demo
-export OS_PASSWORD={{ DEMO_PASS }}
-export OS_TENANT_NAME=demo
-export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0
-
diff --git a/compass/deploy/ansible/roles/keystone/templates/keystone.conf b/compass/deploy/ansible/roles/keystone/templates/keystone.conf
deleted file mode 100644
index fc8bf1f..0000000
--- a/compass/deploy/ansible/roles/keystone/templates/keystone.conf
+++ /dev/null
@@ -1,1317 +0,0 @@
-[DEFAULT]
-
-admin_token={{ ADMIN_TOKEN }}
-
-public_bind_host= {{ identity_host }}
-
-admin_bind_host= {{ identity_host }}
-
-#compute_port=8774
-
-#admin_port=35357
-
-#public_port=5000
-
-# The base public endpoint URL for keystone that is
-# advertised to clients (NOTE: this does NOT affect how
-# keystone listens for connections) (string value).
-# Defaults to the base host URL of the request. E.g. a
-# request to http://server:5000/v2.0/users will
-# default to http://server:5000. You should only need
-# to set this value if the base URL contains a path
-# (eg /prefix/v2.0) or the endpoint should be found on
-# a different server.
-#public_endpoint=http://localhost:%(public_port)s/
-
-# The base admin endpoint URL for keystone that is advertised
-# to clients (NOTE: this does NOT affect how keystone listens
-# for connections) (string value).
-# Defaults to the base host URL of the request. E.g. a
-# request to http://server:35357/v2.0/users will
-# default to http://server:35357. You should only need
-# to set this value if the base URL contains a path
-# (eg /prefix/v2.0) or the endpoint should be found on
-# a different server.
-#admin_endpoint=http://localhost:%(admin_port)s/
-
-# onready allows you to send a notification when the process
-# is ready to serve. For example, to have it notify using
-# systemd, one could set shell command: "onready = systemd-
-# notify --ready" or a module with notify() method: "onready =
-# keystone.common.systemd". (string value)
-#onready=<None>
-
-# enforced by optional sizelimit middleware
-# (keystone.middleware:RequestBodySizeLimiter). (integer
-# value)
-#max_request_body_size=114688
-
-# limit the sizes of user & tenant ID/names. (integer value)
-#max_param_size=64
-
-# similar to max_param_size, but provides an exception for
-# token values. (integer value)
-#max_token_size=8192
-
-# During a SQL upgrade member_role_id will be used to create a
-# new role that will replace records in the
-# user_tenant_membership table with explicit role grants.
-# After migration, the member_role_id will be used in the API
-# add_user_to_project. (string value)
-#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab
-
-# During a SQL upgrade member_role_id will be used to create a
-# new role that will replace records in the
-# user_tenant_membership table with explicit role grants.
-# After migration, member_role_name will be ignored. (string
-# value)
-#member_role_name=_member_
-
-# The value passed as the keyword "rounds" to passlib encrypt
-# method. (integer value)
-#crypt_strength=40000
-
-# Set this to True if you want to enable TCP_KEEPALIVE on
-# server sockets i.e. sockets used by the keystone wsgi server
-# for client connections. (boolean value)
-#tcp_keepalive=false
-
-# Sets the value of TCP_KEEPIDLE in seconds for each server
-# socket. Only applies if tcp_keepalive is True. Not supported
-# on OS X. (integer value)
-#tcp_keepidle=600
-
-# The maximum number of entities that will be returned in a
-# collection can be set with list_limit, with no limit set by
-# default. This global limit may be then overridden for a
-# specific driver, by specifying a list_limit in the
-# appropriate section (e.g. [assignment]). (integer value)
-#list_limit=<None>
-
-# Set this to false if you want to enable the ability for
-# user, group and project entities to be moved between domains
-# by updating their domain_id. Allowing such movement is not
-# recommended if the scope of a domain admin is being
-# restricted by use of an appropriate policy file (see
-# policy.v3cloudsample as an example). (boolean value)
-#domain_id_immutable=true
-
-
-#
-# Options defined in oslo.messaging
-#
-
-# Use durable queues in amqp. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-#amqp_durable_queues=false
-
-# Auto-delete queues in amqp. (boolean value)
-#amqp_auto_delete=false
-
-# Size of RPC connection pool. (integer value)
-#rpc_conn_pool_size=30
-
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call. (list value)
-#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions
-
-# Qpid broker hostname. (string value)
-#qpid_hostname=localhost
-
-# Qpid broker port. (integer value)
-#qpid_port=5672
-
-# Qpid HA cluster host:port pairs. (list value)
-#qpid_hosts=$qpid_hostname:$qpid_port
-
-# Username for Qpid connection. (string value)
-#qpid_username=
-
-# Password for Qpid connection. (string value)
-#qpid_password=
-
-# Space separated list of SASL mechanisms to use for auth.
-# (string value)
-#qpid_sasl_mechanisms=
-
-# Seconds between connection keepalive heartbeats. (integer
-# value)
-#qpid_heartbeat=60
-
-# Transport to use, either 'tcp' or 'ssl'. (string value)
-#qpid_protocol=tcp
-
-# Whether to disable the Nagle algorithm. (boolean value)
-#qpid_tcp_nodelay=true
-
-# The qpid topology version to use. Version 1 is what was
-# originally used by impl_qpid. Version 2 includes some
-# backwards-incompatible changes that allow broker federation
-# to work. Users should update to version 2 when they are
-# able to take everything down, as it requires a clean break.
-# (integer value)
-#qpid_topology_version=1
-
-# SSL version to use (valid only if SSL enabled). valid values
-# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
-# distributions. (string value)
-#kombu_ssl_version=
-
-# SSL key file (valid only if SSL enabled). (string value)
-#kombu_ssl_keyfile=
-
-# SSL cert file (valid only if SSL enabled). (string value)
-#kombu_ssl_certfile=
-
-# SSL certification authority file (valid only if SSL
-# enabled). (string value)
-#kombu_ssl_ca_certs=
-
-# How long to wait before reconnecting in response to an AMQP
-# consumer cancel notification. (floating point value)
-#kombu_reconnect_delay=1.0
-
-# The RabbitMQ broker address where a single node is used.
-# (string value)
-#rabbit_host=localhost
-
-# The RabbitMQ broker port where a single node is used.
-# (integer value)
-#rabbit_port=5672
-
-# RabbitMQ HA cluster host:port pairs. (list value)
-#rabbit_hosts=$rabbit_host:$rabbit_port
-
-# Connect over SSL for RabbitMQ. (boolean value)
-#rabbit_use_ssl=false
-
-# The RabbitMQ userid. (string value)
-rabbit_userid={{ RABBIT_USER }}
-
-# The RabbitMQ password. (string value)
-rabbit_password={{ RABBIT_PASS }}
-
-# the RabbitMQ login method (string value)
-#rabbit_login_method=AMQPLAIN
-
-# The RabbitMQ virtual host. (string value)
-#rabbit_virtual_host=/
-
-# How frequently to retry connecting with RabbitMQ. (integer
-# value)
-#rabbit_retry_interval=1
-
-# How long to backoff for between retries when connecting to
-# RabbitMQ. (integer value)
-#rabbit_retry_backoff=2
-
-# Maximum number of RabbitMQ connection retries. Default is 0
-# (infinite retry count). (integer value)
-#rabbit_max_retries=0
-
-# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
-# this option, you must wipe the RabbitMQ database. (boolean
-# value)
-#rabbit_ha_queues=false
-
-# If passed, use a fake RabbitMQ provider. (boolean value)
-#fake_rabbit=false
-
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet
-# interface, or IP. The "host" option should point or resolve
-# to this address. (string value)
-#rpc_zmq_bind_address=*
-
-# MatchMaker driver. (string value)
-#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
-
-# ZeroMQ receiver listening port. (integer value)
-#rpc_zmq_port=9501
-
-# Number of ZeroMQ contexts, defaults to 1. (integer value)
-#rpc_zmq_contexts=1
-
-# Maximum number of ingress messages to locally buffer per
-# topic. Default is unlimited. (integer value)
-#rpc_zmq_topic_backlog=<None>
-
-# Directory for holding IPC sockets. (string value)
-#rpc_zmq_ipc_dir=/var/run/openstack
-
-# Name of this node. Must be a valid hostname, FQDN, or IP
-# address. Must match "host" option, if running Nova. (string
-# value)
-#rpc_zmq_host=keystone
-
-# Seconds to wait before a cast expires (TTL). Only supported
-# by impl_zmq. (integer value)
-#rpc_cast_timeout=30
-
-# Heartbeat frequency. (integer value)
-#matchmaker_heartbeat_freq=300
-
-# Heartbeat time-to-live. (integer value)
-#matchmaker_heartbeat_ttl=600
-
-# Host to locate redis. (string value)
-#host=127.0.0.1
-
-# Use this port to connect to redis host. (integer value)
-#port=6379
-
-# Password for Redis server (optional). (string value)
-#password=<None>
-
-# Size of RPC greenthread pool. (integer value)
-#rpc_thread_pool_size=64
-
-# Driver or drivers to handle sending notifications. (multi
-# valued)
-#notification_driver=
-
-# AMQP topic used for OpenStack notifications. (list value)
-# Deprecated group/name - [rpc_notifier2]/topics
-#notification_topics=notifications
-
-# Seconds to wait for a response from a call. (integer value)
-#rpc_response_timeout=60
-
-# A URL representing the messaging driver to use and its full
-# configuration. If not set, we fall back to the rpc_backend
-# option and driver specific configuration. (string value)
-#transport_url=<None>
-
-# The messaging driver to use, defaults to rabbit. Other
-# drivers include qpid and zmq. (string value)
-#rpc_backend=rabbit
-
-# The default exchange under which topics are scoped. May be
-# overridden by an exchange name specified in the
-# transport_url option. (string value)
-#control_exchange=openstack
-
-
-#
-# Options defined in keystone.notifications
-#
-
-# Default publisher_id for outgoing notifications (string
-# value)
-#default_publisher_id=<None>
-
-
-#
-# Options defined in keystone.middleware.ec2_token
-#
-
-# URL to get token from ec2 request. (string value)
-#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
-
-# Required if EC2 server requires client certificate. (string
-# value)
-#keystone_ec2_keyfile=<None>
-
-# Client certificate key filename. Required if EC2 server
-# requires client certificate. (string value)
-#keystone_ec2_certfile=<None>
-
-# A PEM encoded certificate authority to use when verifying
-# HTTPS connections. Defaults to the system CAs. (string
-# value)
-#keystone_ec2_cafile=<None>
-
-# Disable SSL certificate verification. (boolean value)
-#keystone_ec2_insecure=false
-
-
-#
-# Options defined in keystone.openstack.common.eventlet_backdoor
-#
-
-# Enable eventlet backdoor. Acceptable values are 0, <port>,
-# and <start>:<end>, where 0 results in listening on a random
-# tcp port number; <port> results in listening on the
-# specified port number (and not enabling backdoor if that
-# port is in use); and <start>:<end> results in listening on
-# the smallest unused port number within the specified range
-# of port numbers. The chosen port is displayed in the
-# service's log file. (string value)
-#backdoor_port=<None>
-
-
-#
-# Options defined in keystone.openstack.common.lockutils
-#
-
-# Whether to disable inter-process locks (boolean value)
-#disable_process_locking=false
-
-# Directory to use for lock files. (string value)
-#lock_path=<None>
-
-
-#
-# Options defined in keystone.openstack.common.log
-#
-
-# Print debugging output (set logging level to DEBUG instead
-# of default WARNING level). (boolean value)
-debug={{ DEBUG }}
-
-# Print more verbose output (set logging level to INFO instead
-# of default WARNING level). (boolean value)
-verbose={{ VERBOSE }}
-
-# Log output to standard error (boolean value)
-#use_stderr=true
-
-# Format string to use for log messages with context (string
-# value)
-#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
-
-# Format string to use for log messages without context
-# (string value)
-#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-
-# Data to append to log format when level is DEBUG (string
-# value)
-#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
-
-# Prefix each line of exception output with this format
-# (string value)
-#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
-
-# List of logger=LEVEL pairs (list value)
-#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
-
-# Publish error events (boolean value)
-#publish_errors=false
-
-# Make deprecations fatal (boolean value)
-#fatal_deprecations=false
-
-# If an instance is passed with the log message, format it
-# like this (string value)
-#instance_format="[instance: %(uuid)s] "
-
-# If an instance UUID is passed with the log message, format
-# it like this (string value)
-#instance_uuid_format="[instance: %(uuid)s] "
-
-# The name of logging configuration file. It does not disable
-# existing loggers, but just appends specified logging
-# configuration to any other existing logging options. Please
-# see the Python logging module documentation for details on
-# logging configuration files. (string value)
-# Deprecated group/name - [DEFAULT]/log_config
-#log_config_append=<None>
-
-# DEPRECATED. A logging.Formatter log message format string
-# which may use any of the available logging.LogRecord
-# attributes. This option is deprecated. Please use
-# logging_context_format_string and
-# logging_default_format_string instead. (string value)
-#log_format=<None>
-
-# Format string for %%(asctime)s in log records. Default:
-# %(default)s (string value)
-#log_date_format=%Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to output to. If no default is
-# set, logging will go to stdout. (string value)
-# Deprecated group/name - [DEFAULT]/logfile
-#log_file=<None>
-
-# (Optional) The base directory used for relative --log-file
-# paths (string value)
-# Deprecated group/name - [DEFAULT]/logdir
-log_dir = /var/log/keystone
-
-# Use syslog for logging. Existing syslog format is DEPRECATED
-# during I, and then will be changed in J to honor RFC5424
-# (boolean value)
-#use_syslog=false
-
-# (Optional) Use syslog rfc5424 format for logging. If
-# enabled, will add APP-NAME (RFC5424) before the MSG part of
-# the syslog message. The old format without APP-NAME is
-# deprecated in I, and will be removed in J. (boolean value)
-#use_syslog_rfc_format=false
-
-# Syslog facility to receive log lines (string value)
-#syslog_log_facility=LOG_USER
-
-
-#
-# Options defined in keystone.openstack.common.policy
-#
-
-# JSON file containing policy (string value)
-#policy_file=policy.json
-
-# Rule enforced when requested rule is not found (string
-# value)
-#policy_default_rule=default
-
-
-[assignment]
-
-#
-# Options defined in keystone
-#
-
-# Keystone Assignment backend driver. (string value)
-#driver=<None>
-
-# Toggle for assignment caching. This has no effect unless
-# global caching is enabled. (boolean value)
-#caching=true
-
-# TTL (in seconds) to cache assignment data. This has no
-# effect unless global caching is enabled. (integer value)
-#cache_time=<None>
-
-# Maximum number of entities that will be returned in an
-# assignment collection. (integer value)
-#list_limit=<None>
-
-
-[auth]
-
-#
-# Options defined in keystone
-#
-
-# Default auth methods. (list value)
-#methods=external,password,token
-
-# The password auth plugin module. (string value)
-#password=keystone.auth.plugins.password.Password
-
-# The token auth plugin module. (string value)
-#token=keystone.auth.plugins.token.Token
-
-# The external (REMOTE_USER) auth plugin module. (string
-# value)
-#external=keystone.auth.plugins.external.DefaultDomain
-
-
-[cache]
-
-#
-# Options defined in keystone
-#
-
-# Prefix for building the configuration dictionary for the
-# cache region. This should not need to be changed unless
-# there is another dogpile.cache region with the same
-# configuration name. (string value)
-#config_prefix=cache.keystone
-
-# Default TTL, in seconds, for any cached item in the
-# dogpile.cache region. This applies to any cached method that
-# doesn't have an explicit cache expiration time defined for
-# it. (integer value)
-#expiration_time=600
-
-# Dogpile.cache backend module. It is recommended that
-# Memcache (dogpile.cache.memcache) or Redis
-# (dogpile.cache.redis) be used in production deployments.
-# Small workloads (single process) like devstack can use the
-# dogpile.cache.memory backend. (string value)
-#backend=keystone.common.cache.noop
-
-# Use a key-mangling function (sha1) to ensure fixed length
-# cache-keys. This is toggle-able for debugging purposes; it
-# is highly recommended to always leave this set to True.
-# (boolean value)
-#use_key_mangler=true
-
-# Arguments supplied to the backend module. Specify this
-# option once per argument to be passed to the dogpile.cache
-# backend. Example format: "<argname>:<value>". (multi valued)
-#backend_argument=
-
-# Proxy Classes to import that will affect the way the
-# dogpile.cache backend functions. See the dogpile.cache
-# documentation on changing-backend-behavior. Comma delimited
-# list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2.
-# (list value)
-#proxies=
-
-# Global toggle for all caching using the should_cache_fn
-# mechanism. (boolean value)
-#enabled=false
-
-# Extra debugging from the cache backend (cache keys,
-# get/set/delete/etc calls) This is only really useful if you
-# need to see the specific cache-backend get/set/delete calls
-# with the keys/values. Typically this should be left set to
-# False. (boolean value)
-#debug_cache_backend=false
-
-
-[catalog]
-
-#
-# Options defined in keystone
-#
-
-# Catalog template file name for use with the template catalog
-# backend. (string value)
-#template_file=default_catalog.templates
-
-# Keystone catalog backend driver. (string value)
-#driver=keystone.catalog.backends.sql.Catalog
-
-# Maximum number of entities that will be returned in a
-# catalog collection. (integer value)
-#list_limit=<None>
-
-
-[credential]
-
-#
-# Options defined in keystone
-#
-
-# Keystone Credential backend driver. (string value)
-#driver=keystone.credential.backends.sql.Credential
-
-
-[database]
-
-#
-# Options defined in keystone.openstack.common.db.options
-#
-
-# The file name to use with SQLite (string value)
-#sqlite_db=keystone.sqlite
-
-# If True, SQLite uses synchronous mode (boolean value)
-#sqlite_synchronous=true
-
-# The backend to use for db (string value)
-# Deprecated group/name - [DEFAULT]/db_backend
-#backend=sqlalchemy
-
-# The SQLAlchemy connection string used to connect to the
-# database (string value)
-# Deprecated group/name - [DEFAULT]/sql_connection
-# Deprecated group/name - [DATABASE]/sql_connection
-# Deprecated group/name - [sql]/connection
-#connection=<None>
-connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone
-
-# The SQL mode to be used for MySQL sessions. This option,
-# including the default, overrides any server-set SQL mode. To
-# use whatever SQL mode is set by the server configuration,
-# set this to no value. Example: mysql_sql_mode= (string
-# value)
-#mysql_sql_mode=TRADITIONAL
-
-# Timeout before idle sql connections are reaped (integer
-# value)
-# Deprecated group/name - [DEFAULT]/sql_idle_timeout
-# Deprecated group/name - [DATABASE]/sql_idle_timeout
-# Deprecated group/name - [sql]/idle_timeout
-#idle_timeout=3600
-
-# Minimum number of SQL connections to keep open in a pool
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_min_pool_size
-# Deprecated group/name - [DATABASE]/sql_min_pool_size
-#min_pool_size=1
-
-# Maximum number of SQL connections to keep open in a pool
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_pool_size
-# Deprecated group/name - [DATABASE]/sql_max_pool_size
-#max_pool_size=<None>
-
-# Maximum db connection retries during startup. (setting -1
-# implies an infinite retry count) (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_retries
-# Deprecated group/name - [DATABASE]/sql_max_retries
-#max_retries=10
-
-# Interval between retries of opening a sql connection
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_retry_interval
-# Deprecated group/name - [DATABASE]/reconnect_interval
-#retry_interval=10
-
-# If set, use this value for max_overflow with sqlalchemy
-# (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_overflow
-# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
-#max_overflow=<None>
-
-# Verbosity of SQL debugging information. 0=None,
-# 100=Everything (integer value)
-# Deprecated group/name - [DEFAULT]/sql_connection_debug
-#connection_debug=0
-
-# Add python stack traces to SQL as comment strings (boolean
-# value)
-# Deprecated group/name - [DEFAULT]/sql_connection_trace
-#connection_trace=false
-
-# If set, use this value for pool_timeout with sqlalchemy
-# (integer value)
-# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
-#pool_timeout=<None>
-
-# Enable the experimental use of database reconnect on
-# connection lost (boolean value)
-#use_db_reconnect=false
-
-# seconds between db connection retries (integer value)
-#db_retry_interval=1
-
-# Whether to increase interval between db connection retries,
-# up to db_max_retry_interval (boolean value)
-#db_inc_retry_interval=true
-
-# max seconds between db connection retries, if
-# db_inc_retry_interval is enabled (integer value)
-#db_max_retry_interval=10
-
-# maximum db connection retries before error is raised.
-# (setting -1 implies an infinite retry count) (integer value)
-#db_max_retries=20
-
-
-[ec2]
-
-#
-# Options defined in keystone
-#
-
-# Keystone EC2Credential backend driver. (string value)
-#driver=keystone.contrib.ec2.backends.kvs.Ec2
-
-
-[endpoint_filter]
-
-#
-# Options defined in keystone
-#
-
-# Keystone Endpoint Filter backend driver (string value)
-#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
-
-# Toggle to return all active endpoints if no filter exists.
-# (boolean value)
-#return_all_endpoints_if_no_filter=true
-
-
-[federation]
-
-#
-# Options defined in keystone
-#
-
-# Keystone Federation backend driver. (string value)
-#driver=keystone.contrib.federation.backends.sql.Federation
-
-# Value to be used when filtering assertion parameters from
-# the environment. (string value)
-#assertion_prefix=
-
-
-[identity]
-
-#
-# Options defined in keystone
-#
-
-# This references the domain to use for all Identity API v2
-# requests (which are not aware of domains). A domain with
-# this ID will be created for you by keystone-manage db_sync
-# in migration 008. The domain referenced by this ID cannot
-# be deleted on the v3 API, to prevent accidentally breaking
-# the v2 API. There is nothing special about this domain,
-# other than the fact that it must exist in order to maintain
-# support for your v2 clients. (string value)
-#default_domain_id=default
-
-# A subset (or all) of domains can have their own identity
-# driver, each with their own partial configuration file in a
-# domain configuration directory. Only values specific to the
-# domain need to be placed in the domain specific
-# configuration file. This feature is disabled by default; set
-# to True to enable. (boolean value)
-#domain_specific_drivers_enabled=false
-
-# Path for Keystone to locate the domain specific identity
-# configuration files if domain_specific_drivers_enabled is
-# set to true. (string value)
-#domain_config_dir=/etc/keystone/domains
-
-# Keystone Identity backend driver. (string value)
-#driver=keystone.identity.backends.sql.Identity
-
-# Maximum supported length for user passwords; decrease to
-# improve performance. (integer value)
-#max_password_length=4096
-
-# Maximum number of entities that will be returned in an
-# identity collection. (integer value)
-#list_limit=<None>
-
-
-[kvs]
-
-#
-# Options defined in keystone
-#
-
-# Extra dogpile.cache backend modules to register with the
-# dogpile.cache library. (list value)
-#backends=
-
-# Prefix for building the configuration dictionary for the KVS
-# region. This should not need to be changed unless there is
-# another dogpile.cache region with the same configuration
-# name. (string value)
-#config_prefix=keystone.kvs
-
-# Toggle to disable using a key-mangling function to ensure
-# fixed length keys. This is toggle-able for debugging
-# purposes; it is highly recommended to always leave this set
-# to True. (boolean value)
-#enable_key_mangler=true
-
-# Default lock timeout for distributed locking. (integer
-# value)
-#default_lock_timeout=5
-
-
-[ldap]
-
-#
-# Options defined in keystone
-#
-
-# URL for connecting to the LDAP server. (string value)
-#url=ldap://localhost
-
-# User BindDN to query the LDAP server. (string value)
-#user=<None>
-
-# Password for the BindDN to query the LDAP server. (string
-# value)
-#password=<None>
-
-# LDAP server suffix (string value)
-#suffix=cn=example,cn=com
-
-# If true, will add a dummy member to groups. This is required
-# if the objectclass for groups requires the "member"
-# attribute. (boolean value)
-#use_dumb_member=false
-
-# DN of the "dummy member" to use when "use_dumb_member" is
-# enabled. (string value)
-#dumb_member=cn=dumb,dc=nonexistent
-
-# allow deleting subtrees. (boolean value)
-#allow_subtree_delete=false
-
-# The LDAP scope for queries; this can be either "one"
-# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree).
-# (string value)
-#query_scope=one
-
-# Maximum results per page; a value of zero ("0") disables
-# paging. (integer value)
-#page_size=0
-
-# The LDAP dereferencing option for queries. This can be
-# either "never", "searching", "always", "finding" or
-# "default". The "default" option falls back to using default
-# dereferencing configured by your ldap.conf. (string value)
-#alias_dereferencing=default
-
-# Override the system's default referral chasing behavior for
-# queries. (boolean value)
-#chase_referrals=<None>
-
-# Search base for users. (string value)
-#user_tree_dn=<None>
-
-# LDAP search filter for users. (string value)
-#user_filter=<None>
-
-# LDAP objectClass for users. (string value)
-#user_objectclass=inetOrgPerson
-
-# LDAP attribute mapped to user id. (string value)
-#user_id_attribute=cn
-
-# LDAP attribute mapped to user name. (string value)
-#user_name_attribute=sn
-
-# LDAP attribute mapped to user email. (string value)
-#user_mail_attribute=email
-
-# LDAP attribute mapped to password. (string value)
-#user_pass_attribute=userPassword
-
-# LDAP attribute mapped to user enabled flag. (string value)
-#user_enabled_attribute=enabled
-
-# Bitmask integer to indicate the bit that the enabled value
-# is stored in if the LDAP server represents "enabled" as a
-# bit on an integer rather than a boolean. A value of "0"
-# indicates the mask is not used. If this is not set to "0"
-# the typical value is "2". This is typically used when
-# "user_enabled_attribute = userAccountControl". (integer
-# value)
-#user_enabled_mask=0
-
-# Default value to enable users. This should match an
-# appropriate int value if the LDAP server uses non-boolean
-# (bitmask) values to indicate if a user is enabled or
-# disabled. If this is not set to "True" the typical value is
-# "512". This is typically used when "user_enabled_attribute =
-# userAccountControl". (string value)
-#user_enabled_default=True
-
-# List of attributes stripped off the user on update. (list
-# value)
-#user_attribute_ignore=default_project_id,tenants
-
-# LDAP attribute mapped to default_project_id for users.
-# (string value)
-#user_default_project_id_attribute=<None>
-
-# Allow user creation in LDAP backend. (boolean value)
-#user_allow_create=true
-
-# Allow user updates in LDAP backend. (boolean value)
-#user_allow_update=true
-
-# Allow user deletion in LDAP backend. (boolean value)
-#user_allow_delete=true
-
-# If True, Keystone uses an alternative method to determine if
-# a user is enabled or not by checking if they are a member of
-# the "user_enabled_emulation_dn" group. (boolean value)
-#user_enabled_emulation=false
-
-# DN of the group entry to hold enabled users when using
-# enabled emulation. (string value)
-#user_enabled_emulation_dn=<None>
-
-# Additional attribute mappings for users. Attribute mapping
-# format is <ldap_attr>:<user_attr>, where ldap_attr is the
-# attribute in the LDAP entry and user_attr is the Identity
-# API attribute. (list value)
-#user_additional_attribute_mapping=
-
-# Search base for projects (string value)
-#tenant_tree_dn=<None>
-
-# LDAP search filter for projects. (string value)
-#tenant_filter=<None>
-
-# LDAP objectClass for projects. (string value)
-#tenant_objectclass=groupOfNames
-
-# LDAP attribute mapped to project id. (string value)
-#tenant_id_attribute=cn
-
-# LDAP attribute mapped to project membership for user.
-# (string value)
-#tenant_member_attribute=member
-
-# LDAP attribute mapped to project name. (string value)
-#tenant_name_attribute=ou
-
-# LDAP attribute mapped to project description. (string value)
-#tenant_desc_attribute=description
-
-# LDAP attribute mapped to project enabled. (string value)
-#tenant_enabled_attribute=enabled
-
-# LDAP attribute mapped to project domain_id. (string value)
-#tenant_domain_id_attribute=businessCategory
-
-# List of attributes stripped off the project on update. (list
-# value)
-#tenant_attribute_ignore=
-
-# Allow tenant creation in LDAP backend. (boolean value)
-#tenant_allow_create=true
-
-# Allow tenant update in LDAP backend. (boolean value)
-#tenant_allow_update=true
-
-# Allow tenant deletion in LDAP backend. (boolean value)
-#tenant_allow_delete=true
-
-# If True, Keystone uses an alternative method to determine if
-# a project is enabled or not by checking if it is a member
-# of the "tenant_enabled_emulation_dn" group. (boolean value)
-#tenant_enabled_emulation=false
-
-# DN of the group entry to hold enabled projects when using
-# enabled emulation. (string value)
-#tenant_enabled_emulation_dn=<None>
-
-# Additional attribute mappings for projects. Attribute
-# mapping format is <ldap_attr>:<user_attr>, where ldap_attr
-# is the attribute in the LDAP entry and user_attr is the
-# Identity API attribute. (list value)
-#tenant_additional_attribute_mapping=
-
-# Search base for roles. (string value)
-#role_tree_dn=<None>
-
-# LDAP search filter for roles. (string value)
-#role_filter=<None>
-
-# LDAP objectClass for roles. (string value)
-#role_objectclass=organizationalRole
-
-# LDAP attribute mapped to role id. (string value)
-#role_id_attribute=cn
-
-# LDAP attribute mapped to role name. (string value)
-#role_name_attribute=ou
-
-# LDAP attribute mapped to role membership. (string value)
-#role_member_attribute=roleOccupant
-
-# List of attributes stripped off the role on update. (list
-# value)
-#role_attribute_ignore=
-
-# Allow role creation in LDAP backend. (boolean value)
-#role_allow_create=true
-
-# Allow role update in LDAP backend. (boolean value)
-#role_allow_update=true
-
-# Allow role deletion in LDAP backend. (boolean value)
-#role_allow_delete=true
-
-# Additional attribute mappings for roles. Attribute mapping
-# format is <ldap_attr>:<user_attr>, where ldap_attr is the
-# attribute in the LDAP entry and user_attr is the Identity
-# API attribute. (list value)
-#role_additional_attribute_mapping=
-
-# Search base for groups. (string value)
-#group_tree_dn=<None>
-
-# LDAP search filter for groups. (string value)
-#group_filter=<None>
-
-# LDAP objectClass for groups. (string value)
-#group_objectclass=groupOfNames
-
-# LDAP attribute mapped to group id. (string value)
-#group_id_attribute=cn
-
-# LDAP attribute mapped to group name. (string value)
-#group_name_attribute=ou
-
-# LDAP attribute mapped to show group membership. (string
-# value)
-#group_member_attribute=member
-
-# LDAP attribute mapped to group description. (string value)
-#group_desc_attribute=description
-
-# List of attributes stripped off the group on update. (list
-# value)
-#group_attribute_ignore=
-
-# Allow group creation in LDAP backend. (boolean value)
-#group_allow_create=true
-
-# Allow group update in LDAP backend. (boolean value)
-#group_allow_update=true
-
-# Allow group deletion in LDAP backend. (boolean value)
-#group_allow_delete=true
-
-# Additional attribute mappings for groups. Attribute mapping
-# format is <ldap_attr>:<user_attr>, where ldap_attr is the
-# attribute in the LDAP entry and user_attr is the Identity
-# API attribute. (list value)
-#group_additional_attribute_mapping=
-
-# CA certificate file path for communicating with LDAP
-# servers. (string value)
-#tls_cacertfile=<None>
-
-# CA certificate directory path for communicating with LDAP
-# servers. (string value)
-#tls_cacertdir=<None>
-
-# Enable TLS for communicating with LDAP servers. (boolean
-# value)
-#use_tls=false
-
-# valid options for tls_req_cert are demand, never, and allow.
-# (string value)
-#tls_req_cert=demand
-
-
-[matchmaker_ring]
-
-#
-# Options defined in oslo.messaging
-#
-
-# Matchmaker ring file (JSON). (string value)
-# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
-#ringfile=/etc/oslo/matchmaker_ring.json
-
-
-[memcache]
-
-#
-# Options defined in keystone
-#
-
-# Memcache servers in the format of "host:port" (list value)
-#servers=localhost:11211
-
-# Number of compare-and-set attempts to make when using
-# compare-and-set in the token memcache back end. (integer
-# value)
-#max_compare_and_set_retry=16
-
-
-[oauth1]
-
-#
-# Options defined in keystone
-#
-
-# Keystone OAuth1 backend driver. (string value)
-#driver=keystone.contrib.oauth1.backends.sql.OAuth1
-
-# Duration (in seconds) for the OAuth Request Token. (integer
-# value)
-#request_token_duration=28800
-
-# Duration (in seconds) for the OAuth Access Token. (integer
-# value)
-#access_token_duration=86400
-
-
-[os_inherit]
-
-#
-# Options defined in keystone
-#
-
-# role-assignment inheritance to projects from owning domain
-# can be optionally enabled. (boolean value)
-#enabled=false
-
-
-[paste_deploy]
-
-#
-# Options defined in keystone
-#
-
-# Name of the paste configuration file that defines the
-# available pipelines. (string value)
-#config_file=keystone-paste.ini
-
-
-[policy]
-
-#
-# Options defined in keystone
-#
-
-# Keystone Policy backend driver. (string value)
-#driver=keystone.policy.backends.sql.Policy
-
-# Maximum number of entities that will be returned in a policy
-# collection. (integer value)
-#list_limit=<None>
-
-
-[revoke]
-
-#
-# Options defined in keystone
-#
-
-# An implementation of the backend for persisting revocation
-# events. (string value)
-#driver=keystone.contrib.revoke.backends.kvs.Revoke
-
-# This value (calculated in seconds) is added to token
-# expiration before a revocation event may be removed from the
-# backend. (integer value)
-#expiration_buffer=1800
-
-# Toggle for revocation event caching. This has no effect
-# unless global caching is enabled. (boolean value)
-#caching=true
-
-
-[signing]
-
-#
-# Options defined in keystone
-#
-
-# Deprecated in favor of provider in the [token] section.
-# (string value)
-#token_format=<None>
-
-# Path of the certfile for token signing. (string value)
-#certfile=/etc/keystone/ssl/certs/signing_cert.pem
-
-# Path of the keyfile for token signing. (string value)
-#keyfile=/etc/keystone/ssl/private/signing_key.pem
-
-# Path of the CA for token signing. (string value)
-#ca_certs=/etc/keystone/ssl/certs/ca.pem
-
-# Path of the CA Key for token signing. (string value)
-#ca_key=/etc/keystone/ssl/private/cakey.pem
-
-# Key Size (in bits) for token signing cert (auto generated
-# certificate). (integer value)
-#key_size=2048
-
-# Days the token signing cert is valid for (auto generated
-# certificate). (integer value)
-#valid_days=3650
-
-# Certificate Subject (auto generated certificate) for token
-# signing. (string value)
-#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
-
-
-[ssl]
-
-#
-# Options defined in keystone
-#
-
-# Toggle for SSL support on the keystone eventlet servers.
-# (boolean value)
-#enable=false
-
-# Path of the certfile for SSL. (string value)
-#certfile=/etc/keystone/ssl/certs/keystone.pem
-
-# Path of the keyfile for SSL. (string value)
-#keyfile=/etc/keystone/ssl/private/keystonekey.pem
-
-# Path of the ca cert file for SSL. (string value)
-#ca_certs=/etc/keystone/ssl/certs/ca.pem
-
-# Path of the CA key file for SSL. (string value)
-#ca_key=/etc/keystone/ssl/private/cakey.pem
-
-# Require client certificate. (boolean value)
-#cert_required=false
-
-# SSL Key Length (in bits) (auto generated certificate).
-# (integer value)
-#key_size=1024
-
-# Days the certificate is valid for once signed (auto
-# generated certificate). (integer value)
-#valid_days=3650
-
-# SSL Certificate Subject (auto generated certificate).
-# (string value)
-#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
-
-
-[stats]
-
-#
-# Options defined in keystone
-#
-
-# Keystone stats backend driver. (string value)
-#driver=keystone.contrib.stats.backends.kvs.Stats
-
-
-[token]
-
-#
-# Options defined in keystone
-#
-
-# External auth mechanisms that should add bind information to
-# token e.g. kerberos, x509. (list value)
-#bind=
-
-# Enforcement policy on tokens presented to keystone with bind
-# information. One of disabled, permissive, strict, required
-# or a specifically required bind mode e.g. kerberos or x509
-# to require binding to that authentication. (string value)
-#enforce_token_bind=permissive
-
-# Amount of time a token should remain valid (in seconds).
-# (integer value)
-#expiration=3600
-
-# Controls the token construction, validation, and revocation
-# operations. Core providers are
-# "keystone.token.providers.[pki|uuid].Provider". (string
-# value)
-provider=keystone.token.providers.uuid.Provider
-
-# Keystone Token persistence backend driver. (string value)
-driver=keystone.token.persistence.backends.sql.Token
-
-# Toggle for token system caching. This has no effect unless
-# global caching is enabled. (boolean value)
-#caching=true
-
-# Time to cache the revocation list and the revocation events
-# if revoke extension is enabled (in seconds). This has no
-# effect unless global and token caching are enabled. (integer
-# value)
-revocation_cache_time=3600
-
-# Time to cache tokens (in seconds). This has no effect unless
-# global and token caching are enabled. (integer value)
-#cache_time=<None>
-
-# Revoke token by token identifier. Setting revoke_by_id to
-# True enables various forms of enumerating tokens, e.g. `list
-# tokens for user`. These enumerations are processed to
-# determine the list of tokens to revoke. Only disable if
-# you are switching to using the Revoke extension with a
-# backend other than KVS, which stores events in memory.
-# (boolean value)
-#revoke_by_id=true
-
-
-[trust]
-
-#
-# Options defined in keystone
-#
-
-# delegation and impersonation features can be optionally
-# disabled. (boolean value)
-#enabled=true
-
-# Keystone Trust backend driver. (string value)
-#driver=keystone.trust.backends.sql.Trust
-
-
-[extra_headers]
-Distribution = Ubuntu
-
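
With the template above rendered, the schema behind [database]/connection is created and expired UUID tokens are pruned with keystone-manage (a sketch; both subcommands ship with keystone):

    keystone-manage db_sync        # build the schema in the keystone MySQL database
    keystone-manage token_flush    # prune expired tokens from the sql persistence backend
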
diff --git a/compass/deploy/ansible/roles/keystone/templates/keystone_init b/compass/deploy/ansible/roles/keystone/templates/keystone_init
deleted file mode 100644
index 729669b..0000000
--- a/compass/deploy/ansible/roles/keystone/templates/keystone_init
+++ /dev/null
@@ -1,43 +0,0 @@
-# create an administrative user
-
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --email=admin@admin.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 role-create --name=admin
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=admin --description="Admin Tenant"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --tenant=admin --role=admin
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --role=_member_ --tenant=admin
-
-# create a normal user
-
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=demo --pass={{ DEMO_PASS }} --email=DEMO_EMAIL
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=demo --description="Demo Tenant"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=demo --role=_member_ --tenant=demo
-
-# create a service tenant
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=service --description="Service Tenant"
-
-# register the keystone service and endpoint
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ HA_VIP }}:5000/v2.0 --internalurl=http://{{ HA_VIP }}:5000/v2.0 --adminurl=http://{{ HA_VIP }}:35357/v2.0
-
-# Create a glance user that the Image Service can use to authenticate with the Identity service
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=glance --tenant=service --role=admin
-
-# Register the Image Service with the Identity service so that other OpenStack services can locate it
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ HA_VIP }}:9292 --internalurl=http://{{ HA_VIP }}:9292 --adminurl=http://{{ HA_VIP }}:9292
-
-# Create a nova user that Compute uses to authenticate with the Identity Service
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=nova --tenant=service --role=admin
-
-# register Compute with the Identity Service so that other OpenStack services can locate it
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s
-
-# register neutron user, role and service
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ HA_VIP }}:9696 --adminurl http://{{ HA_VIP }}:9696 --internalurl http://{{ HA_VIP }}:9696
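
A sanity check for the registrations above, reusing the same bootstrap token (sketch):

    keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list
    keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-list

The identity, image, compute and network entries should all be listed once the script has run.
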
diff --git a/compass/deploy/ansible/roles/monitor/files/check_service.sh b/compass/deploy/ansible/roles/monitor/files/check_service.sh
deleted file mode 100644
index d309673..0000000
--- a/compass/deploy/ansible/roles/monitor/files/check_service.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-# restart any service from /opt/service that Upstart reports as stop/waiting
-services=`sort /opt/service | uniq`
-for service in $services; do
- if /sbin/initctl list | awk '/stop\/waiting/{print $1}' | uniq | grep -q "^${service}$"; then
- /sbin/start $service
- fi
-done
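
To exercise the watchdog by hand on an Upstart host (a sketch; keystone stands in for any job listed in /opt/service):

    echo keystone >> /opt/service
    /sbin/stop keystone                 # leaves the job in stop/waiting
    /usr/local/bin/check_service.sh     # should spot it and start keystone again
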
diff --git a/compass/deploy/ansible/roles/monitor/files/root b/compass/deploy/ansible/roles/monitor/files/root
deleted file mode 100644
index 9c55c4f..0000000
--- a/compass/deploy/ansible/roles/monitor/files/root
+++ /dev/null
@@ -1 +0,0 @@
-* * * * * /usr/local/bin/check_service.sh >> /var/log/check_service.log 2>&1
diff --git a/compass/deploy/ansible/roles/monitor/tasks/main.yml b/compass/deploy/ansible/roles/monitor/tasks/main.yml
deleted file mode 100644
index e5b93f3..0000000
--- a/compass/deploy/ansible/roles/monitor/tasks/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: copy service check file
- copy: src=check_service.sh dest=/usr/local/bin/check_service.sh mode=0777
-
-- name: copy cron file
- copy: src=root dest=/var/spool/cron/crontabs/root mode=0600
-
-- name: restart cron
- service: name=cron state=restarted
-
-
diff --git a/compass/deploy/ansible/roles/mq/tasks/main.yml b/compass/deploy/ansible/roles/mq/tasks/main.yml
deleted file mode 100644
index 4ae4065..0000000
--- a/compass/deploy/ansible/roles/mq/tasks/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- include: rabbitmq.yml
-
-#- include: rabbitmq_cluster.yml
-# when: HA_CLUSTER is defined
diff --git a/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml b/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml
deleted file mode 100644
index 5714406..0000000
--- a/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- name: create rabbitmq directory
- file: path=/etc/rabbitmq state=directory mode=0755
-
-- name: copy rabbitmq config file
- template: src=rabbitmq-env.conf dest=/etc/rabbitmq/rabbitmq-env.conf mode=0755
-
-- name: install rabbitmq-server
- apt: name=rabbitmq-server state=present
-
-- name: stop rabbitmq-server
- service: name=rabbitmq-server
- state=stopped
-
-- name: update .erlang.cookie
- template: src=.erlang.cookie dest=/var/lib/rabbitmq/.erlang.cookie
- group=rabbitmq
- owner=rabbitmq
- mode=0400
- when: ERLANG_TOKEN is defined
-
-- name: start and enable rabbitmq-server
- service: name=rabbitmq-server
- state=started
- enabled=yes
-
-- name: generate mq service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - rabbitmq-server
-
-- name: modify rabbitmq password
- command: rabbitmqctl change_password guest {{ RABBIT_PASS }}
- when: "RABBIT_USER is defined and RABBIT_USER == 'guest'"
- ignore_errors: True
-
-- name: add rabbitmq user
- command: rabbitmqctl add_user {{ RABBIT_USER }} {{ RABBIT_PASS }}
- when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
- ignore_errors: True
-
-- name: set rabbitmq user permission
- command: rabbitmqctl set_permissions -p / {{ RABBIT_USER }} ".*" ".*" ".*"
- when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
-
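
After these tasks run, broker state can be verified with rabbitmqctl (sketch):

    rabbitmqctl status                  # the Erlang node should answer
    rabbitmqctl list_users              # the configured RABBIT_USER should be listed
    rabbitmqctl list_permissions -p /   # and should hold ".*" ".*" ".*" on the default vhost
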
diff --git a/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml b/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml
deleted file mode 100644
index afd4c77..0000000
--- a/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: stop rabbitmq app
- command: rabbitmqctl stop_app
- when: HA_CLUSTER[inventory_hostname] != ''
-
-- name: rabbitmqctl reset
- command: rabbitmqctl reset
- when: HA_CLUSTER[inventory_hostname] != ''
-
-- name: stop rabbitmq
- shell: rabbitmqctl stop
-
-- name: set detach
- shell: rabbitmq-server -detached
-
-- name: join cluster
- command: rabbitmqctl join_cluster rabbit@{{ item }}
- when: item != inventory_hostname and HA_CLUSTER[item] == ''
- with_items:
- groups['controller']
-
-- name: start rabbitmq app
- command: rabbitmqctl start_app
-
-- name: set the HA policy
- rabbitmq_policy: name=ha-all pattern='^(?!amq\.).*' tags="ha-mode=all"
-
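
Once the nodes have joined, membership and the HA policy can be checked from any controller (sketch):

    rabbitmqctl cluster_status    # every controller should appear under running_nodes
    rabbitmqctl list_policies     # the ha-all policy with ha-mode=all should be present
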
diff --git a/compass/deploy/ansible/roles/mq/templates/.erlang.cookie b/compass/deploy/ansible/roles/mq/templates/.erlang.cookie
deleted file mode 100644
index cadcfaf..0000000
--- a/compass/deploy/ansible/roles/mq/templates/.erlang.cookie
+++ /dev/null
@@ -1 +0,0 @@
-{{ ERLANG_TOKEN }}
diff --git a/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf b/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf
deleted file mode 100644
index 6dd7349..0000000
--- a/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf
+++ /dev/null
@@ -1 +0,0 @@
-RABBITMQ_NODE_IP_ADDRESS={{ HA_VIP }}
diff --git a/compass/deploy/ansible/roles/neutron-common/handlers/main.yml b/compass/deploy/ansible/roles/neutron-common/handlers/main.yml
deleted file mode 100644
index 36d779d..0000000
--- a/compass/deploy/ansible/roles/neutron-common/handlers/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: restart neutron-plugin-openvswitch-agent
- service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
- when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron-l3-agent
- service: name=neutron-l3-agent state=restarted enabled=yes
-
-- name: restart neutron-dhcp-agent
- service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
- service: name=neutron-metadata-agent state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml b/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml
deleted file mode 100644
index 825178b..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-neutron_ovs_bridge_mappings: ""
diff --git a/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml b/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml
deleted file mode 100644
index 36d779d..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: restart neutron-plugin-openvswitch-agent
- service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
- when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron-l3-agent
- service: name=neutron-l3-agent state=restarted enabled=yes
-
-- name: restart neutron-dhcp-agent
- service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
- service: name=neutron-metadata-agent state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml b/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml
deleted file mode 100644
index 93ee46f..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml
+++ /dev/null
@@ -1,55 +0,0 @@
----
-
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: install compute-related neutron packages
- apt: name={{ item }} state=present force=yes
- with_items:
- - neutron-common
- - neutron-plugin-ml2
- - openvswitch-datapath-dkms
- - openvswitch-switch
-
- name: generate neutron compute service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - neutron-plugin-openvswitch-agent
-
-- name: install neutron openvswitch agent
- apt: name=neutron-plugin-openvswitch-agent
- state=present force=yes
- when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: config neutron
- template: src=neutron-network.conf
- dest=/etc/neutron/neutron.conf backup=yes
- notify:
- - restart neutron-plugin-openvswitch-agent
-
-- name: config ml2 plugin
- template: src=ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
- notify:
- - restart neutron-plugin-openvswitch-agent
-
-- name: add br-int
- openvswitch_bridge: bridge=br-int state=present
- notify:
- - restart neutron-plugin-openvswitch-agent
- - restart nova-compute
-
-- include: ../../neutron-network/tasks/odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- meta: flush_handlers
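
A quick post-run check on a compute node (sketch; br-exists exits 0 when the bridge is present):

    ovs-vsctl br-exists br-int && echo "br-int present"
    service neutron-plugin-openvswitch-agent status   # not installed when opendaylight is the mechanism driver
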
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini
deleted file mode 100644
index 19eb62e..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini
+++ /dev/null
@@ -1,90 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# The DHCP agent will resync its state with Neutron to recover from any
-# transient notification or rpc errors. The interval is number of
-# seconds between attempts.
-resync_interval = 5
-
-# The DHCP agent requires an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
-# BigSwitch/Floodlight)
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Name of Open vSwitch bridge to use
-# ovs_integration_bridge = br-int
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
-# no additional setup of the DHCP server.
-dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
-
-# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
-# an iproute2 package that supports namespaces).
-use_namespaces = True
-
-# The DHCP server can assist with providing metadata support on isolated
-# networks. Setting this value to True will cause the DHCP server to append
-# specific host routes to the DHCP request. The metadata service will only
-# be activated when the subnet does not contain any router port. The guest
-# instance must be configured to request host routes via DHCP (Option 121).
-enable_isolated_metadata = False
-
-# Allows for serving metadata requests coming from a dedicated metadata
-# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
-# is connected to a Neutron router from which the VMs send metadata
-# request. In this case DHCP Option 121 will not be injected in VMs, as
-# they will be able to reach 169.254.169.254 through a router.
-# This option requires enable_isolated_metadata = True
-enable_metadata_network = False
-
-# Number of threads to use during sync process. Should not exceed connection
-# pool size configured on server.
-# num_sync_threads = 4
-
-# Location to store DHCP server config files
-# dhcp_confs = $state_path/dhcp
-
-# Domain to use for building the hostnames
-dhcp_domain = openstacklocal
-
-# Override the default dnsmasq settings with this file
-# dnsmasq_config_file =
-dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
-
-# Comma-separated list of DNS servers which will be used by dnsmasq
-# as forwarders.
-# dnsmasq_dns_servers =
-
-# Limit number of leases to prevent a denial-of-service.
-dnsmasq_lease_max = 16777216
-
-# Location to DHCP lease relay UNIX domain socket
-# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# dhcp_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the dhcp agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a dhcp server is disabled.
-# dhcp_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf
deleted file mode 100644
index 7bcbd9d..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-dhcp-option-force=26,1454
-
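
Option 26 is the DHCP interface-MTU option, so guests boot with a 1454-byte MTU, leaving headroom for GRE/VXLAN encapsulation on a 1500-byte physical MTU. A sketch for confirming it (the namespace name is illustrative):

    ip netns list | grep qdhcp    # locate the DHCP namespace for the tenant network
    # inside a booted guest: ip link show eth0  ->  "mtu 1454" expected
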
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot
deleted file mode 100644
index 32caf96..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot
+++ /dev/null
@@ -1,25 +0,0 @@
-interfaces {
- restore-original-config-on-shutdown: false
- interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- description: "Internal pNodes interface"
- disable: false
- default-system-config
- }
-}
-
-protocols {
- igmp {
- disable: false
- interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- disable: false
- version: 3
- }
- }
- traceoptions {
- flag all {
- disable: false
- }
- }
- }
-}
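This XORP template exists to run IGMPv3 on the tunnel interface, which the multicast VXLAN mode configured in ml2_conf.ini (vxlan_group = 239.1.1.1) depends on. For illustration only, assuming neutron_vxlan_interface is unset and the internal_interface host fact reports device eth1, the interfaces stanza would render as:

    interfaces {
        restore-original-config-on-shutdown: false
        interface eth1 {
            description: "Internal pNodes interface"
            disable: false
            default-system-config
        }
    }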
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini
deleted file mode 100644
index b394c00..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini
+++ /dev/null
@@ -1,81 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# L3 requires that an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
-# that support the L3 agent
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces).
-use_namespaces = True
-
-# If use_namespaces is set to False then the agent can only configure one
-# router. This is done by setting the specific router_id.
-# router_id =
-
-# When external_network_bridge is set, each L3 agent can be associated
-# with no more than one external network. This value should be set to the UUID
-# of that external network. To allow L3 agent support multiple external
-# networks, both the external_network_bridge and gateway_external_network_id
-# must be left empty.
-# gateway_external_network_id =
-
-# Indicates that this L3 agent should also handle routers that do not have
-# an external network gateway configured. This option should be True only
-# for a single agent in a Neutron deployment, and may be False for all agents
-# if all routers must have an external network gateway
-handle_internal_only_routers = True
-
-# Name of bridge used for external network traffic. This should be set to an
-# empty value for the Linux bridge. When this parameter is set, each L3 agent
-# can be associated with no more than one external network.
-external_network_bridge = br-ex
-
-# TCP Port used by Neutron metadata server
-metadata_port = 9697
-
-# Send this many gratuitous ARPs for HA setup. Set it to 0 or less to
-# disable this feature.
-send_arp_for_ha = 3
-
-# Seconds between re-syncing routers' data, if needed
-periodic_interval = 40
-
-# Seconds to wait after starting the agent before syncing routers' data
-periodic_fuzzy_delay = 5
-
-# enable_metadata_proxy, which is true by default, can be set to False
-# if the Nova metadata server is not available
-# enable_metadata_proxy = True
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# router_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the L3 agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a router is destroyed.
-# router_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
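Because the agent above depends on kernel network namespaces and an existing br-ex bridge, a quick sanity check on the host (a sketch using stock iproute2 and Open vSwitch tools, not part of the role) is:

    # one qrouter-<uuid> namespace should appear per router hosted by this agent
    ip netns list | grep qrouter
    # br-ex must exist before the agent can plug router gateway ports into it
    ovs-vsctl br-exists br-ex && echo "br-ex present"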
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini
deleted file mode 100644
index 6badf28..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini
+++ /dev/null
@@ -1,46 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-debug = True
-
-# The Neutron user information for accessing the Neutron API.
-auth_url = http://{{ HA_VIP }}:5000/v2.0
-auth_region = RegionOne
-# Turn off verification of the certificate for ssl
-# auth_insecure = False
-# Certificate Authority public key (CA cert) file for ssl
-# auth_ca_cert =
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-
-# Network service endpoint type to pull from the keystone catalog
-# endpoint_type = adminURL
-
-# IP address used by Nova metadata server
-nova_metadata_ip = {{ HA_VIP }}
-
-# TCP Port used by Nova metadata server
-nova_metadata_port = 8775
-
-# When proxying metadata requests, Neutron signs the Instance-ID header with a
-# shared secret to prevent spoofing. You may select any string for a secret,
-# but it must match here and in the configuration used by the Nova Metadata
-# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# Number of separate worker processes for metadata server
-# metadata_workers = 0
-
-# Number of backlog requests to configure the metadata server socket with
-# metadata_backlog = 128
-
-# URL to connect to the cache backend.
-# Example of URL using memory caching backend
-# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
-# default_ttl=0 parameter will cause cache entries to never expire.
-# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
-# No cache is used in case no value is passed.
-# cache_url =
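As the NOTE above spells out, the same secret must appear on the Nova side under the key neutron_metadata_proxy_shared_secret. A minimal sketch of that counterpart, using the Juno-era [DEFAULT] option names (the enable flag is an assumption from that release, not shown elsewhere in this repo):

    [DEFAULT]
    # must match metadata_proxy_shared_secret in metadata_agent.ini
    service_neutron_metadata_proxy = True
    neutron_metadata_proxy_shared_secret = {{ METADATA_SECRET }}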
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini
deleted file mode 100644
index a790069..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini
+++ /dev/null
@@ -1,108 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-flat_networks = external
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating
-# ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-vni_ranges = 1001:4095
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-vxlan_group = 239.1.1.1
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls whether the neutron security group is enabled or not.
-# It should be false when you use the nova security group.
-# enable_security_group = True
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = True
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
-
-[ovs]
-local_ip = {{ internal_ip }}
-{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
-integration_bridge = br-int
-tunnel_bridge = br-tun
-tunnel_id_ranges = 1001:4095
-tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
-{% endif %}
-
-[agent]
-root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
-tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
-vxlan_udp_port = 4789
-{% endif %}
-l2_population = False
-
-[odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-network_vlan_ranges = 1001:4095
-tunnel_id_ranges = 1001:4095
-tun_peer_patch_port = patch-int
-int_peer_patch_port = patch-tun
-tenant_network_type = vxlan
-tunnel_bridge = br-tun
-integration_bridge = br-int
-controllers = 10.1.0.15:8080:admin:admin
-{% endif %}
-
-[ml2_odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-username = {{ odl_username }}
-password = {{ odl_password }}
-url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
-{% endif %}
-
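To make the Jinja joins above concrete: with hypothetical group variables NEUTRON_TYPE_DRIVERS: [flat, gre, vxlan], NEUTRON_TENANT_NETWORK_TYPES: [vxlan] and NEUTRON_MECHANISM_DRIVERS: [openvswitch] (illustrative values, not the repo's defaults), the head of the rendered file would read:

    [ml2]
    type_drivers = flat,gre,vxlan
    tenant_network_types = vxlan
    mechanism_drivers = openvswitch

The same join(",") filter also drives tunnel_types in the [agent] section, so one variable list keeps the plugin and agent views consistent.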
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf
deleted file mode 100644
index 93be9cb..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf
+++ /dev/null
@@ -1,465 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum number of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response.
-# The value 'infinite' or a negative integer means no limit; otherwise the
-# value must be greater than 0. If the number of items requested is greater
-# than pagination_max_limit, the server will return only pagination_max_limit
-# items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to the first DHCP agent which sends a get_active_networks message
-# to the neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to the first L3 agent which sends a sync_routers message to the
-# neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on the back-end, a member is a resource consumer,
-# and that is the reason why a quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on the back-end, a health monitor is a
-# resource consumer, and that is the reason why a quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is a multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's LBaaS driver you need to uncomment the next line.
-# If you want to keep HAProxy as the default LBaaS driver, remove the attribute default from the line below.
-# Otherwise comment out the HAProxy line.
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# Uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
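The service_provider grammar documented above is <service_type>:<name>:<driver>[:default]. As a sketch combining two driver lines that already appear in this file, keeping HAProxy as the default while also exposing a non-default NetScaler backend would look like:

    service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
    service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver

Each <service_type>:<name> pair and each driver path must be unique, and at most one provider per service type may carry :default.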
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf
deleted file mode 100644
index 1575367..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf
+++ /dev/null
@@ -1,466 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum number of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response.
-# The value 'infinite' or a negative integer means no limit; otherwise the
-# value must be greater than 0. If the number of items requested is greater
-# than pagination_max_limit, the server will return only pagination_max_limit
-# items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to the first DHCP agent which sends a get_active_networks message
-# to the neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to the first L3 agent which sends a sync_routers message to the
-# neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on the back-end, a member is a resource consumer,
-# and that is the reason why a quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on the back-end, a health monitor is a
-# resource consumer, and that is the reason why a quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is a multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's LBaaS driver you need to uncomment the next line.
-# If you want to keep HAProxy as the default LBaaS driver, remove the attribute default from the line below.
-# Otherwise comment out the HAProxy line.
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# Uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
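With api_workers = 8 and rpc_workers = 8 as set above, the parent neutron-server process forks separate API and RPC worker children. A rough post-restart check (a sketch; the exact count varies by release and plugin) is:

    # expect roughly 1 parent + api_workers + rpc_workers processes
    pgrep -fc neutron-server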
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh
deleted file mode 100644
index b92e202..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
-
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
-
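Both commands above ship commented out, presumably because creating ext-net is a one-shot operation rather than something to repeat on every play. If they are ever run by hand, the result can be verified with the same client and credentials (an illustrative check, with shell variables standing in for the Jinja values):

    neutron --os-username=admin --os-password=$ADMIN_PASS --os-tenant-name=admin \
            --os-auth-url=http://$IDENTITY_HOST:35357/v2.0 net-show ext-net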
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf b/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf
deleted file mode 100644
index 4988cb0..0000000
--- a/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf
+++ /dev/null
@@ -1,73 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lock/nova
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE }}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=ec2,osapi_compute,metadata
-
-vif_plugging_is_fatal=false
-vif_plugging_timeout=0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[conductor]
-manager = nova.conductor.manager.ConductorManager
-topic = conductor
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
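Repeating notification_driver twice in the [DEFAULT] section above is deliberate: it is a multi-valued option, so Nova publishes notifications both through the RPC notifier and into Ceilometer's compute notifier. A minimal excerpt preserving that behaviour:

    # both drivers stay active; the option accumulates rather than overrides
    notification_driver = nova.openstack.common.notifier.rpc_notifier
    notification_driver = ceilometer.compute.nova_notifier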
diff --git a/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml b/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml
deleted file mode 100644
index b4c1585..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: restart nova-api
- service: name=nova-api state=restarted enabled=yes
-
-- name: restart nova-cert
- service: name=nova-cert state=restarted enabled=yes
-
-- name: restart nova-consoleauth
- service: name=nova-consoleauth state=restarted enabled=yes
-
-- name: restart nova-scheduler
- service: name=nova-scheduler state=restarted enabled=yes
-
-- name: restart nova-conductor
- service: name=nova-conductor state=restarted enabled=yes
-
-- name: restart nova-novncproxy
- service: name=nova-novncproxy state=restarted enabled=yes
-
-- name: remove nova-sqlite-db
- shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
-
-- name: restart neutron-server
- service: name=neutron-server state=restarted enabled=yes
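Handlers like those above only run when a task notifies them by name, and at most once per play however many tasks fire the notification. An illustrative task (not taken from this role) that would trigger two of them:

    - name: update nova conf
      template: src=nova.conf dest=/etc/nova/nova.conf backup=yes
      notify:
        - restart nova-api
        - restart nova-scheduler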
diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml
deleted file mode 100644
index 9c04d74..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- include: neutron_install.yml
- tags:
- - install
- - neutron_install
- - neutron
-
-- include: neutron_config.yml
- when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
- tags:
- - config
- - neutron_config
- - neutron
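The tags attached to each include let an operator re-run one slice of the role without replaying everything. With standard ansible-playbook flags (the playbook and inventory names here are illustrative):

    # re-apply only the neutron configuration step
    ansible-playbook -i hosts site.yml --tags neutron_config
    # skip neutron tasks entirely
    ansible-playbook -i hosts site.yml --skip-tags neutron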
diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml
deleted file mode 100644
index 77cc29a..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: neutron-db-manage upgrade to Juno
- shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
- register: result
- until: result.rc == 0
- retries: 5
- delay: 3
- notify:
- - restart neutron-server
-
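The until/retries/delay trio above retries the schema migration up to five times, three seconds apart, until it exits cleanly, which guards against transient failures such as the database VIP not yet answering. The same pattern suits any flaky bootstrap step; a hypothetical example in the same style:

    - name: wait for neutron API to answer
      shell: curl -sf http://{{ HA_VIP }}:9696/
      register: api_check
      until: api_check.rc == 0
      retries: 5
      delay: 3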
diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml
deleted file mode 100644
index 6165299..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: install controller-related neutron packages
- apt: name={{ item }} state=present force=yes
- with_items:
- - neutron-server
- - neutron-plugin-ml2
-
-- name: generate neutron controller service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - neutron-server
- - neutron-plugin-ml2
-
-- name: get tenant id to fill neutron.conf
- shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}'
- register: NOVA_ADMIN_TENANT_ID
-
-- name: update neutron conf
- template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
- notify:
- - restart neutron-server
-
-- name: update ml2 plugin conf
- template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
- notify:
- - restart neutron-server
-
-- meta: flush_handlers
-
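The registered NOVA_ADMIN_TENANT_ID from the task above is what the neutron.conf template consumes as {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}: stdout_lines[0] is the first line of the keystone command's output, i.e. the bare tenant UUID. A manual equivalent of that lookup, with shell variables standing in for the play's values:

    keystone --os-token=$ADMIN_TOKEN --os-endpoint=http://$HA_VIP:35357/v2.0 \
             tenant-get service | grep id | awk '{print $4}'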
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini
deleted file mode 100644
index 19eb62e..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini
+++ /dev/null
@@ -1,90 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# The DHCP agent will resync its state with Neutron to recover from any
-# transient notification or rpc errors. The interval is number of
-# seconds between attempts.
-resync_interval = 5
-
-# The DHCP agent requires an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
-# BigSwitch/Floodlight)
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Name of Open vSwitch bridge to use
-# ovs_integration_bridge = br-int
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
-# no additional setup of the DHCP server.
-dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
-
-# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces).
-use_namespaces = True
-
-# The DHCP server can assist with providing metadata support on isolated
-# networks. Setting this value to True will cause the DHCP server to append
-# specific host routes to the DHCP request. The metadata service will only
-# be activated when the subnet does not contain any router port. The guest
-# instance must be configured to request host routes via DHCP (Option 121).
-enable_isolated_metadata = False
-
-# Allows for serving metadata requests coming from a dedicated metadata
-# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
-# is connected to a Neutron router from which the VMs send metadata
-# requests. In this case DHCP Option 121 will not be injected in VMs, as
-# they will be able to reach 169.254.169.254 through a router.
-# This option requires enable_isolated_metadata = True
-enable_metadata_network = False
-
-# Number of threads to use during sync process. Should not exceed connection
-# pool size configured on server.
-# num_sync_threads = 4
-
-# Location to store DHCP server config files
-# dhcp_confs = $state_path/dhcp
-
-# Domain to use for building the hostnames
-dhcp_domain = openstacklocal
-
-# Override the default dnsmasq settings with this file
-# dnsmasq_config_file =
-dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
-
-# Comma-separated list of DNS servers which will be used by dnsmasq
-# as forwarders.
-# dnsmasq_dns_servers =
-
-# Limit number of leases to prevent a denial-of-service.
-dnsmasq_lease_max = 16777216
-
-# Location of the DHCP lease relay UNIX domain socket
-# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# dhcp_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the dhcp agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a dhcp server is disabled.
-# dhcp_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
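
The dhcp_agent.ini above leans on two host-side prerequisites: network namespace support in the kernel (use_namespaces = True) and a dnsmasq binary for the Dnsmasq driver. A minimal ad-hoc check, as a sketch outside the role (the 'network' host group is an assumption):

---
# standalone prerequisite check; a sketch, not part of the role
- hosts: network
  tasks:
    - name: verify the kernel exposes network namespaces
      command: ip netns list
      changed_when: false

    - name: verify dnsmasq is present for the Dnsmasq DHCP driver
      command: which dnsmasq
      changed_when: false
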
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf
deleted file mode 100644
index 7bcbd9d..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-dhcp-option-force=26,1454
-
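
dnsmasq-neutron.conf is just the one dhcp-option-force line: DHCP option 26 advertises the instance MTU, and 1454 leaves headroom for tunnel encapsulation on a 1500-byte physical network, in line with the GRE guidance of the era's install guides. A sketch of deploying it standalone (the host group and handler wiring are assumptions):

---
- hosts: network
  tasks:
    - name: pin the instance MTU handed out by dnsmasq
      copy:
        dest: /etc/neutron/dnsmasq-neutron.conf
        content: "dhcp-option-force=26,1454\n"
      notify: restart neutron-dhcp-agent
  handlers:
    - name: restart neutron-dhcp-agent
      service: name=neutron-dhcp-agent state=restarted
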
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot
deleted file mode 100644
index 32caf96..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot
+++ /dev/null
@@ -1,25 +0,0 @@
-interfaces {
- restore-original-config-on-shutdown: false
- interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- description: "Internal pNodes interface"
- disable: false
- default-system-config
- }
-}
-
-protocols {
- igmp {
- disable: false
- interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- disable: false
- version: 3
- }
- }
- traceoptions {
- flag all {
- disable: false
- }
- }
- }
-}
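
The config.boot template resolves the IGMP interface through hostvars: neutron_vxlan_interface (falling back to internal_interface) is expected to hold the name of an Ansible fact such as ansible_eth1, whose 'device' key yields the kernel device name. A sketch of that resolution, with the fact name as an assumption:

---
- hosts: network
  vars:
    internal_interface: ansible_eth1   # assumption: eth1 is the VXLAN-facing NIC
  tasks:
    - name: show the device xorp binds IGMP to
      debug:
        msg: "{{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }}"
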
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini
deleted file mode 100644
index b394c00..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini
+++ /dev/null
@@ -1,81 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# L3 requires that an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
-# that supports L3 agent
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# Allow overlapping IP (Must have a kernel built with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces).
-use_namespaces = True
-
-# If use_namespaces is set to False then the agent can only configure one router.
-
-# This is done by setting the specific router_id.
-# router_id =
-
-# When external_network_bridge is set, each L3 agent can be associated
-# with no more than one external network. This value should be set to the UUID
-# of that external network. To allow the L3 agent to support multiple external
-# networks, both the external_network_bridge and gateway_external_network_id
-# must be left empty.
-# gateway_external_network_id =
-
-# Indicates that this L3 agent should also handle routers that do not have
-# an external network gateway configured. This option should be True only
-# for a single agent in a Neutron deployment, and may be False for all agents
-# if all routers must have an external network gateway
-handle_internal_only_routers = True
-
-# Name of bridge used for external network traffic. This should be set to
-# an empty value for the Linux bridge. When this parameter is set, each L3 agent
-# can be associated with no more than one external network.
-external_network_bridge = br-ex
-
-# TCP Port used by Neutron metadata server
-metadata_port = 9697
-
-# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
-# to disable this feature.
-send_arp_for_ha = 3
-
-# Seconds between re-syncing routers' data, if needed
-periodic_interval = 40
-
-# Seconds to wait before starting to sync routers' data after
-# the agent starts
-periodic_fuzzy_delay = 5
-
-# enable_metadata_proxy, which is true by default, can be set to False
-# if the Nova metadata server is not available
-# enable_metadata_proxy = True
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# router_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the L3 agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a router is destroyed.
-# router_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
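
As the comments above state, external_network_bridge = br-ex ties each L3 agent to a single external network; serving several requires both external_network_bridge and gateway_external_network_id to be left empty. A sketch of switching an agent into that mode (the ini_file tasks are illustrative, not part of the role):

---
- hosts: network
  tasks:
    - name: clear both options so the agent can serve multiple external networks
      ini_file: dest=/etc/neutron/l3_agent.ini section=DEFAULT
                option={{ item }} value=
      with_items:
        - external_network_bridge
        - gateway_external_network_id
      notify: restart neutron-l3-agent
  handlers:
    - name: restart neutron-l3-agent
      service: name=neutron-l3-agent state=restarted
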
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini
deleted file mode 100644
index 6badf28..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini
+++ /dev/null
@@ -1,46 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-debug = True
-
-# The Neutron user information for accessing the Neutron API.
-auth_url = http://{{ HA_VIP }}:5000/v2.0
-auth_region = RegionOne
-# Turn off verification of the certificate for ssl
-# auth_insecure = False
-# Certificate Authority public key (CA cert) file for ssl
-# auth_ca_cert =
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-
-# Network service endpoint type to pull from the keystone catalog
-# endpoint_type = adminURL
-
-# IP address used by Nova metadata server
-nova_metadata_ip = {{ HA_VIP }}
-
-# TCP Port used by Nova metadata server
-nova_metadata_port = 8775
-
-# When proxying metadata requests, Neutron signs the Instance-ID header with a
-# shared secret to prevent spoofing. You may select any string for a secret,
-# but it must match here and in the configuration used by the Nova Metadata
-# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# Number of separate worker processes for metadata server
-# metadata_workers = 0
-
-# Number of backlog requests to configure the metadata server socket with
-# metadata_backlog = 128
-
-# URL to connect to the cache backend.
-# Example of URL using memory caching backend
-# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
-# default_ttl=0 parameter will cause cache entries to never expire.
-# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
-# No cache is used in case no value is passed.
-# cache_url =
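
The proxy signature only verifies when nova and neutron share the secret: metadata_proxy_shared_secret here must equal nova's neutron_metadata_proxy_shared_secret, the differently named option the NOTE above points at. A sketch of keeping both in lockstep by sourcing a single variable (the role names and vault variable are assumptions):

---
- hosts: controller
  vars:
    METADATA_SECRET: "{{ vault_metadata_secret }}"   # assumption: one shared source of truth
  roles:
    - neutron-controller   # templates metadata_agent.ini with METADATA_SECRET
    - nova-controller      # must template the same value into nova.conf
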
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini
deleted file mode 100644
index a790069..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini
+++ /dev/null
@@ -1,108 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-flat_networks = external
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-vni_ranges = 1001:4095
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-vxlan_group = 239.1.1.1
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = True
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
-
-[ovs]
-local_ip = {{ internal_ip }}
-{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
-integration_bridge = br-int
-tunnel_bridge = br-tun
-tunnel_id_ranges = 1001:4095
-tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
-{% endif %}
-
-[agent]
-root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
-tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
-vxlan_udp_port = 4789
-{% endif %}
-l2_population = False
-
-[odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-network_vlan_ranges = 1001:4095
-tunnel_id_ranges = 1001:4095
-tun_peer_patch_port = patch-int
-int_peer_patch_port = patch-tun
-tenant_network_type = vxlan
-tunnel_bridge = br-tun
-integration_bridge = br-int
-controllers = 10.1.0.15:8080:admin:admin
-{% endif %}
-
-[ml2_odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-username = {{ odl_username }}
-password = {{ odl_password }}
-url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
-{% endif %}
-
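
The join filters and conditionals in this template all key off four list variables. A sketch of the group_vars shape they assume, with illustrative values:

---
# group_vars/all (illustrative values, not the project's defaults)
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
NEUTRON_TUNNEL_TYPES: ['vxlan']

With these values the template renders type_drivers = flat,gre,vxlan, enables the [ovs] and vxlan blocks, and skips both OpenDaylight sections.
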
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf
deleted file mode 100644
index 93be9cb..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf
+++ /dev/null
@@ -1,465 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to the first DHCP agent which sends a get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to the first L3 agent which sends a sync_routers message to the neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resources allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on the back end, a member is a resource consumer
-# and that is the reason why a quota is applicable.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on the back end, a health monitor is a
-# resource consumer, and that is the reason why a quota is applicable.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry count - in the event connectivity is lost,
-# setting this to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is a multiline option; example for the default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
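
api_workers and rpc_workers are pinned to 8 above regardless of node size. A sketch of sizing them from the node's CPU count instead, as an adjustment outside the role:

---
- hosts: network
  tasks:
    - name: size API and RPC workers from the CPU count
      ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT
                option={{ item }} value={{ ansible_processor_vcpus }}
      with_items:
        - api_workers
        - rpc_workers
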
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf
deleted file mode 100644
index 2a66e94..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf
+++ /dev/null
@@ -1,466 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response.
-# The value 'infinite' or a negative integer means no max limit;
-# otherwise the value must be greater than 0. If the number of items
-# requested is greater than pagination_max_limit, the server will
-# return at most pagination_max_limit items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to the first DHCP agent which sends a get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to the first L3 agent which sends a sync_routers message to the neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resources allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on the back end, a member is a resource consumer
-# and that is the reason why a quota is applicable.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on the back end, a health monitor is a
-# resource consumer, and that is the reason why a quota is applicable.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry count - in the event connectivity is lost,
-# setting this to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is a multiline option; example for the default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
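
nova_admin_tenant_id above consumes NOVA_ADMIN_TENANT_ID.stdout_lines[0], which implies a registered command result. A sketch of how that lookup could be captured (the keystone CLI invocation and the ADMIN_TOKEN variable are assumptions):

---
- hosts: controller
  tasks:
    - name: look up the service tenant uuid for nova_admin_tenant_id
      shell: keystone --os-token {{ ADMIN_TOKEN }} --os-endpoint http://{{ HA_VIP }}:35357/v2.0 tenant-list | awk '/ service / {print $2}'
      register: NOVA_ADMIN_TENANT_ID
      changed_when: false
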
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh
deleted file mode 100644
index b92e202..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
-
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
-
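
Run as-is, the commented bootstrap above is not idempotent; repeating net-create would duplicate ext-net. A sketch of an idempotent wrapper around the same commands and variables:

---
- hosts: controller
  tasks:
    - name: check whether ext-net already exists
      shell: neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-list | grep -q ext-net
      register: extnet
      ignore_errors: true
      changed_when: false

    - name: create ext-net only when missing
      shell: neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
      when: extnet.rc != 0
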
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf b/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf
deleted file mode 100644
index 9587073..0000000
--- a/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf
+++ /dev/null
@@ -1,69 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lock/nova
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE }}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=ec2,osapi_compute,metadata
-
-vif_plugging_is_fatal=false
-vif_plugging_timeout=0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
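
The VNC settings above bind to {{ internal_ip }}. A sketch of how such a variable is commonly derived from gathered facts (the interface name is an assumption):

---
- hosts: controller
  vars:
    internal_ip: "{{ ansible_eth1.ipv4.address }}"   # assumption: eth1 is the management NIC
  tasks:
    - name: show the address the VNC services will bind to
      debug: var=internal_ip
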
diff --git a/compass/deploy/ansible/roles/neutron-network/handlers/main.yml b/compass/deploy/ansible/roles/neutron-network/handlers/main.yml
deleted file mode 100644
index d6c5cc8..0000000
--- a/compass/deploy/ansible/roles/neutron-network/handlers/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: restart neutron-plugin-openvswitch-agent
- service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
- when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart neutron-l3-agent
- service: name=neutron-l3-agent state=restarted enabled=yes
-
-- name: kill dnsmasq
- command: killall dnsmasq
- ignore_errors: True
-
-- name: restart neutron-dhcp-agent
- service: name=neutron-dhcp-agent state=restarted enabled=yes
-
-- name: restart neutron-metadata-agent
- service: name=neutron-metadata-agent state=restarted enabled=yes
-
-- name: restart xorp
- service: name=xorp state=restarted enabled=yes sleep=10
- ignore_errors: True
diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml b/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml
deleted file mode 100644
index d6f38a0..0000000
--- a/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Install XORP to provide IGMP router functionality
- apt: pkg=xorp
-
-- name: configure xorp
- template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot
- notify:
- - restart xorp
-
-- name: set xorp defaults
- lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes
- notify:
- - restart xorp
-
-- meta: flush_handlers
-
-- name: start and enable xorp service
- service: name=xorp state=started enabled=yes
- register: xorp_result
- until: xorp_result|success
- retries: 2
- delay: 10
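
XORP serves as the IGMP querier so VXLAN broadcast traffic can use the multicast group configured in ml2_conf.ini (239.1.1.1). A sketch for spot-checking that the node actually joined the group:

---
- hosts: network
  tasks:
    - name: list joined multicast groups
      command: ip maddr show
      register: maddr
      changed_when: false

    - name: report whether the VXLAN group is joined
      debug:
        msg: "{{ '239.1.1.1 joined' if '239.1.1.1' in maddr.stdout else '239.1.1.1 not joined' }}"
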
diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/main.yml b/compass/deploy/ansible/roles/neutron-network/tasks/main.yml
deleted file mode 100644
index 1d4b591..0000000
--- a/compass/deploy/ansible/roles/neutron-network/tasks/main.yml
+++ /dev/null
@@ -1,114 +0,0 @@
----
-- name: activate ipv4 forwarding
- sysctl: name=net.ipv4.ip_forward value=1
- state=present reload=yes
-
-- name: deactivate ipv4 rp filter
- sysctl: name=net.ipv4.conf.all.rp_filter value=0
- state=present reload=yes
-
-- name: deactivate ipv4 default rp filter
- sysctl: name=net.ipv4.conf.default.rp_filter
- value=0 state=present reload=yes
-
-- name: install neutron network related packages
- apt: name={{ item }} state=present force=yes
- with_items:
- - neutron-plugin-ml2
- - openvswitch-datapath-dkms
- - openvswitch-switch
- - neutron-l3-agent
- - neutron-dhcp-agent
-
-- name: generate neutron service list
- lineinfile: dest=/opt/service create=yes line={{ item }}
- with_items:
- - openvswitch-switch
- - neutron-l3-agent
- - neutron-dhcp-agent
- - neutron-plugin-openvswitch-agent
- - neutron-metadata-agent
- - xorp
-
-- name: install neutron openvswitch agent
- apt: name=neutron-plugin-openvswitch-agent
- state=present force=yes
- when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: config neutron
- template: src=neutron-network.conf
- dest=/etc/neutron/neutron.conf backup=yes
- notify:
- - restart neutron-plugin-openvswitch-agent
- - restart neutron-l3-agent
- - kill dnsmasq
- - restart neutron-dhcp-agent
- - restart neutron-metadata-agent
-
-- name: config l3 agent
- template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
- backup=yes
- notify:
- - restart neutron-l3-agent
-
-- name: config dhcp agent
- template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
- backup=yes
- notify:
- - kill dnsmasq
- - restart neutron-dhcp-agent
-
-- name: update dnsmasq-neutron.conf
- template: src=dnsmasq-neutron.conf
- dest=/etc/neutron/dnsmasq-neutron.conf
- notify:
- - kill dnsmasq
- - restart neutron-dhcp-agent
-
-- name: config metadata agent
- template: src=metadata_agent.ini
- dest=/etc/neutron/metadata_agent.ini backup=yes
- notify:
- - restart neutron-metadata-agent
-
-- name: config ml2 plugin
- template: src=ml2_conf.ini
- dest=/etc/neutron/plugins/ml2/ml2_conf.ini
- backup=yes
- notify:
- - restart neutron-plugin-openvswitch-agent
-
-- meta: flush_handlers
-
-- name: add br-int
- openvswitch_bridge: bridge=br-int state=present
-
-- name: add br-ex
- openvswitch_bridge: bridge=br-ex state=present
- when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: assign a port to br-ex for physical ext interface
- openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }}
- state=present
- when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- include: igmp-router.yml
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: assert kernel support for vxlan
- command: modinfo -F version vxlan
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- name: assert iproute2 support for vxlan
- command: ip link add type vxlan help
- register: iproute_out
- failed_when: iproute_out.rc == 255
- when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-
-- include: odl.yml
- when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
-- name: restart ovs service
- service: name=openvswitch-switch state=restarted enabled=yes
-
-- meta: flush_handlers
diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml b/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml
deleted file mode 100644
index a2b449c..0000000
--- a/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: ovs set manager
- command: ovs-vsctl set-manager tcp:{{ controller }}:6640
-
-- name: get ovs uuid
- shell: ovs-vsctl get Open_vSwitch . _uuid
- register: ovs_uuid
-
-- name: set bridge_mappings
- command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }}
-
-- name: set local ip
- command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:local_ip={{ internal_ip }}
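
The UUID lookup above can be avoided: ovs-vsctl accepts `.` as shorthand for the single row of the Open_vSwitch table, and one `set` invocation may carry several column:key=value assignments. A condensed sketch of the last three tasks:

    - name: set bridge_mappings and local ip
      command: ovs-vsctl set Open_vSwitch . other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }} other_config:local_ip={{ internal_ip }}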
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini
deleted file mode 100644
index 19eb62e..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini
+++ /dev/null
@@ -1,90 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# The DHCP agent will resync its state with Neutron to recover from any
-# transient notification or rpc errors. The interval is number of
-# seconds between attempts.
-resync_interval = 5
-
-# The DHCP agent requires an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
-# BigSwitch/Floodlight)
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Name of Open vSwitch bridge to use
-# ovs_integration_bridge = br-int
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
-# no additional setup of the DHCP server.
-dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
-
-# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
-# an iproute2 package that supports namespaces).
-use_namespaces = True
-
-# The DHCP server can assist with providing metadata support on isolated
-# networks. Setting this value to True will cause the DHCP server to append
-# specific host routes to the DHCP request. The metadata service will only
-# be activated when the subnet does not contain any router port. The guest
-# instance must be configured to request host routes via DHCP (Option 121).
-enable_isolated_metadata = False
-
-# Allows serving metadata requests coming from a dedicated metadata
-# access network whose CIDR is 169.254.169.254/16 (or a larger prefix), and
-# which is connected to a Neutron router through which the VMs send metadata
-# requests. In this case DHCP Option 121 will not be injected into VMs, as
-# they will be able to reach 169.254.169.254 through a router.
-# This option requires enable_isolated_metadata = True.
-enable_metadata_network = False
-
-# Number of threads to use during sync process. Should not exceed connection
-# pool size configured on server.
-# num_sync_threads = 4
-
-# Location to store DHCP server config files
-# dhcp_confs = $state_path/dhcp
-
-# Domain to use for building the hostnames
-dhcp_domain = openstacklocal
-
-# Override the default dnsmasq settings with this file
-# dnsmasq_config_file =
-dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
-
-# Comma-separated list of DNS servers which will be used by dnsmasq
-# as forwarders.
-# dnsmasq_dns_servers =
-
-# Limit number of leases to prevent a denial-of-service.
-dnsmasq_lease_max = 16777216
-
-# Location to DHCP lease relay UNIX domain socket
-# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# dhcp_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the dhcp agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a dhcp server is disabled.
-# dhcp_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
deleted file mode 100644
index 7bcbd9d..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-dhcp-option-force=26,1454
-
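
dhcp-option-force=26,1454 makes dnsmasq hand out DHCP option 26 (interface MTU) with a value of 1454, so guest traffic fits inside the tunnel encapsulation without fragmentation. A hypothetical vars sketch showing how such a value can be derived instead of hard-coded (the variable names are illustrative, not from this repo):

    # VXLAN over IPv4 adds 50 bytes per packet:
    # inner Ethernet (14) + VXLAN (8) + UDP (8) + outer IPv4 (20)
    physical_mtu: 1500
    vxlan_overhead: 50
    guest_mtu: "{{ physical_mtu - vxlan_overhead }}"   # 1450

The 1454 used here leaves 46 bytes of headroom: enough for GRE (38 bytes of overhead) but 4 bytes short of the 50 that VXLAN over IPv4 needs, so 1450 is the safer choice when vxlan is among the tenant network types.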
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot
deleted file mode 100644
index 32caf96..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot
+++ /dev/null
@@ -1,25 +0,0 @@
-interfaces {
- restore-original-config-on-shutdown: false
- interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- description: "Internal pNodes interface"
- disable: false
- default-system-config
- }
-}
-
-protocols {
- igmp {
- disable: false
- interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- disable: false
- version: 3
- }
- }
- traceoptions {
- flag all {
- disable: false
- }
- }
- }
-}
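
This XORP configuration backs the igmp-router.yml include referenced from tasks/main.yml when vxlan is among the tunnel types: it makes the network node speak IGMPv3 on the internal interface so the multicast group used by VXLAN (vxlan_group = 239.1.1.1 in ml2_conf.ini) is actually joined and forwarded. igmp-router.yml itself is not in this diff; a hypothetical sketch consistent with this template and with xorp appearing in the service list:

    - name: install igmp router
      apt: name=xorp state=present force=yes

    - name: configure xorp
      template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot

    - name: enable and start xorp
      service: name=xorp state=started enabled=yes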
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini
deleted file mode 100644
index b394c00..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini
+++ /dev/null
@@ -1,81 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# L3 requires that an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
-# that supports L3 agent
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
-# an iproute2 package that supports namespaces).
-use_namespaces = True
-
-# If use_namespaces is set to False, then the agent can only configure one router.
-
-# This is done by setting the specific router_id.
-# router_id =
-
-# When external_network_bridge is set, each L3 agent can be associated
-# with no more than one external network. This value should be set to the UUID
-# of that external network. To allow L3 agent support multiple external
-# networks, both the external_network_bridge and gateway_external_network_id
-# must be left empty.
-# gateway_external_network_id =
-
-# Indicates that this L3 agent should also handle routers that do not have
-# an external network gateway configured. This option should be True only
-# for a single agent in a Neutron deployment, and may be False for all agents
-# if all routers must have an external network gateway
-handle_internal_only_routers = True
-
-# Name of the bridge used for external network traffic. This should be set to
-# an empty value for the Linux bridge. When this parameter is set, each L3
-# agent can be associated with no more than one external network.
-external_network_bridge = br-ex
-
-# TCP Port used by Neutron metadata server
-metadata_port = 9697
-
-# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
-# to disable this feature.
-send_arp_for_ha = 3
-
-# seconds between re-sync routers' data if needed
-periodic_interval = 40
-
-# seconds to start to sync routers' data after
-# starting agent
-periodic_fuzzy_delay = 5
-
-# enable_metadata_proxy, which is true by default, can be set to False
-# if the Nova metadata server is not available
-# enable_metadata_proxy = True
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# router_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the L3 agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a router is destroyed.
-# router_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
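
external_network_bridge = br-ex only works if that bridge exists on the node; tasks/main.yml creates it, but only when 'openvswitch' is among the mechanism drivers, so the two settings can silently drift apart. A hedged sanity-check sketch that fails early instead:

    - name: verify the external bridge exists
      command: ovs-vsctl br-exists br-ex
      register: brex
      failed_when: brex.rc != 0
      changed_when: false

ovs-vsctl br-exists exits 0 when the bridge is present and 2 when it is not, so the failed_when test is a plain return-code check.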
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini
deleted file mode 100644
index 6badf28..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini
+++ /dev/null
@@ -1,46 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-debug = True
-
-# The Neutron user information for accessing the Neutron API.
-auth_url = http://{{ HA_VIP }}:5000/v2.0
-auth_region = RegionOne
-# Turn off verification of the certificate for ssl
-# auth_insecure = False
-# Certificate Authority public key (CA cert) file for ssl
-# auth_ca_cert =
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-
-# Network service endpoint type to pull from the keystone catalog
-# endpoint_type = adminURL
-
-# IP address used by Nova metadata server
-nova_metadata_ip = {{ HA_VIP }}
-
-# TCP Port used by Nova metadata server
-nova_metadata_port = 8775
-
-# When proxying metadata requests, Neutron signs the Instance-ID header with a
-# shared secret to prevent spoofing. You may select any string for a secret,
-# but it must match here and in the configuration used by the Nova Metadata
-# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# Number of separate worker processes for metadata server
-# metadata_workers = 0
-
-# Number of backlog requests to configure the metadata server socket with
-# metadata_backlog = 128
-
-# URL to connect to the cache backend.
-# Example of URL using memory caching backend
-# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
-# default_ttl=0 parameter will cause cache entries to never expire.
-# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
-# No cache is used in case no value is passed.
-# cache_url =
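
The NOTE above is the classic pitfall with this agent: the same secret must also be configured on the Nova side, under a differently named option, or the proxied metadata requests are rejected. A sketch of a matching task (the ini_file usage and handler name are assumptions; this role only manages the Neutron side):

    - name: keep the nova metadata proxy secret in sync
      ini_file: >
        dest=/etc/nova/nova.conf
        section=DEFAULT
        option=neutron_metadata_proxy_shared_secret
        value={{ METADATA_SECRET }}
      notify: restart nova-api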
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini
deleted file mode 100644
index a790069..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini
+++ /dev/null
@@ -1,108 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-flat_networks = external
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-vni_ranges = 1001:4095
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-vxlan_group = 239.1.1.1
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls whether the neutron security group is enabled.
-# It should be false when you use the nova security group.
-# enable_security_group = True
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = True
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
-
-[ovs]
-local_ip = {{ internal_ip }}
-{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
-integration_bridge = br-int
-tunnel_bridge = br-tun
-tunnel_id_ranges = 1001:4095
-tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
-{% endif %}
-
-[agent]
-root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
-tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
-vxlan_udp_port = 4789
-{% endif %}
-l2_population = False
-
-[odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-network_vlan_ranges = 1001:4095
-tunnel_id_ranges = 1001:4095
-tun_peer_patch_port = patch-int
-int_peer_patch_port = patch-tun
-tenant_network_type = vxlan
-tunnel_bridge = br-tun
-integration_bridge = br-int
-controllers = 10.1.0.15:8080:admin:admin
-{% endif %}
-
-[ml2_odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-username = {{ odl_username }}
-password = {{ odl_password }}
-url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
-{% endif %}
-
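
The template above joins several list-valued variables into comma-separated strings. A hypothetical group_vars sketch showing inputs that render a plain OVS/VXLAN deployment (the variable names are taken from the template; the values are illustrative only):

    NEUTRON_TYPE_DRIVERS: ['flat', 'vlan', 'gre', 'vxlan']
    NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
    NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
    NEUTRON_TUNNEL_TYPES: ['vxlan']

With these values the [ovs] block is rendered, vxlan_udp_port is emitted in [agent], and the [odl] and [ml2_odl] sections stay empty.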
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf
deleted file mode 100644
index 93be9cb..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf
+++ /dev/null
@@ -1,465 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum number of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# The RabbitMQ virtual host to use.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response. The value
-# 'infinite' or a negative number means no limit; otherwise the value must be
-# greater than 0. If the number of items requested is greater than
-# pagination_max_limit, the server returns only pagination_max_limit items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling of networks to a DHCP agent. Non-hosted networks
-# will be scheduled to the first DHCP agent that sends a get_active_networks
-# message to the neutron server.
-# network_auto_schedule = True
-
-# Allow auto scheduling of routers to an L3 agent. Non-hosted routers will be
-# scheduled to the first L3 agent that sends a sync_routers message to the
-# neutron server.
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resources allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on the back end a member is a resource consumer,
-# which is why a quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on the back end a health monitor is a
-# resource consumer, which is why a quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry count, used in the event connectivity is lost;
-# setting it to -1 implies an infinite retry count.
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf
deleted file mode 100644
index 1575367..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf
+++ /dev/null
@@ -1,466 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum number of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# The RabbitMQ virtual host to use.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response. The value
-# 'infinite' or a negative number means no limit; otherwise the value must be
-# greater than 0. If the number of items requested is greater than
-# pagination_max_limit, the server returns only pagination_max_limit items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling of networks to a DHCP agent. Non-hosted networks
-# will be scheduled to the first DHCP agent that sends a get_active_networks
-# message to the neutron server.
-# network_auto_schedule = True
-
-# Allow auto scheduling of routers to an L3 agent. Non-hosted routers will be
-# scheduled to the first L3 agent that sends a sync_routers message to the
-# neutron server.
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resources allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on the back end a member is a resource consumer,
-# which is why a quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on the back end a health monitor is a
-# resource consumer, which is why a quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry count, used in the event connectivity is lost;
-# setting it to -1 implies an infinite retry count.
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
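
This file differs from the neutron-network.conf template above chiefly in one substantive line: nova_admin_tenant_id is filled in from NOVA_ADMIN_TENANT_ID.stdout_lines[0], a variable registered elsewhere in the playbooks, while the network-node template leaves the option unset. The registering task is not part of this diff; a hypothetical sketch consistent with the .stdout_lines[0] access (the keystone CLI invocation is an assumption and relies on admin credentials in the environment):

    - name: look up the nova service tenant id
      shell: keystone tenant-get service | awk '/ id / { print $4 }'
      register: NOVA_ADMIN_TENANT_ID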
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh
deleted file mode 100644
index b92e202..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
-
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
-
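
Both commands in this script are commented out, so the role as shipped never creates the external network. Should the step be wired back in, a sketch of the first command as a guarded task (the marker-file idea is an assumption; the CLI flags come from the script itself):

    - name: create shared external network
      shell: >
        neutron --os-username=admin --os-password={{ ADMIN_PASS }}
        --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0
        net-create ext-net --shared --router:external=True
        && touch /opt/ext-net.created
      args:
        creates: /opt/ext-net.created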
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/nova.conf b/compass/deploy/ansible/roles/neutron-network/templates/nova.conf
deleted file mode 100644
index 9587073..0000000
--- a/compass/deploy/ansible/roles/neutron-network/templates/nova.conf
+++ /dev/null
@@ -1,69 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lock/nova
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE }}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=ec2,osapi_compute,metadata
-
-vif_plugging_is_fatal = false
-vif_plugging_timeout = 0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/roles/nova-compute/handlers/main.yml b/compass/deploy/ansible/roles/nova-compute/handlers/main.yml
deleted file mode 100644
index c135003..0000000
--- a/compass/deploy/ansible/roles/nova-compute/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart nova-compute
- service: name=nova-compute state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/nova-compute/tasks/main.yml b/compass/deploy/ansible/roles/nova-compute/tasks/main.yml
deleted file mode 100644
index 51c8dfa..0000000
--- a/compass/deploy/ansible/roles/nova-compute/tasks/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: install nova-compute related packages
- apt: name=nova-compute-kvm state=present force=yes
-
-- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova.conf
- - nova-compute.conf
- notify:
- - restart nova-compute
-
-- name: generate nova compute service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - nova-compute
-
-- meta: flush_handlers
-
-- name: remove nova sqlite db
- shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
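
The rm ... || touch pattern above exists only so the task cannot fail on reruns. The file module expresses the same intent idempotently; a drop-in sketch:

    - name: remove nova sqlite db
      file: path=/var/lib/nova/nova.sqlite state=absent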
diff --git a/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf b/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf
deleted file mode 100644
index 401dee7..0000000
--- a/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-[DEFAULT]
-compute_driver=libvirt.LibvirtDriver
-force_raw_images = true
-[libvirt]
-virt_type=qemu
-images_type = raw
-mem_stats_period_seconds=0
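Note: virt_type=qemu forces pure software emulation, which boots anywhere (including nested VMs) but is far slower than KVM. A hedged sketch of switching to kvm only when the CPU advertises hardware virtualization; the detection heuristic is an assumption, not part of the role:

    - name: detect hardware virtualization support (hypothetical check)
      command: grep -Eq "(vmx|svm)" /proc/cpuinfo
      register: hw_virt
      ignore_errors: yes

    - name: switch libvirt to kvm when the host supports it
      ini_file: dest=/etc/nova/nova-compute.conf section=libvirt
                option=virt_type value=kvm
      when: hw_virt.rc == 0
      notify:
        - restart nova-compute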
diff --git a/compass/deploy/ansible/roles/nova-compute/templates/nova.conf b/compass/deploy/ansible/roles/nova-compute/templates/nova.conf
deleted file mode 100644
index 4988cb0..0000000
--- a/compass/deploy/ansible/roles/nova-compute/templates/nova.conf
+++ /dev/null
@@ -1,73 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lock/nova
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE }}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=ec2,osapi_compute,metadata
-
-vif_plugging_is_fatal = false
-vif_plugging_timeout = 0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[conductor]
-manager = nova.conductor.manager.ConductorManager
-topic = conductor
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/roles/nova-controller/handlers/main.yml b/compass/deploy/ansible/roles/nova-controller/handlers/main.yml
deleted file mode 100644
index b4c1585..0000000
--- a/compass/deploy/ansible/roles/nova-controller/handlers/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: restart nova-api
- service: name=nova-api state=restarted enabled=yes
-
-- name: restart nova-cert
- service: name=nova-cert state=restarted enabled=yes
-
-- name: restart nova-consoleauth
- service: name=nova-consoleauth state=restarted enabled=yes
-
-- name: restart nova-scheduler
- service: name=nova-scheduler state=restarted enabled=yes
-
-- name: restart nova-conductor
- service: name=nova-conductor state=restarted enabled=yes
-
-- name: restart nova-novncproxy
- service: name=nova-novncproxy state=restarted enabled=yes
-
-- name: remove nova-sqlite-db
- shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
-
-- name: restart neutron-server
- service: name=neutron-server state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/main.yml b/compass/deploy/ansible/roles/nova-controller/tasks/main.yml
deleted file mode 100644
index 72a9f4d..0000000
--- a/compass/deploy/ansible/roles/nova-controller/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- include: nova_install.yml
- tags:
- - install
- - nova_install
- - nova
-
-- include: nova_config.yml
- when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
- tags:
- - config
- - nova_config
- - nova
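Note: the when: guard above restricts nova_config.yml (the one-shot db sync) to a single node: it runs when the deployment is non-HA (HA_CLUSTER undefined) or when this host's HA_CLUSTER entry is the empty string. A sketch of a mapping that satisfies the condition on host1 only; the values are hypothetical and this reading of HA_CLUSTER is inferred from the expression itself:

    # hypothetical inventory variable
    HA_CLUSTER:
      host1: ''            # empty entry -> runs nova_config.yml
      host2: '10.1.0.50'   # non-empty   -> skips it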
diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml b/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml
deleted file mode 100644
index 62351fa..0000000
--- a/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: nova db sync
- command: su -s /bin/sh -c "nova-manage db sync" nova
- register: result
- until: result.rc == 0
- retries: 5
- delay: 3
- notify:
- - restart nova-api
- - restart nova-cert
- - restart nova-consoleauth
- - restart nova-scheduler
- - restart nova-conductor
- - restart nova-novncproxy
-
-- meta: flush_handlers
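Note: for readers new to Ansible's polling keywords, the same db-sync task annotated: until re-evaluates the registered result after every attempt, retries bounds the attempts, and delay spaces them out, so a database still coming up behind the VIP gets roughly 15 seconds to appear:

    - name: nova db sync (annotated copy of the task above)
      command: su -s /bin/sh -c "nova-manage db sync" nova
      register: result        # capture rc/stdout of each attempt
      until: result.rc == 0   # stop as soon as one attempt succeeds
      retries: 5              # at most 5 attempts...
      delay: 3                # ...3 seconds apart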
diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml b/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml
deleted file mode 100644
index a1cded5..0000000
--- a/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: install nova related packages
- apt: name={{ item }} state=present force=yes
- with_items:
- - nova-api
- - nova-cert
- - nova-conductor
- - nova-consoleauth
- - nova-novncproxy
- - nova-scheduler
- - python-novaclient
- - python-oslo.rootwrap
-
-- name: generate nova control service list
- shell: echo {{ item }} >> /opt/service
- with_items:
- - nova-api
- - nova-cert
- - nova-conductor
- - nova-consoleauth
- - nova-novncproxy
- - nova-scheduler
-
-- name: update nova conf
- template: src=nova.conf
- dest=/etc/nova/nova.conf
- backup=yes
- notify:
- - restart nova-api
- - restart nova-cert
- - restart nova-consoleauth
- - restart nova-scheduler
- - restart nova-conductor
- - restart nova-novncproxy
- - remove nova-sqlite-db
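Note: the echo {{ item }} >> /opt/service pattern used here (and in the other roles) appends a duplicate entry every time the play re-runs. A hedged sketch of an idempotent replacement with lineinfile:

    - name: register nova services in /opt/service (idempotent sketch)
      lineinfile: dest=/opt/service line={{ item }} create=yes
      with_items:
        - nova-api
        - nova-scheduler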
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini
deleted file mode 100644
index 19eb62e..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini
+++ /dev/null
@@ -1,90 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# The DHCP agent will resync its state with Neutron to recover from any
-# transient notification or rpc errors. The interval is number of
-# seconds between attempts.
-resync_interval = 5
-
-# The DHCP agent requires an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
-# BigSwitch/Floodlight)
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Name of Open vSwitch bridge to use
-# ovs_integration_bridge = br-int
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
-# no additional setup of the DHCP server.
-dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
-
-# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces).
-use_namespaces = True
-
-# The DHCP server can assist with providing metadata support on isolated
-# networks. Setting this value to True will cause the DHCP server to append
-# specific host routes to the DHCP request. The metadata service will only
-# be activated when the subnet does not contain any router port. The guest
-# instance must be configured to request host routes via DHCP (Option 121).
-enable_isolated_metadata = False
-
-# Allows for serving metadata requests coming from a dedicated metadata
-# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
-# is connected to a Neutron router from which the VMs send metadata
-# request. In this case DHCP Option 121 will not be injected in VMs, as
-# they will be able to reach 169.254.169.254 through a router.
-# This option requires enable_isolated_metadata = True
-enable_metadata_network = False
-
-# Number of threads to use during sync process. Should not exceed connection
-# pool size configured on server.
-# num_sync_threads = 4
-
-# Location to store DHCP server config files
-# dhcp_confs = $state_path/dhcp
-
-# Domain to use for building the hostnames
-dhcp_domain = openstacklocal
-
-# Override the default dnsmasq settings with this file
-# dnsmasq_config_file =
-dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
-
-# Comma-separated list of DNS servers which will be used by dnsmasq
-# as forwarders.
-# dnsmasq_dns_servers =
-
-# Limit number of leases to prevent a denial-of-service.
-dnsmasq_lease_max = 16777216
-
-# Location to DHCP lease relay UNIX domain socket
-# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# dhcp_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the dhcp agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a dhcp server is disabled.
-# dhcp_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf
deleted file mode 100644
index 7bcbd9d..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-dhcp-option-force=26,1454
-
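Note: dhcp-option-force=26,1454 makes dnsmasq push DHCP option 26 (interface MTU) to every guest, shrinking the guest MTU so GRE/VXLAN encapsulation headers still fit inside a 1500-byte physical MTU. A sketch of the task that would install this file at the path referenced by dhcp_agent.ini above (handler name is an assumption):

    - name: install dnsmasq MTU override for tunnel networks
      template: src=dnsmasq-neutron.conf dest=/etc/neutron/dnsmasq-neutron.conf
      notify:
        - restart neutron-dhcp-agent   # hypothetical handler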
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot
deleted file mode 100644
index 32caf96..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot
+++ /dev/null
@@ -1,25 +0,0 @@
-interfaces {
- restore-original-config-on-shutdown: false
- interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- description: "Internal pNodes interface"
- disable: false
- default-system-config
- }
-}
-
-protocols {
- igmp {
- disable: false
- interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
- disable: false
- version: 3
- }
- }
- traceoptions {
- flag all {
- disable: false
- }
- }
- }
-}
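Note: this XORP snippet runs IGMPv3 on the internal interface so the host can join the multicast group used for VXLAN broadcast/unknown/multicast traffic (vxlan_group = 239.1.1.1 in ml2_conf.ini below). The repeated Jinja2 expression resolves the device name of the VXLAN interface, falling back to internal_interface; a quick way to see what it renders to (values come from inventory facts):

    - name: show which device the xorp template will bind (debug sketch)
      debug: msg="{{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }}"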
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini
deleted file mode 100644
index b394c00..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini
+++ /dev/null
@@ -1,81 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# L3 requires that an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
-# that supports L3 agent
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces).
-use_namespaces = True
-
-# If use_namespaces is set to False, the agent can configure only one router.
-
-# This is done by setting the specific router_id.
-# router_id =
-
-# When external_network_bridge is set, each L3 agent can be associated
-# with no more than one external network. This value should be set to the UUID
-# of that external network. To allow the L3 agent to support multiple external
-# networks, both the external_network_bridge and gateway_external_network_id
-# must be left empty.
-# gateway_external_network_id =
-
-# Indicates that this L3 agent should also handle routers that do not have
-# an external network gateway configured. This option should be True only
-# for a single agent in a Neutron deployment, and may be False for all agents
-# if all routers must have an external network gateway
-handle_internal_only_routers = True
-
-# Name of bridge used for external network traffic. This should be set to
-# empty value for the Linux bridge. When this parameter is set, each L3 agent
-# can be associated with no more than one external network.
-external_network_bridge = br-ex
-
-# TCP Port used by Neutron metadata server
-metadata_port = 9697
-
-# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
-# to disable this feature.
-send_arp_for_ha = 3
-
-# seconds between re-sync routers' data if needed
-periodic_interval = 40
-
-# seconds to start to sync routers' data after
-# starting agent
-periodic_fuzzy_delay = 5
-
-# enable_metadata_proxy, which is true by default, can be set to False
-# if the Nova metadata server is not available
-# enable_metadata_proxy = True
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# router_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the L3 agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a router is destroyed.
-# router_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
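Note: because external_network_bridge = br-ex, the L3 agent plugs router gateway ports directly into that OVS bridge, so the bridge must exist before the agent starts. A minimal sketch (module parameters assumed for Ansible of this era):

    - name: ensure the external bridge exists before the l3 agent starts
      openvswitch_bridge: bridge=br-ex state=present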
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini
deleted file mode 100644
index 6badf28..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini
+++ /dev/null
@@ -1,46 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-debug = True
-
-# The Neutron user information for accessing the Neutron API.
-auth_url = http://{{ HA_VIP }}:5000/v2.0
-auth_region = RegionOne
-# Turn off verification of the certificate for ssl
-# auth_insecure = False
-# Certificate Authority public key (CA cert) file for ssl
-# auth_ca_cert =
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-
-# Network service endpoint type to pull from the keystone catalog
-# endpoint_type = adminURL
-
-# IP address used by Nova metadata server
-nova_metadata_ip = {{ HA_VIP }}
-
-# TCP Port used by Nova metadata server
-nova_metadata_port = 8775
-
-# When proxying metadata requests, Neutron signs the Instance-ID header with a
-# shared secret to prevent spoofing. You may select any string for a secret,
-# but it must match here and in the configuration used by the Nova Metadata
-# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# Number of separate worker processes for metadata server
-# metadata_workers = 0
-
-# Number of backlog requests to configure the metadata server socket with
-# metadata_backlog = 128
-
-# URL to connect to the cache backend.
-# Example of URL using memory caching backend
-# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
-# default_ttl=0 parameter will cause cache entries to never expire.
-# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
-# No cache is used in case no value is passed.
-# cache_url =
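Note: as the comment above stresses, the proxy secret is only useful if Nova carries the same value under its own option name. A hedged sketch of aligning nova.conf from a playbook (the second option name is assumed for this release; verify against the installed nova):

    - name: give nova the metadata proxy shared secret (sketch)
      ini_file: dest=/etc/nova/nova.conf section=DEFAULT
                option=neutron_metadata_proxy_shared_secret value={{ METADATA_SECRET }}

    - name: enable the neutron metadata proxy in nova (option name assumed)
      ini_file: dest=/etc/nova/nova.conf section=DEFAULT
                option=service_neutron_metadata_proxy value=True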
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini b/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini
deleted file mode 100644
index a790069..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini
+++ /dev/null
@@ -1,108 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-flat_networks = external
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-vni_ranges = 1001:4095
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-vxlan_group = 239.1.1.1
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = True
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
-
-[ovs]
-local_ip = {{ internal_ip }}
-{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
-integration_bridge = br-int
-tunnel_bridge = br-tun
-tunnel_id_ranges = 1001:4095
-tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
-{% endif %}
-
-[agent]
-root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
-tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
-vxlan_udp_port = 4789
-{% endif %}
-l2_population = False
-
-[odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-network_vlan_ranges = 1001:4095
-tunnel_id_ranges = 1001:4095
-tun_peer_patch_port = patch-int
-int_peer_patch_port = patch-tun
-tenant_network_type = vxlan
-tunnel_bridge = br-tun
-integration_bridge = br-int
-controllers = 10.1.0.15:8080:admin:admin
-{% endif %}
-
-[ml2_odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-username = {{ odl_username }}
-password = {{ odl_password }}
-url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
-{% endif %}
-
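Note: the |join(",") filters above turn list variables into the comma-separated strings the INI format expects. A sketch of hypothetical group_vars and the lines they would render:

    # hypothetical group_vars
    NEUTRON_TYPE_DRIVERS: ['flat', 'vlan', 'vxlan']
    NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
    NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
    # rendered template lines:
    #   type_drivers = flat,vlan,vxlan
    #   tenant_network_types = vxlan
    #   mechanism_drivers = openvswitch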
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf b/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf
deleted file mode 100644
index 93be9cb..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf
+++ /dev/null
@@ -1,465 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resources allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# in OpenStack. However, on the back-end a member is a resource consumer,
-# which is why a quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer in OpenStack. However, on the back-end a health monitor is a
-# resource consumer, which is why a quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
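Note: the timing values in this template are self-consistent: report_interval = 30 in [agent] is at most half of agent_down_time = 75 (30 <= 37.5), as the inline guidance requires. A throwaway sketch of encoding that check in a play:

    - name: sanity-check agent timing guidance (sketch)
      assert:
        that:
          - 30 <= 75 / 2   # report_interval <= agent_down_time / 2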
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf b/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf
deleted file mode 100644
index 1575367..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf
+++ /dev/null
@@ -1,466 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resources allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# in OpenStack. However, on the back-end a member is a resource consumer,
-# which is why a quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer in OpenStack. However, on the back-end a health monitor is a
-# resource consumer, which is why a quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# To activate Radware's LBaaS driver, uncomment the next line.
-# To keep HAProxy as the default LBaaS driver, remove the ":default" attribute
-# from the line below; otherwise, comment out the HAProxy line above.
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# Uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as the Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
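The service_provider entries above pack four fields into one colon-separated value. As a minimal sketch (not Neutron's actual parser; the helper name is made up for illustration), splitting such a value into its parts looks like this:

    # Illustrative only: decompose "<service_type>:<name>:<driver>[:default]".
    def parse_service_provider(value):
        parts = value.split(":")
        is_default = parts[-1] == "default"
        if is_default:
            parts = parts[:-1]
        # The driver path itself contains dots but no colons.
        return parts[0], parts[1], ":".join(parts[2:]), is_default

    print(parse_service_provider(
        "VPN:openswan:"
        "neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default"))
    # -> ('VPN', 'openswan',
    #     'neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver', True)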
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh b/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh
deleted file mode 100644
index b92e202..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
-
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
-
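The templates in this role are Jinja2 files: Ansible substitutes the {{ ... }} placeholders from inventory variables before copying them to the target host. A minimal sketch of that rendering step, using the jinja2 library with made-up values for ADMIN_PASS and identity_host:

    from jinja2 import Template

    tmpl = Template(
        "neutron --os-username=admin --os-password={{ ADMIN_PASS }} "
        "--os-auth-url=http://{{ identity_host }}:35357/v2.0 "
        "net-create ext-net --shared --router:external=True"
    )
    # Both values are placeholders for illustration, not real credentials.
    print(tmpl.render(ADMIN_PASS="example-pass", identity_host="192.0.2.10"))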
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/nova.conf b/compass/deploy/ansible/roles/nova-controller/templates/nova.conf
deleted file mode 100644
index c8991a3..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/nova.conf
+++ /dev/null
@@ -1,72 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lock/nova
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE }}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=osapi_compute,metadata
-
-vif_plugging_is_fatal = false
-vif_plugging_timeout = 0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
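nova.conf above is standard oslo.config INI: a [DEFAULT] section plus per-service sections such as [database], [glance] and [neutron]. As a rough sketch of reading such a section with the stdlib configparser (an approximation only: Nova itself uses oslo.config, which among other things accepts the repeated notification_driver key above as a multi-valued option, where configparser would raise a duplicate-option error):

    import configparser
    import textwrap

    cfg = configparser.ConfigParser()
    # A trimmed, hypothetical rendering of the [database] section above.
    cfg.read_string(textwrap.dedent("""\
        [database]
        connection = mysql://nova:example-pass@192.0.2.10/nova
        """))
    print(cfg["database"]["connection"])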
diff --git a/compass/deploy/ansible/roles/repo/tasks/main.yml b/compass/deploy/ansible/roles/repo/tasks/main.yml
deleted file mode 100644
index 9476f80..0000000
--- a/compass/deploy/ansible/roles/repo/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: add juno cloudarchive
- apt_repository: repo="{{ juno_cloud_archive }}" state=present
-
-- name: update apt package cache
- apt: update_cache=yes
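The role boils down to writing one APT source entry and refreshing the package index. A rough Python equivalent of the two tasks, as a sketch: the repo string and file name are hypothetical stand-ins for the juno_cloud_archive variable and whatever file apt_repository would manage.

    import subprocess

    # Hypothetical value for {{ juno_cloud_archive }}.
    repo = ("deb http://ubuntu-cloud.archive.canonical.com/ubuntu "
            "trusty-updates/juno main")
    with open("/etc/apt/sources.list.d/cloudarchive-juno.list", "w") as f:
        f.write(repo + "\n")
    subprocess.check_call(["apt-get", "update"])  # apt: update_cache=yes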
diff --git a/compass/deploy/ansible/roles/repo/templates/sources.list b/compass/deploy/ansible/roles/repo/templates/sources.list
deleted file mode 100644
index 8b062e7..0000000
--- a/compass/deploy/ansible/roles/repo/templates/sources.list
+++ /dev/null
@@ -1 +0,0 @@
-{{ LOCAL_REPO }}