author     baigk <baiguoku@huawei.com>  2015-08-14 21:28:53 -0400
committer  baigk <baiguoku@huawei.com>  2015-08-17 21:59:16 -0400
commit     444407f6ff854b9a3dfa5c7ad50e4ac6495ed9fd (patch)
tree       d6a1f4faecf1350c7fe7e9c725447965c3c3f6c9 /deploy
parent     ade17ce57eb08dbf262cb9b6f1893367e2832dc1 (diff)
Neutron-related role in the compass-adapter should use the same template file

JIRA: COMPASS-8

Change-Id: Ia7c2dc8d90a025950f84fdb92383be85e82f0cde
Signed-off-by: baigk <baiguoku@huawei.com>
Diffstat (limited to 'deploy')
-rw-r--r--  deploy/adapters/ansible/openstack_juno/templates/dnsmasq-neutron.conf (renamed from deploy/adapters/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf)  0
-rw-r--r--  deploy/adapters/ansible/openstack_juno/templates/ml2_conf.ini (renamed from deploy/adapters/ansible/roles/neutron-compute/templates/ml2_conf.ini)  0
-rw-r--r--  deploy/adapters/ansible/openstack_juno/templates/neutron-network.conf (renamed from deploy/adapters/ansible/roles/neutron-compute/templates/neutron-network.conf)  0
-rw-r--r--  deploy/adapters/ansible/openstack_juno/templates/neutron.conf (renamed from deploy/adapters/ansible/roles/neutron-controller/templates/neutron.conf)  0
-rw-r--r--  deploy/adapters/ansible/openstack_juno/templates/nova.conf (renamed from deploy/adapters/ansible/roles/nova-controller/templates/nova.conf)  0
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mysql.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/database/vars/Debian.yml  4
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf  466
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/templates/nova.conf  73
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf  2
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/templates/ml2_conf.ini  108
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/templates/neutron-network.conf  465
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/templates/nova.conf  69
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/tasks/main.yml  4
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf  2
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/templates/ml2_conf.ini  108
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/templates/neutron-network.conf  465
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/templates/neutron.conf  466
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/templates/nova.conf  69
-rw-r--r--  deploy/adapters/ansible/roles/nova-compute/tasks/main.yml  8
-rw-r--r--  deploy/adapters/ansible/roles/nova-compute/templates/nova.conf  73
-rw-r--r--  deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf  2
-rw-r--r--  deploy/adapters/ansible/roles/nova-controller/templates/ml2_conf.ini  108
-rw-r--r--  deploy/adapters/ansible/roles/nova-controller/templates/neutron-network.conf  465
-rw-r--r--  deploy/adapters/ansible/roles/nova-controller/templates/neutron.conf  466
28 files changed, 15 insertions, 3416 deletions
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf b/deploy/adapters/ansible/openstack_juno/templates/dnsmasq-neutron.conf
index 7bcbd9df..7bcbd9df 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf
+++ b/deploy/adapters/ansible/openstack_juno/templates/dnsmasq-neutron.conf
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/ml2_conf.ini b/deploy/adapters/ansible/openstack_juno/templates/ml2_conf.ini
index a7900693..a7900693 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/ml2_conf.ini
+++ b/deploy/adapters/ansible/openstack_juno/templates/ml2_conf.ini
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/neutron-network.conf b/deploy/adapters/ansible/openstack_juno/templates/neutron-network.conf
index df27cd47..df27cd47 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/neutron-network.conf
+++ b/deploy/adapters/ansible/openstack_juno/templates/neutron-network.conf
diff --git a/deploy/adapters/ansible/roles/neutron-controller/templates/neutron.conf b/deploy/adapters/ansible/openstack_juno/templates/neutron.conf
index 73128488..73128488 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/templates/neutron.conf
+++ b/deploy/adapters/ansible/openstack_juno/templates/neutron.conf
diff --git a/deploy/adapters/ansible/roles/nova-controller/templates/nova.conf b/deploy/adapters/ansible/openstack_juno/templates/nova.conf
index 9b4280c1..9b4280c1 100644
--- a/deploy/adapters/ansible/roles/nova-controller/templates/nova.conf
+++ b/deploy/adapters/ansible/openstack_juno/templates/nova.conf
diff --git a/deploy/adapters/ansible/roles/database/tasks/mysql.yml b/deploy/adapters/ansible/roles/database/tasks/mysql.yml
index 809d6172..9e272d1b 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mysql.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mysql.yml
@@ -7,7 +7,7 @@
file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
- name: update mysql config file
- copy: src= {{ item }} dest={{ mysql_config_file_path }}/{{ item }} backup=yes
+ copy: src={{ item }} dest={{ mysql_config_file_path }}/{{ item }} backup=yes
with_items: mysql_config_file_name
- name: manually restart mysql server
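
Note on the mysql.yml hunk: in Ansible's key=value shorthand the stray space after "src=" splits the argument string, leaving copy with an empty src and a dangling "{{ item }}" token, so the task is mis-parsed. A minimal sketch of the same task in expanded YAML form, assuming (as the Debian.yml hunk below makes explicit) that mysql_config_file_name is a list:

    - name: update mysql config file
      copy:
        src: "{{ item }}"                                # looked up under the role's files/ directory
        dest: "{{ mysql_config_file_path }}/{{ item }}"  # e.g. /etc/mysql/my.cnf
        backup: yes                                      # keep a timestamped copy of any replaced file
      with_items: mysql_config_file_name                 # one copy per entry in the list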
diff --git a/deploy/adapters/ansible/roles/database/vars/Debian.yml b/deploy/adapters/ansible/roles/database/vars/Debian.yml
index 5c014b4b..79f7008c 100644
--- a/deploy/adapters/ansible/roles/database/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/database/vars/Debian.yml
@@ -9,12 +9,12 @@ maridb_packages:
- libssl0.9.8
- mysql-client-5.5
- python-mysqldb
- - mysql-server-wsrep
+ - mysql-wsrep-server
- galera
services: []
mysql_config_file_path: "/etc/mysql"
-mysql_config_file_name: "my.cnf"
+mysql_config_file_name: ["my.cnf"]
wsrep_config_file_path: "/etc/mysql/conf.d"
wsrep_provider_file: "/usr/lib/galera/libgalera_smm.so"
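
The Debian.yml hunk does two things: it renames the wsrep-patched MySQL server package, and it turns mysql_config_file_name into a list so it matches the with_items loop shown above. A hedged sketch of the variable block; the commented second entry is hypothetical and only illustrates why the list form was chosen:

    mysql_config_file_path: "/etc/mysql"
    mysql_config_file_name: ["my.cnf"]
    # hypothetical later extension, not part of this commit:
    # mysql_config_file_name: ["my.cnf", "conf.d/extra.cnf"]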
diff --git a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
index fbc41385..6c1f3bd9 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
@@ -36,7 +36,7 @@
file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
- name: config neutron
- template: src=neutron-network.conf
+ template: src=templates/neutron-network.conf
dest=/etc/neutron/neutron.conf backup=yes
notify:
- restart neutron compute service
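
The "templates/" prefix in this and the following role hunks is what lets every role share the single copy now kept under openstack_juno/templates/. A rough sketch of the layout this relies on, assuming the playbook that applies these roles sits in deploy/adapters/ansible/openstack_juno/ next to the shared templates directory (the playbook file name below is an assumption):

    deploy/adapters/ansible/
      openstack_juno/
        site.yml                            # playbook applying the roles (name assumed)
        templates/neutron-network.conf      # single shared template (renamed into place above)
      roles/
        neutron-compute/
          tasks/main.yml                    # template: src=templates/neutron-network.conf ...
          templates/                        # role-local copies deleted by this commit

    # With the role-local copy gone, the template lookup falls back to a path relative
    # to the playbook directory, so src=templates/neutron-network.conf resolves to the
    # shared file in openstack_juno/templates/.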
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf b/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf
deleted file mode 100644
index 27b6d3cc..00000000
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf
+++ /dev/null
@@ -1,466 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ VERBOSE }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port the bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update it's cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = regionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/nova.conf b/deploy/adapters/ansible/roles/neutron-compute/templates/nova.conf
deleted file mode 100644
index 4706d1db..00000000
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/nova.conf
+++ /dev/null
@@ -1,73 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=ec2,osapi_compute,metadata
-
-vif_plugging_is_fatal: false
-vif_plugging_timeout: 0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[conductor]
-manager = nova.conductor.manager.ConductorManager
-topic = conductor
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
index 028419b5..96f17231 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
@@ -14,7 +14,7 @@
register: NOVA_ADMIN_TENANT_ID
- name: update neutron conf
- template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
+ template: src=templates/neutron.conf dest=/etc/neutron/neutron.conf backup=yes
- name: update ml2 plugin conf
template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
diff --git a/deploy/adapters/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf b/deploy/adapters/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf
deleted file mode 100644
index 7bcbd9df..00000000
--- a/deploy/adapters/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-dhcp-option-force=26,1454
-
diff --git a/deploy/adapters/ansible/roles/neutron-controller/templates/ml2_conf.ini b/deploy/adapters/ansible/roles/neutron-controller/templates/ml2_conf.ini
deleted file mode 100644
index a7900693..00000000
--- a/deploy/adapters/ansible/roles/neutron-controller/templates/ml2_conf.ini
+++ /dev/null
@@ -1,108 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-flat_networks = external
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-vni_ranges = 1001:4095
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-vxlan_group = 239.1.1.1
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = True
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
-
-[ovs]
-local_ip = {{ internal_ip }}
-{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
-integration_bridge = br-int
-tunnel_bridge = br-tun
-tunnel_id_ranges = 1001:4095
-tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
-{% endif %}
-
-[agent]
-root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
-tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
-vxlan_udp_port = 4789
-{% endif %}
-l2_population = False
-
-[odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-network_vlan_ranges = 1001:4095
-tunnel_id_ranges = 1001:4095
-tun_peer_patch_port = patch-int
-int_peer_patch_port = patch-tun
-tenant_network_type = vxlan
-tunnel_bridge = br-tun
-integration_bridge = br-int
-controllers = 10.1.0.15:8080:admin:admin
-{% endif %}
-
-[ml2_odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-username = {{ odl_username }}
-password = {{ odl_password }}
-url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
-{% endif %}
-
diff --git a/deploy/adapters/ansible/roles/neutron-controller/templates/neutron-network.conf b/deploy/adapters/ansible/roles/neutron-controller/templates/neutron-network.conf
deleted file mode 100644
index df27cd47..00000000
--- a/deploy/adapters/ansible/roles/neutron-controller/templates/neutron-network.conf
+++ /dev/null
@@ -1,465 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port the bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update it's cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = regionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/deploy/adapters/ansible/roles/neutron-controller/templates/nova.conf b/deploy/adapters/ansible/roles/neutron-controller/templates/nova.conf
deleted file mode 100644
index 2b2bd9bb..00000000
--- a/deploy/adapters/ansible/roles/neutron-controller/templates/nova.conf
+++ /dev/null
@@ -1,69 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=ec2,osapi_compute,metadata
-
-vif_plugging_is_fatal: false
-vif_plugging_timeout: 0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
index 758f3208..6e4ed200 100644
--- a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
@@ -36,7 +36,7 @@
backup=yes
- name: update dnsmasq-neutron.conf
- template: src=dnsmasq-neutron.conf
+ template: src=templates/dnsmasq-neutron.conf
dest=/etc/neutron/dnsmasq-neutron.conf
- name: config metadata agent
@@ -49,7 +49,7 @@
backup=yes
- name: config neutron
- template: src=neutron-network.conf
+ template: src=templates/neutron-network.conf
dest=/etc/neutron/neutron.conf backup=yes
notify:
- restart common neutron network relation service
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf b/deploy/adapters/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
deleted file mode 100644
index 7bcbd9df..00000000
--- a/deploy/adapters/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-dhcp-option-force=26,1454
-
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/ml2_conf.ini b/deploy/adapters/ansible/roles/neutron-network/templates/ml2_conf.ini
deleted file mode 100644
index a7900693..00000000
--- a/deploy/adapters/ansible/roles/neutron-network/templates/ml2_conf.ini
+++ /dev/null
@@ -1,108 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-flat_networks = external
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-vni_ranges = 1001:4095
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-vxlan_group = 239.1.1.1
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = True
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
-
-[ovs]
-local_ip = {{ internal_ip }}
-{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
-integration_bridge = br-int
-tunnel_bridge = br-tun
-tunnel_id_ranges = 1001:4095
-tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
-{% endif %}
-
-[agent]
-root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
-tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
-vxlan_udp_port = 4789
-{% endif %}
-l2_population = False
-
-[odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-network_vlan_ranges = 1001:4095
-tunnel_id_ranges = 1001:4095
-tun_peer_patch_port = patch-int
-int_peer_patch_port = patch-tun
-tenant_network_type = vxlan
-tunnel_bridge = br-tun
-integration_bridge = br-int
-controllers = 10.1.0.15:8080:admin:admin
-{% endif %}
-
-[ml2_odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-username = {{ odl_username }}
-password = {{ odl_password }}
-url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
-{% endif %}
-
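The deleted ml2_conf.ini copies are pure Jinja2 over a handful of list variables, and each role rendered an identical file. A hedged sketch of the variable shapes the template expects; the names come from the template itself, but the values below are purely illustrative and not taken from this repo's vars files:

    NEUTRON_TYPE_DRIVERS: [flat, gre, vxlan]
    NEUTRON_TENANT_NETWORK_TYPES: [vxlan]
    NEUTRON_MECHANISM_DRIVERS: [openvswitch]
    NEUTRON_TUNNEL_TYPES: [vxlan]
    # With these values, {{ NEUTRON_TYPE_DRIVERS | join(",") }} renders as
    # "flat,gre,vxlan"; the 'openvswitch' membership test enables the [ovs]
    # bridge settings and the 'vxlan' test enables the vxlan_udp_port line.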
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/neutron-network.conf b/deploy/adapters/ansible/roles/neutron-network/templates/neutron-network.conf
deleted file mode 100644
index df27cd47..00000000
--- a/deploy/adapters/ansible/roles/neutron-network/templates/neutron-network.conf
+++ /dev/null
@@ -1,465 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port the bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update it's cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = regionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/neutron.conf b/deploy/adapters/ansible/roles/neutron-network/templates/neutron.conf
deleted file mode 100644
index 27b6d3cc..00000000
--- a/deploy/adapters/ansible/roles/neutron-network/templates/neutron.conf
+++ /dev/null
@@ -1,466 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ VERBOSE }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port the bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update it's cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = regionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
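One difference worth noting between the two deleted templates: this neutron.conf fills nova_admin_tenant_id from a registered result, NOVA_ADMIN_TENANT_ID.stdout_lines[0], whereas the neutron-network.conf variant above leaves that option unset. The registering task is not part of this diff; a hedged sketch of what such a task typically looks like (task name and command are assumptions, and admin credentials are assumed to be available in the environment):

    - name: get the id of the nova service tenant
      shell: keystone tenant-get service | awk '/ id / {print $4}'
      register: NOVA_ADMIN_TENANT_ID
    # NOVA_ADMIN_TENANT_ID.stdout_lines[0] then carries the tenant UUID that
    # the template interpolates into nova_admin_tenant_id.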
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/nova.conf b/deploy/adapters/ansible/roles/neutron-network/templates/nova.conf
deleted file mode 100644
index 2b2bd9bb..00000000
--- a/deploy/adapters/ansible/roles/neutron-network/templates/nova.conf
+++ /dev/null
@@ -1,69 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=ec2,osapi_compute,metadata
-
-vif_plugging_is_fatal: false
-vif_plugging_timeout: 0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
index 14c5450d..f4bb373e 100644
--- a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
@@ -6,9 +6,15 @@
with_items: packages | union(packages_noarch)
- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
+ template: src=templates/{{ item }} dest=/etc/nova/{{ item }}
with_items:
- nova.conf
+ notify:
+ - restart nova-compute services
+
+- name: update nova-compute conf
+ template: src={{ item }} dest=/etc/nova/{{ item }}
+ with_items:
- nova-compute.conf
notify:
- restart nova-compute services
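The hunk above splits the compute role's single loop into two tasks: nova.conf is now rendered from the shared templates/ path, while nova-compute.conf still comes from the role's own templates directory, and both notify the same restart handler. An equivalent single-task form, shown only to illustrate the path split and not what the patch itself does:

    - name: update nova-compute conf
      template: src={{ item.src }} dest=/etc/nova/{{ item.dest }}
      with_items:
        - { src: 'templates/nova.conf', dest: 'nova.conf' }          # shared, playbook-level template
        - { src: 'nova-compute.conf', dest: 'nova-compute.conf' }    # still role-local
      notify:
        - restart nova-compute services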
diff --git a/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf b/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf
deleted file mode 100644
index 4706d1db..00000000
--- a/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf
+++ /dev/null
@@ -1,73 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=ec2,osapi_compute,metadata
-
-vif_plugging_is_fatal: false
-vif_plugging_timeout: 0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[conductor]
-manager = nova.conductor.manager.ConductorManager
-topic = conductor
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml
index bb1dbac7..7242fda6 100644
--- a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml
+++ b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml
@@ -10,7 +10,7 @@
with_items: services | union(services_noarch)
- name: update nova conf
- template: src=nova.conf
+ template: src=templates/nova.conf
dest=/etc/nova/nova.conf
backup=yes
notify:
diff --git a/deploy/adapters/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf b/deploy/adapters/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf
deleted file mode 100644
index 7bcbd9df..00000000
--- a/deploy/adapters/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-dhcp-option-force=26,1454
-
diff --git a/deploy/adapters/ansible/roles/nova-controller/templates/ml2_conf.ini b/deploy/adapters/ansible/roles/nova-controller/templates/ml2_conf.ini
deleted file mode 100644
index a7900693..00000000
--- a/deploy/adapters/ansible/roles/nova-controller/templates/ml2_conf.ini
+++ /dev/null
@@ -1,108 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-flat_networks = external
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-vni_ranges = 1001:4095
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-vxlan_group = 239.1.1.1
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-# enable_security_group = True
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = True
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
-
-[ovs]
-local_ip = {{ internal_ip }}
-{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
-integration_bridge = br-int
-tunnel_bridge = br-tun
-tunnel_id_ranges = 1001:4095
-tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
-{% endif %}
-
-[agent]
-root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
-tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
-{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
-vxlan_udp_port = 4789
-{% endif %}
-l2_population = False
-
-[odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-network_vlan_ranges = 1001:4095
-tunnel_id_ranges = 1001:4095
-tun_peer_patch_port = patch-int
-int_peer_patch_port = patch-tun
-tenant_network_type = vxlan
-tunnel_bridge = br-tun
-integration_bridge = br-int
-controllers = 10.1.0.15:8080:admin:admin
-{% endif %}
-
-[ml2_odl]
-{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
-username = {{ odl_username }}
-password = {{ odl_password }}
-url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
-{% endif %}
-
diff --git a/deploy/adapters/ansible/roles/nova-controller/templates/neutron-network.conf b/deploy/adapters/ansible/roles/nova-controller/templates/neutron-network.conf
deleted file mode 100644
index df27cd47..00000000
--- a/deploy/adapters/ansible/roles/nova-controller/templates/neutron-network.conf
+++ /dev/null
@@ -1,465 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ DEBUG }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port the bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = regionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a health monitor is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the command directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Number of database reconnection retries if connectivity is lost;
-# setting this to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
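For context, a role-local Jinja2 template such as the neutron.conf removed above is normally rendered onto the target node with Ansible's template module. The sketch below is illustrative only: the play layout, host group, src/dest paths, and handler name are assumptions for demonstration, not tasks taken from this repository.

---
# Illustrative sketch: render a Jinja2 neutron.conf template and restart the service.
# Paths, host group, and handler name are placeholders, not this repository's values.
- hosts: controller
  become: yes
  tasks:
    - name: render neutron.conf from a shared template
      template:
        src: templates/neutron.conf
        dest: /etc/neutron/neutron.conf
      notify:
        - restart neutron-server
  handlers:
    - name: restart neutron-server
      service:
        name: neutron-server
        state: restarted

Rendering all roles from one template like this keeps variables such as {{ HA_VIP }} and {{ NEUTRON_PASS }} defined in a single place instead of per-role copies.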
diff --git a/deploy/adapters/ansible/roles/nova-controller/templates/neutron.conf b/deploy/adapters/ansible/roles/nova-controller/templates/neutron.conf
deleted file mode 100644
index 27b6d3cc..00000000
--- a/deploy/adapters/ansible/roles/nova-controller/templates/neutron.conf
+++ /dev/null
@@ -1,466 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ VERBOSE }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port to bind the API server to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
-rpc_thread_pool_size = 240
-# Size of RPC connection pool
-rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
-rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = regionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a health monitor is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the command directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Number of database reconnection retries if connectivity is lost;
-# setting this to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
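One detail worth noting in the removed nova-controller template: nova_admin_tenant_id is filled from NOVA_ADMIN_TENANT_ID.stdout_lines[0], i.e. the first output line of a previously register-ed task. A minimal, hypothetical sketch of that register-then-consume pattern follows; the shell command is a stand-in, since the actual lookup task is outside this diff.

---
# Illustrative sketch of the register -> stdout_lines pattern used by the template above.
# The shell command is a placeholder; a real task would query Keystone for the tenant UUID.
- hosts: controller
  tasks:
    - name: look up the nova service tenant id (placeholder command)
      shell: echo 00000000-0000-0000-0000-000000000000
      register: NOVA_ADMIN_TENANT_ID

    - name: consume the first line of the registered output
      debug:
        msg: "nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}"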