Diffstat (limited to 'deploy/adapters/ansible/roles/neutron-network')
 deploy/adapters/ansible/roles/neutron-network/handlers/main.yml              |  21
 deploy/adapters/ansible/roles/neutron-network/tasks/igmp-router.yml          |  20
 deploy/adapters/ansible/roles/neutron-network/tasks/main.yml                 | 110
 deploy/adapters/ansible/roles/neutron-network/tasks/odl.yml                  |  13
 deploy/adapters/ansible/roles/neutron-network/templates/dhcp_agent.ini       |  90
 deploy/adapters/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf |   2
 deploy/adapters/ansible/roles/neutron-network/templates/etc/xorp/config.boot |  25
 deploy/adapters/ansible/roles/neutron-network/templates/l3_agent.ini         |  81
 deploy/adapters/ansible/roles/neutron-network/templates/metadata_agent.ini   |  46
 deploy/adapters/ansible/roles/neutron-network/templates/ml2_conf.ini         | 108
 deploy/adapters/ansible/roles/neutron-network/templates/neutron-network.conf | 465
 deploy/adapters/ansible/roles/neutron-network/templates/neutron.conf         | 466
 deploy/adapters/ansible/roles/neutron-network/templates/neutron_init.sh      |   4
 deploy/adapters/ansible/roles/neutron-network/templates/nova.conf            |  69
 deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml                |  20
 15 files changed, 1540 insertions(+), 0 deletions(-)
diff --git a/deploy/adapters/ansible/roles/neutron-network/handlers/main.yml b/deploy/adapters/ansible/roles/neutron-network/handlers/main.yml
new file mode 100644
index 00000000..d6c5cc8f
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/handlers/main.yml
@@ -0,0 +1,21 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+ service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+ service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: kill dnsmasq
+ command: killall dnsmasq
+ ignore_errors: True
+
+- name: restart neutron-dhcp-agent
+ service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+ service: name=neutron-metadata-agent state=restarted enabled=yes
+
+- name: restart xorp
+ service: name=xorp state=restarted enabled=yes sleep=10
+ ignore_errors: True
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/igmp-router.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/igmp-router.yml
new file mode 100644
index 00000000..d6f38a0e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/igmp-router.yml
@@ -0,0 +1,20 @@
+---
+- name: Install XORP to provide IGMP router functionality
+ apt: pkg=xorp
+
+- name: configure xorp
+ template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot
+ notify:
+ - restart xorp
+
+- name: set xorp defaults
+ lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes
+ notify:
+ - restart xorp
+
+- meta: flush_handlers
+
+- name: start and enable xorp service
+ service: name=xorp state=started enabled=yes
+ retries: 2
+ delay: 10
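
A quick way to sanity-check the IGMP-router piece after this play runs is to confirm xorp is up and that the internal interface is actually joining and seeing multicast groups. A minimal sketch, assuming the internal interface resolves to eth1 (illustrative, not a name from the role) and standard iproute2/tcpdump tooling:

    service xorp status          # xorp should be running and enabled
    ip maddr show dev eth1       # multicast group membership on the internal NIC
    tcpdump -ni eth1 igmp        # watch IGMP queries/reports on the wire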
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
new file mode 100644
index 00000000..f0953e0e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
@@ -0,0 +1,110 @@
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: activate ipv4 forwarding
+ sysctl: name=net.ipv4.ip_forward value=1
+ state=present reload=yes
+
+- name: deactivate ipv4 rp filter
+ sysctl: name=net.ipv4.conf.all.rp_filter value=0
+ state=present reload=yes
+
+- name: deactivate ipv4 default rp filter
+ sysctl: name=net.ipv4.conf.default.rp_filter
+ value=0 state=present reload=yes
+
+- name: install neutron network related packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: packages
+
+- name: install neutron openvswitch agent
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: neutron_plugin_openvswitch_agent_packages
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: generate neutron service list
+ shell: echo {{ item }} >> /opt/service
+ with_items: services
+
+- name: generate neutron-plugin-openvswitch-agent service list
+ shell: echo {{ item }} >> /opt/service
+ with_items: neutron_plugin_openvswitch_agent_services
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: config neutron
+ template: src=neutron-network.conf
+ dest=/etc/neutron/neutron.conf backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+ - restart neutron-l3-agent
+ - kill dnsmasq
+ - restart neutron-dhcp-agent
+ - restart neutron-metadata-agent
+
+- name: config l3 agent
+ template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
+ backup=yes
+ notify:
+ - restart neutron-l3-agent
+
+- name: config dhcp agent
+ template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
+ backup=yes
+ notify:
+ - kill dnsmasq
+ - restart neutron-dhcp-agent
+
+- name: update dnsmasq-neutron.conf
+ template: src=dnsmasq-neutron.conf
+ dest=/etc/neutron/dnsmasq-neutron.conf
+ notify:
+ - kill dnsmasq
+ - restart neutron-dhcp-agent
+
+- name: config metadata agent
+ template: src=metadata_agent.ini
+ dest=/etc/neutron/metadata_agent.ini backup=yes
+ notify:
+ - restart neutron-metadata-agent
+
+- name: config ml2 plugin
+ template: src=ml2_conf.ini
+ dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+
+- meta: flush_handlers
+
+- name: add br-int
+ openvswitch_bridge: bridge=br-int state=present
+
+- name: add br-ex
+ openvswitch_bridge: bridge=br-ex state=present
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: assign a port to br-ex for physical ext interface
+ openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }}
+ state=present
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- include: igmp-router.yml
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert kernel support for vxlan
+ command: modinfo -F version vxlan
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert iproute2 support for vxlan
+ command: ip link add type vxlan help
+ register: iproute_out
+ failed_when: iproute_out.rc == 255
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- include: odl.yml
+ when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart ovs service
+ service: name=openvswitch-switch state=restarted enabled=yes
+
+- meta: flush_handlers
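
Once this play finishes, the resulting OVS layout can be verified by hand. A minimal sketch, assuming INTERFACE_NAME rendered to eth2 (illustrative); the last two commands repeat the same kernel and iproute2 VXLAN checks the tasks above perform:

    ovs-vsctl br-exists br-int && echo br-int present
    ovs-vsctl list-ports br-ex          # expect the external NIC, e.g. eth2
    modinfo -F version vxlan            # kernel module support
    ip link add type vxlan help         # iproute2 support (the play treats rc 255 as missing)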
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/odl.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/odl.yml
new file mode 100644
index 00000000..a2b449ce
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/odl.yml
@@ -0,0 +1,13 @@
+---
+- name: ovs set manager
+ command: ovs-vsctl set-manager tcp:{{ controller }}:6640
+
+- name: get ovs uuid
+ shell: ovs-vsctl get Open_vSwitch . _uuid
+ register: ovs_uuid
+
+- name: set bridge_mappings
+ command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }}
+
+- name: set local ip
+ command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:local_ip={{ internal_ip }}
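
The effect of these four ovs-vsctl calls can be read straight back out of ovsdb to confirm the node is wired to OpenDaylight:

    ovs-vsctl get-manager                        # expect tcp:<controller>:6640
    ovs-vsctl get Open_vSwitch . other_config    # expect bridge_mappings and local_ip keys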
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/dhcp_agent.ini b/deploy/adapters/ansible/roles/neutron-network/templates/dhcp_agent.ini
new file mode 100644
index 00000000..19eb62ec
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and an
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf b/deploy/adapters/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
new file mode 100644
index 00000000..7bcbd9df
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
+
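The single dhcp-option-force=26,1454 line makes dnsmasq push DHCP option 26 (interface MTU) to guests, capping the in-instance MTU at 1454 bytes so GRE/VXLAN encapsulation headers fit inside a 1500-byte underlay frame without fragmentation. A quick check from a booted guest, assuming its NIC is eth0:

    ip link show eth0    # MTU should read 1454 once a lease is taken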
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/etc/xorp/config.boot b/deploy/adapters/ansible/roles/neutron-network/templates/etc/xorp/config.boot
new file mode 100644
index 00000000..32caf96d
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+ restore-original-config-on-shutdown: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ description: "Internal pNodes interface"
+ disable: false
+ default-system-config
+ }
+}
+
+protocols {
+ igmp {
+ disable: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ disable: false
+ version: 3
+ }
+ }
+ traceoptions {
+ flag all {
+ disable: false
+ }
+ }
+ }
+}
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/l3_agent.ini b/deploy/adapters/ansible/roles/neutron-network/templates/l3_agent.ini
new file mode 100644
index 00000000..b394c008
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and an
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False, then the agent can only configure one router.
+
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple external
+# networks, both external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to
+# an empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/metadata_agent.ini b/deploy/adapters/ansible/roles/neutron-network/templates/metadata_agent.ini
new file mode 100644
index 00000000..375f46c2
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = regionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
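
With metadata_proxy_shared_secret matching nova's neutron_metadata_proxy_shared_secret, the metadata path can be smoke-tested end to end. A minimal sketch: run the first command inside a guest (169.254.169.254 is the fixed metadata address), the second from the network node, substituting the rendered HA_VIP:

    curl http://169.254.169.254/latest/meta-data/instance-id
    curl -s -o /dev/null -w '%{http_code}\n' http://<HA_VIP>:8775/   # expect 200 from the nova metadata API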
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/ml2_conf.ini b/deploy/adapters/ansible/roles/neutron-network/templates/ml2_conf.ini
new file mode 100644
index 00000000..a7900693
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
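One pairing in this template is worth calling out: vxlan_group = 239.1.1.1 enables multicast-mode VXLAN on the underlay, while l2_population = False leaves broadcast/unknown-unicast flooding to that multicast group rather than to learned per-tunnel forwarding. That combination is presumably why this role provisions xorp as an IGMP router (tasks/igmp-router.yml) whenever vxlan appears in NEUTRON_TUNNEL_TYPES.
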
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/neutron-network.conf b/deploy/adapters/ansible/roles/neutron-network/templates/neutron-network.conf
new file mode 100644
index 00000000..df27cd47
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/neutron-network.conf
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response.
+# A value of 'infinite' or a negative integer means no limit; otherwise
+# the value must be greater than 0. If the number of items requested
+# exceeds pagination_max_limit, the server returns only
+# pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = regionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/neutron.conf b/deploy/adapters/ansible/roles/neutron-network/templates/neutron.conf
new file mode 100644
index 00000000..27b6d3cc
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response.
+# A value of 'infinite' or a negative integer means no limit; otherwise
+# the value must be greater than 0. If the number of items requested
+# exceeds pagination_max_limit, the server returns only
+# pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = regionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/neutron_init.sh b/deploy/adapters/ansible/roles/neutron-network/templates/neutron_init.sh
new file mode 100644
index 00000000..b92e202f
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
+
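These two commented commands are the one-time bootstrap for the external network: the first would create the shared ext-net with router:external=True, the second would carve out ext-subnet with a floating-IP allocation pool, DHCP disabled, and the physical gateway. The template renders the exact CLI for the deployed endpoints; presumably the lines are left commented so an operator can run them once by hand after the control plane is up.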
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/nova.conf b/deploy/adapters/ansible/roles/neutron-network/templates/nova.conf
new file mode 100644
index 00000000..95870732
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/templates/nova.conf
@@ -0,0 +1,69 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
new file mode 100644
index 00000000..0350c42b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
@@ -0,0 +1,20 @@
+---
+packages:
+ - neutron-plugin-ml2
+ - openvswitch-datapath-dkms
+ - openvswitch-switch
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+
+services:
+ - neutron-plugin-ml2
+ - openvswitch-datapath-dkms
+ - openvswitch-switch
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+
+neutron_plugin_openvswitch_agent_packages:
+ - neutron-plugin-openvswitch-agent
+
+neutron_plugin_openvswitch_agent_services:
+ - neutron-plugin-openvswitch-agent