path: root/deploy/adapters/ansible/openstack_mitaka_xenial
author     chenshuai@huawei.com <chenshuai@huawei.com>  2016-07-10 22:38:31 -0400
committer  chenshuai@huawei.com <chenshuai@huawei.com>  2016-07-11 02:34:23 -0400
commit     2774e3141ec319be39a6c37dabae5b30d5e43b6b (patch)
tree       72b0ebe494c4d76dd33ba998daabe2485e43fc00 /deploy/adapters/ansible/openstack_mitaka_xenial
parent     407ad32e7541fc21773f23bbff790775ffa8af8e (diff)
Improvement: add xenial into make_repo.sh
JIRA: COMPASS-431

Change-Id: I010c3df11bd7fe1fec6ad8034a0ae67bb3e84ac7
Signed-off-by: chenshuai@huawei.com <chenshuai@huawei.com>
Diffstat (limited to 'deploy/adapters/ansible/openstack_mitaka_xenial')
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/.gitkeep  0
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml  233
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/handlers/main.yml  12
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/main.yml  29
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2  47
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/api_paste.ini.j2  22
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/policy.json.j2  20
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/Debian.yml  22
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/RedHat.yml  22
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/main.yml  12
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml  37
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/RedHat.yml  36
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/tasks/install_mon.yml  43
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/templates/ceph-mon.service  22
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/vars/Debian.yml  12
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml  19
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/main.yml  33
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/vars/Debian.yml  30
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-osd/tasks/install_osd.yml  37
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_cluster_debian.yml  69
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_install.yml  70
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/Debian.yml  55
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/main.yml  37
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/handlers/main.yml  29
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/tasks/nfs.yml  61
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/Debian.yml  21
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/RedHat.yml  23
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/Debian.yml  22
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/main.yml  164
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/tasks/main.yml  75
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/vars/Debian.yml  19
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/tasks/main.yml  117
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/vars/Debian.yml  25
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/tasks/main.yml  59
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-controller/tasks/nova_config.yml  21
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml  148
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/Debian.yml  23
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/handlers/main.yml  11
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/main.yml  51
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/onos_controller.yml  140
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/openvswitch.yml  57
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/templates/ml2_conf.sh  15
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/Debian.yml  14
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/RedHat.yml  14
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/main.yml  19
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml  46
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/vars/Debian.yml  35
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/setup-network/files/setup_networks/net_init  24
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/storage/files/storage  10
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2  426
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/templates/neutron.conf  486
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/templates/nova.conf  96
52 files changed, 3170 insertions, 0 deletions
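Note: HA-ansible-multinodes.yml added below is the entry playbook for this adapter; Compass is expected to render an inventory for the target hosts and run it. A minimal sketch of an equivalent manual invocation (the inventory placeholder is illustrative, and the roles referenced by the playbook must be resolvable on the Ansible roles path):

    ansible-playbook -i <generated-inventory> deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml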
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/.gitkeep b/deploy/adapters/ansible/openstack_mitaka_xenial/.gitkeep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/.gitkeep
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
new file mode 100644
index 00000000..c22d1aee
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
@@ -0,0 +1,233 @@
+---
+- hosts: all
+ remote_user: root
+ pre_tasks:
+ - name: make sure ssh dir exists
+ file:
+ path: '{{ item.path }}'
+ owner: '{{ item.owner }}'
+ group: '{{ item.group }}'
+ state: directory
+ mode: 0755
+ with_items:
+ - path: /root/.ssh
+ owner: root
+ group: root
+
+ - name: write ssh config
+ copy:
+ content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
+ dest: '{{ item.dest }}'
+ owner: '{{ item.owner }}'
+ group: '{{ item.group }}'
+ mode: 0600
+ with_items:
+ - dest: /root/.ssh/config
+ owner: root
+ group: root
+
+ - name: generate ssh keys
+ shell: if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "already gen ssh key!"; fi;
+
+ - name: fetch ssh keys
+ fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes
+
+ - authorized_key:
+ user: root
+ key: "{{ lookup('file', item) }}"
+ with_fileglob:
+ - /tmp/ssh-keys-*
+ max_fail_percentage: 0
+ roles:
+ - common
+
+- hosts: all
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - setup-network
+
+- hosts: ha
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - ha
+
+- hosts: controller
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - memcached
+ - apache
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - cinder-controller
+ - glance
+ - neutron-common
+ - neutron-network
+ - ceilometer_controller
+# - ext-network
+ - dashboard
+ - heat
+ - aodh
+
+- hosts: all
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - storage
+
+- hosts: compute
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - nova-compute
+ - neutron-compute
+ - cinder-volume
+ - ceilometer_compute
+
+- hosts: all
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - secgroup
+
+- hosts: ceph_adm
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles: []
+ # - ceph-deploy
+
+- hosts: ceph
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - ceph-purge
+ - ceph-config
+
+- hosts: ceph_mon
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - ceph-mon
+
+- hosts: ceph_osd
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - ceph-osd
+
+- hosts: ceph
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - ceph-openstack
+
+- hosts: all
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - monitor
+
+
+- hosts: all
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ tasks:
+ - name: set bash to nova
+ user:
+ name: nova
+ shell: /bin/bash
+
+ - name: make sure ssh dir exists
+ file:
+ path: '{{ item.path }}'
+ owner: '{{ item.owner }}'
+ group: '{{ item.group }}'
+ state: directory
+ mode: 0755
+ with_items:
+ - path: /var/lib/nova/.ssh
+ owner: nova
+ group: nova
+
+ - name: copy ssh keys for nova
+ shell: cp -rf /root/.ssh/id_rsa /var/lib/nova/.ssh;
+
+ - name: write ssh config
+ copy:
+ content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
+ dest: '{{ item.dest }}'
+ owner: '{{ item.owner }}'
+ group: '{{ item.group }}'
+ mode: 0600
+ with_items:
+ - dest: /var/lib/nova/.ssh/config
+ owner: nova
+ group: nova
+
+ - authorized_key:
+ user: nova
+ key: "{{ lookup('file', item) }}"
+ with_fileglob:
+ - /tmp/ssh-keys-*
+
+ - name: chown ssh file
+ shell: chown -R nova:nova /var/lib/nova/.ssh;
+
+
+- hosts: all
+ remote_user: root
+ max_fail_percentage: 0
+ roles:
+ - odl_cluster
+
+- hosts: all
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - onos_cluster
+
+- hosts: all
+ remote_user: root
+ sudo: True
+ max_fail_percentage: 0
+ roles:
+ - open-contrail
+
+- hosts: all
+ remote_user: root
+ serial: 1
+ max_fail_percentage: 0
+ roles:
+ - odl_cluster_neutron
+
+- hosts: all
+ remote_user: root
+ max_fail_percentage: 0
+ roles:
+ - odl_cluster_post
+
+- hosts: controller
+ remote_user: root
+ max_fail_percentage: 0
+ roles:
+ - ext-network
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/handlers/main.yml
new file mode 100644
index 00000000..e1084c83
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/handlers/main.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart aodh services
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: services
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/main.yml
new file mode 100644
index 00000000..75b15511
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/tasks/main.yml
@@ -0,0 +1,29 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: install aodh packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: packages | union(packages_noarch)
+
+- name: update aodh conf
+ template: src={{ item }} dest=/etc/aodh/aodh.conf
+ backup=yes
+ with_items:
+ - aodh.conf.j2
+# - api_paste.ini.j2
+# - policy.json.j2
+ notify: restart aodh services
+
+- name: write services to monitor list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: services
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2
new file mode 100644
index 00000000..85512ed5
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2
@@ -0,0 +1,47 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
+[DEFAULT]
+bind_host = {{ internal_ip }}
+bind_port = 8042
+rpc_backend = rabbit
+auth_strategy = keystone
+debug = True
+verbose = True
+
+[oslo_messaging_rabbit]
+rabbit_hosts = {{ internal_vip.ip }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_use_ssl = false
+
+[database]
+connection = mongodb://aodh:{{ AODH_DBPASS }}@{{ internal_vip.ip }}:27017/aodh
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+identity_uri = http://{{ internal_vip.ip }}:35357
+auth_plugin = password
+project_domain_id = default
+user_domain_id = default
+project_name = service
+username = aodh
+password = {{ AODH_PASS }}
+memcached_servers = {{ memcached_servers }}
+token_cache_time = 300
+revocation_cache_time = 60
+
+[service_credentials]
+os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0
+os_username = aodh
+os_tenant_name = service
+os_password = {{ AODH_PASS }}
+os_endpoint_type = internalURL
+os_region_name = regionOne
+
+[api]
+host = {{ internal_ip }}
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/api_paste.ini.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/api_paste.ini.j2
new file mode 100644
index 00000000..151789c4
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/api_paste.ini.j2
@@ -0,0 +1,22 @@
+# aodh API WSGI Pipeline
+# Define the filters that make up the pipeline for processing WSGI requests
+# Note: This pipeline is PasteDeploy's term rather than aodh's pipeline
+# used for processing samples
+
+# Remove authtoken from the pipeline if you don't want to use keystone authentication
+[pipeline:main]
+pipeline = cors request_id authtoken api-server
+
+[app:api-server]
+paste.app_factory = aodh.api.app:app_factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+oslo_config_project = aodh
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = aodh
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/policy.json.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/policy.json.j2
new file mode 100644
index 00000000..4fd873e9
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/policy.json.j2
@@ -0,0 +1,20 @@
+{
+ "context_is_admin": "role:admin",
+ "segregation": "rule:context_is_admin",
+ "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s",
+ "default": "rule:admin_or_owner",
+
+ "telemetry:get_alarm": "rule:admin_or_owner",
+ "telemetry:get_alarms": "rule:admin_or_owner",
+ "telemetry:query_alarm": "rule:admin_or_owner",
+
+ "telemetry:create_alarm": "",
+ "telemetry:change_alarm": "rule:admin_or_owner",
+ "telemetry:delete_alarm": "rule:admin_or_owner",
+
+ "telemetry:get_alarm_state": "rule:admin_or_owner",
+ "telemetry:change_alarm_state": "rule:admin_or_owner",
+
+ "telemetry:alarm_history": "rule:admin_or_owner",
+ "telemetry:query_alarm_history": "rule:admin_or_owner"
+}
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/Debian.yml
new file mode 100644
index 00000000..bdf4655e
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/Debian.yml
@@ -0,0 +1,22 @@
+#############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+packages:
+ - aodh-api
+ - aodh-evaluator
+ - aodh-notifier
+ - aodh-listener
+ - aodh-expirer
+ - python-ceilometerclient
+
+services:
+ - aodh-api
+ - aodh-notifier
+ - aodh-evaluator
+ - aodh-listener
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/RedHat.yml
new file mode 100644
index 00000000..a0381c6b
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/RedHat.yml
@@ -0,0 +1,22 @@
+#############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+packages:
+ - openstack-aodh-api
+ - openstack-aodh-evaluator
+ - openstack-aodh-notifier
+ - openstack-aodh-listener
+ - openstack-aodh-expirer
+ - python-ceilometerclient
+
+services:
+ - openstack-aodh-api
+ - openstack-aodh-notifier
+ - openstack-aodh-evaluator
+ - openstack-aodh-listener
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/main.yml
new file mode 100644
index 00000000..b17f6ed0
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/vars/main.yml
@@ -0,0 +1,12 @@
+##############################################################################
+## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+---
+packages_noarch: []
+
+services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml
new file mode 100644
index 00000000..b749ffaa
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+ceilometer_packages:
+ - ceilometer-api
+ - ceilometer-collector
+ - ceilometer-agent-central
+ - ceilometer-agent-notification
+# - ceilometer-alarm-evaluator
+# - ceilometer-alarm-notifier
+ - python-ceilometerclient
+
+ceilometer_services:
+ - ceilometer-agent-central
+ - ceilometer-agent-notification
+ - ceilometer-api
+ - ceilometer-collector
+# - ceilometer-alarm-evaluator
+# - ceilometer-alarm-notifier
+
+ceilometer_configs_templates:
+ - src: ceilometer.j2
+ dest:
+ - /etc/ceilometer/ceilometer.conf
+ - src: cinder.j2
+ dest:
+ - /etc/cinder/cinder.conf
+ - src: glance.j2
+ dest:
+ - /etc/glance/glance-api.conf
+ - /etc/glance/glance-registry.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/RedHat.yml
new file mode 100644
index 00000000..6c5f53ec
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/RedHat.yml
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+ceilometer_packages:
+ - openstack-ceilometer-api
+ - openstack-ceilometer-collector
+ - openstack-ceilometer-central
+ - openstack-ceilometer-notification
+# - openstack-ceilometer-alarm
+ - python-ceilometerclient
+
+ceilometer_services:
+ - openstack-ceilometer-central
+ - openstack-ceilometer-notification
+ - openstack-ceilometer-api
+ - openstack-ceilometer-collector
+# - openstack-ceilometer-alarm-evaluator
+# - openstack-ceilometer-alarm-notifier
+
+ceilometer_configs_templates:
+ - src: ceilometer.j2
+ dest:
+ - /etc/ceilometer/ceilometer.conf
+ - src: cinder.j2
+ dest:
+ - /etc/cinder/cinder.conf
+ - src: glance.j2
+ dest:
+ - /etc/glance/glance-api.conf
+ - /etc/glance/glance-registry.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/tasks/install_mon.yml
new file mode 100644
index 00000000..1d14c2d2
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/tasks/install_mon.yml
@@ -0,0 +1,43 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: Create a default data directory
+ file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}" state="directory"
+
+- name: Populate the monitor daemon
+ shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring"
+
+- name: Change ceph/mon dir owner to ceph
+ shell: "chown -R ceph:ceph /var/lib/ceph/mon"
+ when: ansible_os_family == "Debian"
+
+- name: copy templates
+ template:
+ src: ceph-mon.service
+ dest: /lib/systemd/system/ceph-mon.service
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: Touch the done and auto start file
+ file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch"
+ with_items:
+ - "done"
+ - "{{ ceph_start_type }}"
+
+- name: start mon daemon
+ shell: "{{ ceph_start_script }}"
+
+- name: wait for creating osd keyring
+ wait_for: path=/var/lib/ceph/bootstrap-osd/ceph.keyring
+
+- name: fetch osd keyring
+ fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
+ run_once: True
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/templates/ceph-mon.service b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/templates/ceph-mon.service
new file mode 100644
index 00000000..5a3cf753
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/templates/ceph-mon.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=Ceph cluster monitor daemon
+Documentation=man:ceph-mon
+
+After=network-online.target local-fs.target ceph-create-keys.service
+Wants=network-online.target local-fs.target ceph-create-keys.service
+
+PartOf=ceph.target
+
+[Service]
+LimitNOFILE=1048576
+LimitNPROC=1048576
+EnvironmentFile=-/etc/default/ceph
+Environment=CLUSTER=ceph
+ExecStart=/usr/bin/ceph-mon -f --cluster ${CLUSTER} --id {{ inventory_hostname }} --setuser ceph --setgroup ceph
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=on-failure
+RestartSec=30
+TasksMax=infinity
+
+[Install]
+WantedBy=multi-user.target
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/vars/Debian.yml
new file mode 100644
index 00000000..a792acad
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-mon/vars/Debian.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+ceph_start_script: "service ceph-mon start"
+ceph_start_type: "systemd"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml
new file mode 100644
index 00000000..2097ca57
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml
@@ -0,0 +1,19 @@
+##############################################################################
+## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+---
+- name: get mount info
+ command: mount
+ register: mount_info
+
+- name: try to unmount image nfs directory
+ shell: |
+ umount /var/lib/glance/images
+ sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
+ when: mount_info.stdout.find('images') != -1
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/main.yml
new file mode 100644
index 00000000..06c3acb6
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/tasks/main.yml
@@ -0,0 +1,33 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- include_vars: "{{ ansible_os_family }}.yml"
+ tags:
+ - ceph_deploy
+ - ceph_openstack_pre
+ - ceph_openstack_conf
+ - ceph_openstack_post
+ - ceph_openstack
+
+- include: ceph_openstack_pre.yml
+ tags:
+ - ceph_deploy
+ - ceph_openstack_pre
+ - ceph_openstack
+
+- include: ceph_openstack_conf.yml
+ tags:
+ - ceph_deploy
+ - ceph_openstack_conf
+ - ceph_openstack
+
+- include: ceph_openstack_post.yml
+ tags:
+ - ceph_deploy
+ - ceph_openstack_post
+ - ceph_openstack
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/vars/Debian.yml
new file mode 100755
index 00000000..db10bd14
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-openstack/vars/Debian.yml
@@ -0,0 +1,30 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages:
+ - ceph-deploy
+ - python-flask
+ - libgoogle-perftools4
+ - libleveldb1v5
+ - liblttng-ust0
+ - libsnappy1v5
+ - librbd1
+ - librados2
+ - python-ceph
+ - ceph
+ - ceph-mds
+ - ceph-common
+ - ceph-fs-common
+ - gdisk
+
+services: []
+
+cinder_service: cinder-volume
+nova_service: nova-compute
+glance_service: glance-api
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-osd/tasks/install_osd.yml
new file mode 100644
index 00000000..16f261ef
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceph-osd/tasks/install_osd.yml
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: create osd lv and mount it on /var/local/osd
+ script: create_osd.sh
+
+- name: copy osd keyring
+ copy: src="/tmp/ceph.osd.keyring" dest="/var/lib/ceph/bootstrap-osd/ceph.keyring"
+
+- name: prepare osd disk
+ shell: ceph-disk prepare --fs-type xfs /var/local/osd
+
+- name: change local/osd dir owner to ceph
+ shell: chown -R ceph:ceph /var/local/osd
+ when: ansible_os_family == "Debian"
+
+- name: activate osd node
+ shell: ceph-disk activate /var/local/osd
+
+- name: enable ceph service
+ service: name=ceph enabled=yes
+
+- name: rebuild osd after reboot
+ lineinfile: dest=/etc/init/ceph-osd-all-starter.conf insertafter="^task" line="pre-start script\n set -e\n /opt/setup_storage/losetup.sh\n sleep 3\n mount /dev/storage-volumes/ceph0 /var/local/osd\nend script"
+ when: ansible_os_family == "Debian"
+
+- name: rebuild osd after reboot for centos
+ lineinfile: dest=/etc/init.d/ceph insertafter="^### END INIT INFO" line="\nsleep 1\nmount /dev/storage-volumes/ceph0 /var/local/osd"
+ when: ansible_os_family == "RedHat"
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_cluster_debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_cluster_debian.yml
new file mode 100644
index 00000000..442cd18b
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_cluster_debian.yml
@@ -0,0 +1,69 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: get cluster status
+ shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
+ register: cluster_status
+ when:
+ - inventory_hostname == haproxy_hosts.keys()[0]
+
+- name: start first node to create new cluster
+ shell: >
+ service mysql bootstrap;
+ service mysql start;
+ when: |
+ inventory_hostname == haproxy_hosts.keys()[0]
+ and not cluster_status.stdout | search("OPERATIONAL")
+
+- name: wait for cluster ready
+ shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
+ register: cluster_status
+ until: cluster_status|success
+ failed_when: not cluster_status.stdout | search("OPERATIONAL")
+ retries: 10
+ delay: 3
+ when: |
+ inventory_hostname == haproxy_hosts.keys()[0]
+ and not cluster_status.stdout | search("OPERATIONAL")
+
+- name: check whether this node is already in the cluster
+ shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_incoming_addresses"'|awk '{print $2}'
+ register: cluster_nodes
+ changed_when: false
+
+- name: restart other nodes and join cluster1
+ shell: service mysql restart;
+ when: |
+ inventory_hostname != haproxy_hosts.keys()[0]
+ and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
+ ignore_errors: True
+
+- name: delay 60 seconds
+ shell: sleep 60
+
+- name: restart other nodes and join cluster2
+ shell: service mysql restart;
+ when: |
+ inventory_hostname != haproxy_hosts.keys()[0]
+ and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
+
+- name: chmod directory
+ shell: >
+ chmod 755 -R /var/lib/mysql/ ;
+ chmod 755 -R /var/log/mysql/ ;
+ chmod 755 -R /etc/mysql/conf.d/;
+
+- name: restart first node
+ shell: service mysql restart
+ when: |
+ (inventory_hostname == haproxy_hosts.keys()[0]
+ and haproxy_hosts|length > 1
+ and not cluster_nodes.stdout | search( '{{ internal_ip }}' ))
+
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_install.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_install.yml
new file mode 100644
index 00000000..1b08172d
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/tasks/mariadb_install.yml
@@ -0,0 +1,70 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: change open file limit
+ copy:
+ content: "* - nofile 65536"
+ dest: "/etc/security/limits.conf"
+ mode: 0755
+
+- name: install python-mysqldb
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: maridb_packages | union(packages_noarch)
+
+- name: create conf dir for wsrep
+ file: path=/etc/my.cnf.d state=directory mode=0755
+ when: ansible_os_family == "RedHat"
+
+- name: update mariadb config file
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ backup: yes
+ mode: 0644
+ with_items: mysql_config
+
+- name: bugfix for rsync version 3.1
+ lineinfile:
+ dest: /usr/bin/wsrep_sst_rsync
+ state: absent
+ regexp: '{{ item }}'
+ with_items:
+ - "\\s*uid = \\$MYUID$"
+ - "\\s*gid = \\$MYGID$"
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: set owner
+ file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory mode=0755
+
+- name: get logfile stat
+ stat: path='{{ mysql_data_dir }}/ib_logfile0'
+ register: logfile_stat
+
+- debug: msg='{{ logfile_stat.stat.exists }}'
+- debug: msg='{{ logfile_stat.stat.size }}'
+ when: logfile_stat.stat.exists
+
+- name: remove logfile if it exists and size mismatches
+ shell: 'rm -rf {{ mysql_data_dir }}/ib_logfile*'
+ when: |
+ logfile_stat.stat.exists
+ and logfile_stat.stat.size != 1073741824
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/Debian.yml
new file mode 100644
index 00000000..1021524d
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/Debian.yml
@@ -0,0 +1,55 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+mongodb_packages:
+ - mongodb-server
+ - mongodb-clients
+ - python-pymongo
+
+mysql_packages:
+ - python-mysqldb
+ - mysql-server
+
+maridb_packages:
+ - apt-transport-https
+ - debconf-utils
+ - libaio1
+ - libc6
+ - libdbd-mysql-perl
+ - libgcc1
+ - libgcrypt20
+ - libstdc++6
+ - python-software-properties
+ - mariadb-client
+ - galera-3
+ - rsync
+ - socat
+ - mariadb-galera-server-10.0
+ - python-mysqldb
+
+pip_packages: []
+
+services: []
+
+mongodb_service: mongodb
+mysql_config:
+ - dest: /etc/mysql/my.cnf
+ src: my.cnf
+ - dest: /etc/mysql/conf.d/wsrep.cnf
+ src: wsrep.cnf
+
+mysql_config_dir: /etc/mysql/conf.d
+mysql_data_dir: /var/lib/mysql
+
+mongodb_config:
+ dest: /etc/mongodb.conf
+ src: mongodb.conf
+ journal: /var/lib/mongodb/journal/*
+
+wsrep_provider_file: "/usr/lib/galera/libgalera_smm.so"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/main.yml
new file mode 100644
index 00000000..b4ff7ba5
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/database/vars/main.yml
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages_noarch: []
+
+services_noarch:
+ - mysql
+
+credentials:
+ - user: keystone
+ db: keystone
+ password: "{{ KEYSTONE_DBPASS }}"
+ - user: neutron
+ db: neutron
+ password: "{{ NEUTRON_DBPASS }}"
+ - user: glance
+ db: glance
+ password: "{{ GLANCE_DBPASS }}"
+ - user: nova
+ db: nova_api
+ password: "{{ NOVA_DBPASS }}"
+ - user: nova
+ db: nova
+ password: "{{ NOVA_DBPASS }}"
+ - user: cinder
+ db: cinder
+ password: "{{ CINDER_DBPASS }}"
+ - user: heat
+ db: heat
+ password: "{{ HEAT_DBPASS }}"
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/handlers/main.yml
new file mode 100644
index 00000000..36e39072
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ext-network/handlers/main.yml
@@ -0,0 +1,29 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart neutron-plugin-openvswitch-agent
+ service: name=neutron-openvswitch-agent state=restarted enabled=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+ service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: kill dnsmasq
+ command: killall dnsmasq
+ ignore_errors: True
+
+- name: restart neutron-dhcp-agent
+ service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+ service: name=neutron-metadata-agent state=restarted enabled=yes
+
+- name: restart xorp
+ service: name=xorp state=restarted enabled=yes sleep=10
+ ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/tasks/nfs.yml
new file mode 100644
index 00000000..07dfacdd
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/tasks/nfs.yml
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: install nfs packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: nfs_packages
+
+- name: install nfs
+ local_action: yum name={{ item }} state=present
+ with_items:
+ - rpcbind
+ - nfs-utils
+ run_once: True
+
+- name: create image directory
+ local_action: file path=/opt/images state=directory mode=0777
+ run_once: True
+
+- name: remove nfs config item if it exists
+ local_action: lineinfile dest=/etc/exports state=absent
+ regexp="^/opt/images"
+ run_once: True
+
+- name: update nfs config
+ local_action: lineinfile dest=/etc/exports state=present
+ line="/opt/images *(rw,insecure,sync,all_squash)"
+ run_once: True
+
+- name: restart compass nfs service
+ local_action: service name={{ item }} state=restarted enabled=yes
+ with_items:
+ - rpcbind
+ - nfs-server
+ run_once: True
+
+- name: get mount info
+ command: mount
+ register: mount_info
+
+- name: get nfs server
+ shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
+ register: ip_info
+
+- name: restart host nfs service
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: '{{ nfs_services }}'
+
+- name: mount image directory
+ shell: |
+ mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images
+ sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
+ echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
+ when: mount_info.stdout.find('images') == -1
+ retries: 5
+ delay: 3
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/Debian.yml
new file mode 100644
index 00000000..d1825012
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/Debian.yml
@@ -0,0 +1,21 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages:
+ - glance
+ - nfs-common
+
+nfs_packages:
+ - nfs-common
+
+nfs_services: []
+
+services:
+ - glance-registry
+ - glance-api
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/RedHat.yml
new file mode 100644
index 00000000..2987d0c4
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/glance/vars/RedHat.yml
@@ -0,0 +1,23 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages:
+ - openstack-glance
+ - rpcbind
+
+nfs_packages:
+ - nfs-utils
+ - rpcbind
+
+nfs_services:
+ - rpcbind
+
+services:
+ - openstack-glance-api
+ - openstack-glance-registry
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/Debian.yml
new file mode 100644
index 00000000..aac51b42
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/Debian.yml
@@ -0,0 +1,22 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+cron_path: "/var/spool/cron/crontabs"
+
+packages:
+ - keystone
+ - apache2
+ - libapache2-mod-wsgi
+
+services:
+ - apache2
+
+apache_config_dir: /etc/apache2
+http_service_name: apache2
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/main.yml
new file mode 100644
index 00000000..58751dfd
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/vars/main.yml
@@ -0,0 +1,164 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages_noarch:
+ - python-keystoneclient
+
+services_noarch: []
+os_services:
+ - name: keystone
+ type: identity
+ region: regionOne
+ description: "OpenStack Identity"
+ publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
+ internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
+ adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
+
+ - name: glance
+ type: image
+ region: regionOne
+ description: "OpenStack Image Service"
+ publicurl: "http://{{ public_vip.ip }}:9292"
+ internalurl: "http://{{ internal_vip.ip }}:9292"
+ adminurl: "http://{{ internal_vip.ip }}:9292"
+
+ - name: nova
+ type: compute
+ region: regionOne
+ description: "OpenStack Compute"
+ publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+
+ - name: neutron
+ type: network
+ region: regionOne
+ description: "OpenStack Networking"
+ publicurl: "http://{{ public_vip.ip }}:9696"
+ internalurl: "http://{{ internal_vip.ip }}:9696"
+ adminurl: "http://{{ internal_vip.ip }}:9696"
+
+ - name: ceilometer
+ type: metering
+ region: regionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8777"
+ internalurl: "http://{{ internal_vip.ip }}:8777"
+ adminurl: "http://{{ internal_vip.ip }}:8777"
+
+ - name: aodh
+ type: alarming
+ region: regionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8042"
+ internalurl: "http://{{ internal_vip.ip }}:8042"
+ adminurl: "http://{{ internal_vip.ip }}:8042"
+
+ - name: cinder
+ type: volume
+ region: regionOne
+ description: "OpenStack Block Storage"
+ publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+
+ - name: cinderv2
+ type: volumev2
+ region: regionOne
+ description: "OpenStack Block Storage v2"
+ publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+
+ - name: heat
+ type: orchestration
+ region: regionOne
+ description: "OpenStack Orchestration"
+ publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+
+ - name: heat-cfn
+ type: cloudformation
+ region: regionOne
+ description: "OpenStack CloudFormation Orchestration"
+ publicurl: "http://{{ public_vip.ip }}:8000/v1"
+ internalurl: "http://{{ internal_vip.ip }}:8000/v1"
+ adminurl: "http://{{ internal_vip.ip }}:8000/v1"
+
+os_users:
+ - user: admin
+ password: "{{ ADMIN_PASS }}"
+ email: admin@admin.com
+ role: admin
+ tenant: admin
+ tenant_description: "Admin Tenant"
+
+ - user: glance
+ password: "{{ GLANCE_PASS }}"
+ email: glance@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: nova
+ password: "{{ NOVA_PASS }}"
+ email: nova@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: keystone
+ password: "{{ KEYSTONE_PASS }}"
+ email: keystone@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: neutron
+ password: "{{ NEUTRON_PASS }}"
+ email: neutron@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: ceilometer
+ password: "{{ CEILOMETER_PASS }}"
+ email: ceilometer@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: cinder
+ password: "{{ CINDER_PASS }}"
+ email: cinder@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: aodh
+ password: "{{ AODH_PASS }}"
+ email: aodh@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: heat
+ password: "{{ HEAT_PASS }}"
+ email: heat@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: demo
+ password: ""
+ email: heat@demo.com
+ role: heat_stack_user
+ tenant: demo
+ tenant_description: "Demo Tenant"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/tasks/main.yml
new file mode 100644
index 00000000..fd3e51d3
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/tasks/main.yml
@@ -0,0 +1,75 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: activate ipv4 forwarding
+ sysctl: name=net.ipv4.ip_forward value=1
+ state=present reload=yes
+
+- name: deactivate ipv4 rp filter
+ sysctl: name=net.ipv4.conf.all.rp_filter value=0
+ state=present reload=yes
+
+- name: deactivate ipv4 default rp filter
+ sysctl: name=net.ipv4.conf.default.rp_filter
+ value=0 state=present reload=yes
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install compute-related neutron packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: packages | union(packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: fix openstack neutron plugin config file
+ shell: |
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
+ systemctl daemon-reload
+ when: ansible_os_family == 'RedHat'
+
+- name: fix openstack neutron plugin config file ubuntu
+ shell: |
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
+ when: ansible_os_family == "Debian"
+
+- name: generate neutron compute service list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: services | union(services_noarch)
+
+- name: config ml2 plugin
+ template: src=templates/ml2_conf.ini
+ dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ backup=yes
+
+- name: ln plugin.ini
+ file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
+
+- name: config neutron
+ template: src=templates/neutron.conf
+ dest=/etc/neutron/neutron.conf backup=yes
+ notify:
+ - restart neutron compute service
+ - restart nova-compute services
+
+- meta: flush_handlers
+
+- include: ../../neutron-network/tasks/odl.yml
+ when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/vars/Debian.yml
new file mode 100644
index 00000000..83d7f323
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-compute/vars/Debian.yml
@@ -0,0 +1,19 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+packages:
+ - neutron-common
+ - neutron-plugin-ml2
+ - openvswitch-switch-dpdk
+ - openvswitch-switch
+ - neutron-plugin-openvswitch-agent
+
+services:
+ - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/tasks/main.yml
new file mode 100644
index 00000000..31f7f17c
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/tasks/main.yml
@@ -0,0 +1,117 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: activate ipv4 forwarding
+ sysctl: name=net.ipv4.ip_forward value=1
+ state=present reload=yes
+
+- name: deactivate ipv4 rp filter
+ sysctl: name=net.ipv4.conf.all.rp_filter value=0
+ state=present reload=yes
+
+- name: deactivate ipv4 default rp filter
+ sysctl: name=net.ipv4.conf.default.rp_filter
+ value=0 state=present reload=yes
+
+- name: assert kernel support for vxlan
+ command: modinfo -F version vxlan
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert iproute2 support for vxlan
+ command: ip link add type vxlan help
+ register: iproute_out
+ failed_when: iproute_out.rc == 255
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
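+# a policy-rc.d exiting 101 stops Debian packages from auto-starting their services during installation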
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install neutron network related packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: packages | union(packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: generate neutron network service list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: services | union(services_noarch)
+
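+# the packaged unit/init files point at plugins/ml2/openvswitch_agent.ini; rewrite them to use plugin.ini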
+- name: fix openstack neutron plugin config file
+ shell: |
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
+ systemctl daemon-reload
+ when: ansible_os_family == 'RedHat'
+
+- name: fix openstack neutron plugin config file on ubuntu
+ shell: |
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
+ when: ansible_os_family == "Debian"
+
+- name: config l3 agent
+ template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
+ backup=yes
+
+- name: config dhcp agent
+ template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
+ backup=yes
+
+- name: update dnsmasq-neutron.conf
+ template: src=templates/dnsmasq-neutron.conf
+ dest=/etc/neutron/dnsmasq-neutron.conf
+
+- name: config metadata agent
+ template: src=metadata_agent.ini
+ dest=/etc/neutron/metadata_agent.ini backup=yes
+
+- name: config ml2 plugin
+ template: src=templates/ml2_conf.ini
+ dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ backup=yes
+
+- name: ln plugin.ini
+ file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
+
+- name: config neutron
+ template: src=templates/neutron.conf
+ dest=/etc/neutron/neutron.conf backup=yes
+
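+# vxlan encapsulation adds roughly 50 bytes of overhead, so advertise a 1450-byte MTU to instances via DHCP option 26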
+- name: force mtu to 1450 for vxlan
+ lineinfile:
+ dest: /etc/neutron/dnsmasq-neutron.conf
+ regexp: '^dhcp-option-force'
+ line: 'dhcp-option-force=26,1450'
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- include: firewall.yml
+ when: enable_fwaas == True
+
+- include: vpn.yml
+ when: enable_vpnaas == True
+
+- include: odl.yml
+ when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron network related services
+ service: name={{ item }} state=restarted enabled=yes
+ with_flattened:
+ - services_noarch
+ - services
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/vars/Debian.yml
new file mode 100644
index 00000000..1a78ca8c
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/neutron-network/vars/Debian.yml
@@ -0,0 +1,25 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages:
+ - neutron-plugin-ml2
+ - openvswitch-switch-dpdk
+ - openvswitch-switch
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+ - neutron-plugin-openvswitch-agent
+
+services:
+ - openvswitch-switch
+ - neutron-openvswitch-agent
+
+openvswitch_agent: neutron-plugin-openvswitch-agent
+
+xorp_packages:
+ - xorp
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/tasks/main.yml
new file mode 100644
index 00000000..c177001d
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-compute/tasks/main.yml
@@ -0,0 +1,59 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install nova-compute related packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: packages | union(packages_noarch)
+
+- name: ensure virtlogd is started and enabled
+ service: name=virtlogd state=started enabled=yes
+ when: ansible_os_family == "Debian"
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: update nova-compute conf
+ template: src=templates/{{ item }} dest=/etc/nova/{{ item }}
+ with_items:
+ - nova.conf
+ notify:
+ - restart nova-compute services
+
+- name: update nova-compute conf
+ template: src={{ item }} dest=/etc/nova/{{ item }}
+ with_items:
+ - nova-compute.conf
+ notify:
+ - restart nova-compute services
+
+- name: generate nova compute service list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: services | union(services_noarch)
+#'
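+# '|| touch' keeps the task from failing when the sqlite file has already been removed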
+- name: remove nova sqlite db
+ shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
+
+- meta: flush_handlers
+
+- name: restart nova-compute and libvirt-bin
+ shell: >
+ service nova-compute restart;
+ service libvirt-bin restart;
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-controller/tasks/nova_config.yml
new file mode 100644
index 00000000..f332c97a
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/nova-controller/tasks/nova_config.yml
@@ -0,0 +1,21 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: nova api db sync
+ shell: su -s /bin/sh -c "nova-manage api_db sync" nova
+ ignore_errors: True
+ notify:
+ - restart nova service
+
+- name: nova db sync
+ nova_manage: action=dbsync
+ notify:
+ - restart nova service
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml
new file mode 100755
index 00000000..33099104
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml
@@ -0,0 +1,148 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+#- name: Install Crudini
+# apt: name={{ item }} state=present
+# with_items:
+# - crudini
+
+- name: install compute packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: compute_packages | union(compute_packages_noarch)
+
+- name: remove neutron-openvswitch-agent from the service list
+ shell: sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ;
+
+- name: shut down and disable Neutron's openvswitch agent services
+ service: name={{ service_ovs_agent_name }} state=stopped enabled=no
+
+- name: remove Neutron's openvswitch agent services
+ shell: >
+ update-rc.d -f {{ service_ovs_agent_name }} remove;
+ mv /etc/init.d/{{ service_ovs_agent_name }} /home/{{ service_ovs_agent_name }};
+ mv /etc/init/{{ service_ovs_agent_name }}.conf /home/{{ service_ovs_agent_name }}.conf;
+ when: ansible_os_family == "Debian"
+
+
+- name: Stop the Open vSwitch service and clear existing OVSDB
+ shell: >
+ service {{ service_ovs_name }} stop ;
+ rm -rf /var/log/openvswitch/* ;
+ rm -rf /etc/openvswitch/conf.db ;
+ service {{ service_ovs_name }} start ;
+
+- name: set opendaylight as the manager
+ command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
+
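+# once opendaylight manages ovs it creates br-int; poll until the bridge shows up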
+- name: check br-int
+ shell: ovs-vsctl list-br | grep br-int; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-int; done
+
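+# other_config:local_ip is the tunnel endpoint address opendaylight uses when building vxlan tunnels for this node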
+- name: set local ip in openvswitch
+ shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '};
+
+#'
+
+##################################################################
+########### Recover External network for odl l3 #################
+##################################################################
+
+- name: check br-ex
+ shell: ovs-vsctl list-br | grep br-ex; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-ex; done
+ when: odl_l3_agent == "Enable"
+
+- name: add ovs uplink
+ openvswitch_port: bridge=br-ex port={{ item["interface"] }} state=present
+ with_items: "{{ network_cfg['provider_net_mappings'] }}"
+ when: item["type"] == "ovs" and odl_l3_agent == "Enable"
+
+- name: wait 10 seconds
+ shell: sleep 10
+ when: odl_l3_agent == "Enable"
+
+- name: set external nic in openvswitch
+ shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config:provider_mappings=br-ex:{{ item["interface"] }}
+ with_items: "{{ network_cfg['provider_net_mappings'] }}"
+ when: item["type"] == "ovs" and odl_l3_agent == "Enable"
+
+- name: copy recovery script
+ copy: src={{ item }} dest=/opt/setup_networks
+ with_items:
+ - recover_network_odl_l3.py
+ - setup_networks_odl_l3.py
+ when: odl_l3_agent == "Enable"
+
+- name: recover external script
+ shell: python /opt/setup_networks/recover_network_odl_l3.py
+ when: odl_l3_agent == "Enable"
+
+- name: update keepalived info
+ template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf
+ when: inventory_hostname in groups['odl'] and odl_l3_agent == "Enable"
+
+- name: modify net-init
+ shell: sed -i 's/setup_networks.py/setup_networks_odl_l3.py/g' /etc/init.d/net_init
+ when: odl_l3_agent == "Enable"
+
+##################################################################
+########### Recover External network for odl l2 #################
+##################################################################
+
+- name: add ovs bridge
+ openvswitch_bridge: bridge={{ item["name"] }} state=present
+ with_items: "{{ network_cfg['provider_net_mappings'] }}"
+ when: item["type"] == "ovs" and odl_l3_agent == "Disable"
+
+- name: add ovs uplink
+ openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
+ with_items: "{{ network_cfg['provider_net_mappings'] }}"
+ when: item["type"] == "ovs" and odl_l3_agent == "Disable"
+
+- name: copy recovery script
+ copy: src={{ item }} dest=/opt/setup_networks
+ with_items:
+ - recover_network.py
+ when: odl_l3_agent == "Disable"
+
+- name: recover external script
+ shell: python /opt/setup_networks/recover_network.py
+ when: odl_l3_agent == "Disable"
+
+##################################################################
+
+
+- name: restart keepalived to recover external IP
+ shell: service keepalived restart
+ when: inventory_hostname in groups['odl']
+ ignore_errors: True
+
+
+
+##################################################################
+##################################################################
+##################################################################
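+# switch the ml2 plugin over to the opendaylight mechanism driver with vxlan tenant networks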
+- name: configure opendaylight -> ml2
+ shell: >
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs enable_tunneling True;
+
+#- name: Adjust Service Daemon
+# shell: >
+# sed -i '/neutron-openvswitch-agent/d' /opt/service ;
+# echo opendaylight >> /opt/service ;
+
+- name: copy ml2 configuration script
+ template:
+ src: ml2_conf.sh
+ dest: "/opt/ml2_conf.sh"
+ mode: 0777
+
+- name: execute ml2 configuration script
+ command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/Debian.yml
new file mode 100755
index 00000000..a3d5dd02
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/Debian.yml
@@ -0,0 +1,23 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+controller_packages:
+# - openjdk-7-jdk
+ - crudini
+
+compute_packages:
+ - crudini
+
+service_ovs_name: openvswitch-switch
+service_ovs_agent_name: neutron-openvswitch-agent
+
+service_file:
+ src: opendaylight.conf
+ dst: /etc/init/opendaylight.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/handlers/main.yml
new file mode 100755
index 00000000..e099fcf4
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/handlers/main.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart onos service
+ service: name=onos state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/main.yml
new file mode 100755
index 00000000..c8ce1155
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/main.yml
@@ -0,0 +1,51 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: remove neutron-plugin-openvswitch-agent auto start
+ shell: >
+ update-rc.d neutron-plugin-openvswitch-agent remove;
+ sed -i /neutron-plugin-openvswitch-agent/d /opt/service
+ when: groups['onos']|length !=0
+ ignore_errors: True
+
+- name: shut down and disable Neutron's agent services
+ service: name=neutron-plugin-openvswitch-agent state=stopped
+ when: groups['onos']|length !=0
+ ignore_errors: True
+
+- name: remove neutron-l3-agent auto start
+ shell: >
+ update-rc.d neutron-l3-agent remove;
+ sed -i /neutron-l3-agent/d /opt/service
+ when: inventory_hostname in groups['onos']
+ ignore_errors: True
+
+- name: shut down and disable Neutron's l3 agent services
+ service: name=neutron-l3-agent state=stopped
+ when: inventory_hostname in groups['onos']
+ ignore_errors: True
+
+- name: Stop the Open vSwitch service and clear existing OVSDB
+ shell: >
+ ovs-vsctl del-br br-int ;
+ ovs-vsctl del-br br-tun ;
+ ovs-vsctl del-manager ;
+ ip link delete onos_port1 type veth peer name onos_port2;
+ when: groups['onos']|length !=0
+ ignore_errors: True
+
+- name: Install ONOS Cluster on Controller
+ include: onos_controller.yml
+ when: inventory_hostname in groups['onos']
+
+- name: Config ONOS Cluster
+ include: openvswitch.yml
+ when: groups['onos']|length !=0
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/onos_controller.yml
new file mode 100755
index 00000000..d51151a9
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/onos_controller.yml
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: get image http server
+ shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
+ register: http_server
+
+- name: download onos driver packages
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
+
+- name: download onos sfc driver package
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}" dest=/opt/
+
+- name: unarchive onos driver package
+ command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
+
+- name: unarchive onos sfc driver package
+ command: su -s /bin/sh -c "tar xvf /opt/networking-sfc.tar -C /opt/"
+
+- name: install onos driver
+ command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh"
+
+- name: install onos sfc driver
+ command: su -s /bin/sh -c "/opt/networking-sfc/install_driver.sh"
+
+- name: install onos required packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: packages
+
+- name: download oracle-jdk8 package file
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
+
+- name: download oracle-jdk8 script file
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
+
+- name: unarchive jdk8 install package
+ command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
+
+- name: run jdk8 install script
+ command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
+
+- name: create JAVA_HOME environment variable
+ shell: >
+ export J2SDKDIR=/usr/lib/jvm/java-8-oracle;
+ export J2REDIR=/usr/lib/jvm/java-8-oracle/jre;
+ export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin;
+ export JAVA_HOME=/usr/lib/jvm/java-8-oracle;
+ export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db;
+
+- name: create onos group
+ group: name=onos system=yes state=present
+
+- name: create onos user
+ user:
+ name: onos
+ group: onos
+ home: "{{ onos_home }}"
+ createhome: "yes"
+ system: "yes"
+ shell: "/bin/false"
+
+- name: download onos package
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }}
+
+- name: create new jar repository
+ command: su -s /bin/sh -c "mkdir ~/.m2"
+ ignore_errors: True
+
+- name: download jar repository
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
+
+- name: extract jar repository
+ command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
+
+- name: extract onos package
+ command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos
+
+- name: configure onos service
+ shell: >
+ echo 'export ONOS_OPTS=debug' > {{ onos_home }}/options;
+ echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
+ mkdir {{ onos_home }}/var;
+ mkdir {{ onos_home }}/config;
+ sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
+ cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
+ cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
+
+- name: configure onos boot feature
+ shell: >
+ sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
+
+- name: wait for config time
+ shell: "sleep 10"
+
+- name: start onos service
+ service: name=onos state=started enabled=yes
+
+- name: wait for onos start time
+ shell: "sleep 200"
+
+- name: add onos auto start
+ shell: >
+ echo "onos">>/opt/service
+
+##########################################################################################################
+################################ ONOS connect with OpenStack ################################
+##########################################################################################################
+- name: Configure Neutron1
+ shell: >
+ crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_sfc.services.sfc.plugin.SfcPlugin, networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin, onos_router;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
+
+- name: Create ML2 Configuration File
+ template:
+ src: ml2_conf.sh
+ dest: "/opt/ml2_conf.sh"
+ mode: 0777
+
+- name: Configure Neutron2
+ command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
+
+- name: Configure Neutron3
+ shell: >
+ mysql -e "drop database if exists neutron_ml2;";
+ mysql -e "create database neutron_ml2 character set utf8;";
+ mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
+ su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
+ su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
+
+- name: Restart neutron-server
+ service: name=neutron-server state=restarted
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/openvswitch.yml
new file mode 100755
index 00000000..aac787ea
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/tasks/openvswitch.yml
@@ -0,0 +1,57 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
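+# create a veth pair; onos_port2 is later registered as the onos external port (see externalportname-set below)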
+- name: set veth port
+ shell: >
+ ip link add onos_port1 type veth peer name onos_port2;
+ ifconfig onos_port1 up;
+ ifconfig onos_port2 up;
+ ignore_errors: True
+
+- name: add openflow-base feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
+ when: inventory_hostname in groups['onos']
+
+- name: add openflow feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
+ when: inventory_hostname in groups['onos']
+
+- name: add ovsdatabase feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
+ when: inventory_hostname in groups['onos']
+
+- name: add ovsdb-base feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'";
+ when: inventory_hostname in groups['onos']
+
+- name: add onos driver ovsdb feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'";
+ when: inventory_hostname in groups['onos']
+
+- name: add ovsdb provider host feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'";
+ when: inventory_hostname in groups['onos']
+
+- name: add vtn feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'";
+ when: inventory_hostname in groups['onos']
+
+- name: set public eth card start
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n onos_port2'"
+ when: inventory_hostname in groups['onos']
+
+- name: Set ONOS as the manager
+ command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
+
+- name: delete default gateway
+ shell: >
+ route delete default;
+ when: inventory_hostname not in groups['onos']
+ ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/templates/ml2_conf.sh
new file mode 100755
index 00000000..8af03df4
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/templates/ml2_conf.sh
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
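+# append the [onos] section so the ml2 driver can reach the onos vtn rest api on the first onos node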
+cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
+[onos]
+password = admin
+username = admin
+url_path = http://{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:8181/onos/vtn
+EOT
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/Debian.yml
new file mode 100755
index 00000000..59a4dbd9
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/Debian.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages:
+ - software-properties-common
+ - crudini
+
+services: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/RedHat.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/RedHat.yml
new file mode 100755
index 00000000..59a4dbd9
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/RedHat.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages:
+ - software-properties-common
+ - crudini
+
+services: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/main.yml
new file mode 100755
index 00000000..f11f1102
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/onos_cluster/vars/main.yml
@@ -0,0 +1,19 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+onos_pkg_name: onos-1.6.0.tar.gz
+onos_home: /opt/onos/
+karaf_dist: apache-karaf-3.0.5
+jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
+jdk8_script_name: install_jdk8.tar
+onos_driver: networking-onos.tar
+onos_sfc_driver: networking-sfc.tar
+repository: repository.tar
+onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base, onos-openflow, onos-ovsdatabase, onos-ovsdb-base, onos-drivers-ovsdb, onos-ovsdb-provider-host, onos-app-vtn-onosfw
+
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml
new file mode 100755
index 00000000..836cb78b
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml
@@ -0,0 +1,46 @@
+---
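+# the open-contrail vrouter replaces the ovs data plane, so tear down neutron's ovs agent and bridges first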
+- name: del ovs bridge
+ shell: ovs-vsctl del-br br-int; ovs-vsctl del-br br-tun; ovs-vsctl del-br br-prv;
+
+- name: remove ovs and ovs-plugin daemon
+ shell: >
+ sed -i '/neutron-openvswitch-agent/d' /opt/service ;
+ sed -i '/openvswitch-switch/d' /opt/service ;
+
+- name: stop ovs and ovs-plugin
+ shell: service openvswitch-switch stop; service neutron-openvswitch-agent stop;
+
+- name: remove ovs and ovs-plugin files
+ shell: >
+ update-rc.d -f neutron-openvswitch-agent remove;
+ mv /etc/init.d/neutron-openvswitch-agent /home/neutron-openvswitch-agent;
+ mv /etc/init/neutron-openvswitch-agent.conf /home/neutron-openvswitch-agent.conf;
+ update-rc.d -f openvswitch-switch remove ;
+ mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ;
+ mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ;
+ update-rc.d -f neutron-ovs-cleanup remove ;
+ mv /etc/init.d/neutron-ovs-cleanup /home/neutron-ovs-cleanup ;
+ mv /etc/init/neutron-ovs-cleanup.conf /home/neutron-ovs-cleanup.conf ;
+
+- name: remove ovs kernel module
+ shell: rmmod vport_vxlan; rmmod openvswitch;
+ ignore_errors: True
+
+- name: copy recovery script
+ copy: src={{ item }} dest=/opt/setup_networks
+ with_items:
+# - recover_network_opencontrail.py
+ - setup_networks_opencontrail.py
+
+#- name: recover external script
+# shell: python /opt/setup_networks/recover_network_opencontrail.py
+
+- name: modify net-init
+ shell: sed -i 's/setup_networks.py/setup_networks_opencontrail.py/g' /etc/init.d/net_init
+
+- name: resolve dual NIC problem
+ shell: >
+ echo "net.ipv4.conf.all.arp_ignore=1" >> /etc/sysctl.conf ;
+ /sbin/sysctl -p ;
+ echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore ;
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/vars/Debian.yml
new file mode 100644
index 00000000..221a3d92
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/vars/Debian.yml
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+configs_templates:
+ - src: nova.j2
+ dest:
+ - /etc/nova/nova.conf
+ - src: neutron.j2
+ dest:
+ - /etc/neutron/plugins/ml2/ml2_conf.ini
+ - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
+ - /etc/neutron/plugins/ml2/restproxy.ini
+
+controller_services:
+ - nova-api
+ - nova-cert
+ - nova-conductor
+ - nova-consoleauth
+ - nova-novncproxy
+ - nova-scheduler
+ - neutron-server
+ - neutron-openvswitch-agent
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+ - neutron-metadata-agent
+
+compute_services:
+ - nova-compute
+ - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/setup-network/files/setup_networks/net_init b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/setup-network/files/setup_networks/net_init
new file mode 100755
index 00000000..41ccb988
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/setup-network/files/setup_networks/net_init
@@ -0,0 +1,24 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: anamon.init
+# Required-Start: $network
+# Required-Stop:
+# Should-Start:
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Runs the network setup script at boot
+# Description: executes /opt/setup_networks/setup_networks.py once networking is available.
+### END INIT INFO
+
+
+
+#
+# net_init: runs the network setup script after installation
+#
+# chkconfig: 35 0 6
+#
+# description: executes /opt/setup_networks/setup_networks.py to configure
+# host networking after installation.
+#
+python /opt/setup_networks/setup_networks.py
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/storage/files/storage b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/storage/files/storage
new file mode 100755
index 00000000..3acc6115
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/storage/files/storage
@@ -0,0 +1,10 @@
+#! /bin/bash
+### BEGIN INIT INFO
+# Provides: Storage
+# Required-Start: $remote_fs $network
+# Required-Stop: $remote_fs $network
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Description: Storage
+### END INIT INFO
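+# run the helper from /opt/setup_storage and capture the loop device it reports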
+loop_dev=`sh /opt/setup_storage/losetup.sh`
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2
new file mode 100644
index 00000000..f1d9125b
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2
@@ -0,0 +1,426 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = True
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = True
+
+# Where to store Tacker state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/tacker
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+auth_strategy = keystone
+policy_file = /usr/local/etc/tacker/policy.json
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ internal_ip }}
+
+# Port to bind the API server to
+bind_port = 8888
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of tacker.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Tacker core plugin entrypoint to be loaded from the
+# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the tacker source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+# core_plugin =
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the tacker source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+service_plugins = vnfm,nfvo
+
+# Paste configuration file
+# api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+# auth_strategy = keystone
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Tacker is
+# being used in conjunction with nova security groups
+# allow_overlapping_ips = False
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = tacker.openstack.common.rpc.impl_kombu
+# Size of RPC thread pool
+# rpc_thread_pool_size = 64
+# Size of RPC connection pool
+# rpc_conn_pool_size = 30
+# Seconds to wait for a response from call or multicall
+# rpc_response_timeout = 60
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+# rpc_cast_timeout = 30
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = tacker
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# IP address of the RabbitMQ installation
+# rabbit_host = localhost
+# Password of the RabbitMQ server
+# rabbit_password = guest
+# Port where RabbitMQ server is running/listening
+# rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+# rabbit_userid = guest
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+
+# QPID
+# rpc_backend=tacker.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=tacker.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = tacker.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = tacker.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = tacker.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+# default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+# notification_topics = notifications
+
+# Default maximum number of items returned in a single response,
+# value == infinite and value < 0 means no max limit, and value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will just return pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+# agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# tacker server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to tacker server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+# api_workers = 0
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+# rpc_workers = 0
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== tacker nova interactions ==========
+# Send notification to nova when port status is active.
+# notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+# notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+# nova_url = http://127.0.0.1:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+# nova_region_name =
+
+# Username for connection to nova in admin context
+# nova_admin_username =
+
+# The uuid of the admin nova tenant
+# nova_admin_tenant_id =
+
+# Password for connection to nova in admin context.
+# nova_admin_password =
+
+# Authorization URL for connection to nova in admin context.
+# nova_admin_auth_url =
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova url
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of tacker nova interactions ==========
+
+[agent]
+# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+signing_dir = /var/cache/tacker
+#cafile = /opt/stack/data/ca-bundle.pem
+#project_domain_id = default
+project_name = service
+#user_domain_id = default
+password = console
+username = tacker
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_plugin = password
+identity_uri = http://{{ internal_vip.ip }}:5000
+auth_uri = http://{{ internal_vip.ip }}:5000
+
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/tacker
+connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main tacker server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[tacker]
+# Specify drivers for hosting device
+# infra_driver = heat,nova,noop
+
+# Specify drivers for mgmt
+# mgmt_driver = noop,openwrt
+
+# Specify drivers for monitoring
+# monitor_driver = ping, http_ping
+
+[nfvo_vim]
+# Supported VIM drivers, resource orchestration controllers such as OpenStack, kvm
+# Default VIM driver is OpenStack
+#vim_drivers = openstack
+# Default VIM placement if vim id is not provided
+default_vim = VIM0
+
+[vim_keys]
+#openstack = /etc/tacker/vim/fernet_keys
+[tacker_nova]
+# parameters for novaclient to talk to nova
+region_name = RegionOne
+#project_domain_id = default
+project_name = service
+#user_domain_id = default
+password = console
+username = nova
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_plugin = password
+
+[tacker_heat]
+heat_uri = http://{{ internal_vip.ip }}:8004/v1
+stack_retries = 60
+stack_retry_wait = 5
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/templates/neutron.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/templates/neutron.conf
new file mode 100644
index 00000000..5e33d3f7
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/templates/neutron.conf
@@ -0,0 +1,486 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ VERBOSE }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+notify_nova_on_port_status_changes = True
+notify_nova_on_port_data_changes = True
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
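+# A hypothetical multi-topic example (the second topic name is a placeholder):
+# notification_topics = notifications,monitor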
+
+# Default maximum number of items returned in a single response.
+# A value of 'infinite' or a negative value means no limit; otherwise the
+# value must be greater than 0. If the number of items requested is greater
+# than pagination_max_limit, the server will return only pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to DHCP agents. Non-hosted networks will
+# be scheduled to the first DHCP agent that sends a get_active_networks
+# message to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to L3 agents. Non-hosted routers will be
+# scheduled to the first L3 agent that sends a sync_routers message to the
+# neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
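+# For example, with two or more DHCP agents deployed, the following would give
+# each network a redundant DHCP agent:
+# dhcp_agents_per_network = 2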
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) changes
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ internal_vip.ip }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = regionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
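+# NOVA_ADMIN_TENANT_ID is assumed to be an Ansible-registered command result;
+# the first line of its stdout is used as the tenant uuid below.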
+{% if NOVA_ADMIN_TENANT_ID|default('') %}
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+{% endif %}
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resources allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end a member does consume resources,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end a health monitor does
+# consume resources, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less of agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries if connectivity is lost;
+# -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 30
+use_db_reconnect = True
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multi-line option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
+
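+{# The [fwaas] section below is rendered only when enable_fwaas (assumed to be a deploy-time boolean variable) is set. #}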
+{% if enable_fwaas %}
+[fwaas]
+driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
+enabled = True
+{% endif %}
+
+[nova]
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = nova
+password = {{ NOVA_PASS }}
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/templates/nova.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/templates/nova.conf
new file mode 100644
index 00000000..3a5735cf
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/templates/nova.conf
@@ -0,0 +1,96 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
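+{# memcached_servers now holds a comma-separated "host:11211" list built from haproxy_hosts, assumed to map controller names to their addresses. #}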
+
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lib/nova/tmp
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=osapi_compute,metadata
+
+default_floating_pool={{ public_net_info.network }}
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+osapi_compute_listen={{ internal_ip }}
+metadata_listen={{ internal_ip }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
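+# notification_driver is a multi-valued option; both drivers below receive
+# notifications.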
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+memcached_servers = {{ memcached_servers }}
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+idle_timeout = 30
+use_db_reconnect = True
+pool_timeout = 10
+
+[api_database]
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
+idle_timeout = 30
+use_db_reconnect = True
+pool_timeout = 10
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+memcached_servers = {{ memcached_servers }}
+
+[glance]
+host = {{ internal_vip.ip }}
+
+[neutron]
+url = http://{{ internal_vip.ip }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
+service_metadata_proxy = True
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
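+# The shared secret above must match the metadata_proxy_shared_secret
+# configured for the neutron metadata agent.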
+auth_type = password
+auth_url = http://{{ internal_vip.ip }}:35357
+password = {{ NEUTRON_PASS }}
+username = neutron
+project_domain_name = default
+user_domain_name = default