author    chenshuai@huawei.com <chenshuai@huawei.com>    2016-07-28 09:42:27 +0000
committer Gerrit Code Review <gerrit@172.30.200.206>     2016-07-28 09:42:27 +0000
commit    9e6d6e9715bf4c1e772b74d3886eea88860526f4 (patch)
tree      9c513e2ddda5a1795d4d2fd8e3b3a759ecc63ed2 /deploy
parent    8adc3eab76774dc5b1486aae8194708a39eabec7 (diff)
parent    dab3f653a973223dfcddc3d1b506266d7b83a6e1 (diff)
Merge "add swift and moon in Compass"
Diffstat (limited to 'deploy')
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml | 103
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml | 3
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/ha/templates/haproxy.cfg | 217
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/deb.conf | 11
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/deb.conf.bak | 11
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/get_deb_depends.py | 22
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/handlers/main.yml | 12
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/tasks/main.yml | 212
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/admin-openrc.sh | 15
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/demo-openrc.sh | 13
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/keystone-paste.ini | 96
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/keystone.conf | 59
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/wsgi-keystone.conf.j2 | 46
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/vars/Debian.yml | 168
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/vars/main.yml | 172
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/files/deb.conf | 11
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/files/get_deb_depends.py | 22
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/handlers/main.yml | 12
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/main.yml | 18
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/moon-compute.yml | 17
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/moon-controller.yml | 61
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/admin-openrc.sh | 15
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/api-paste.ini | 106
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/demo-openrc.sh | 13
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/keystone-paste.ini | 96
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/keystone.conf | 59
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/wsgi-keystone.conf.j2 | 46
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/vars/Debian.yml | 33
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/vars/main.yml | 172
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/main.yml | 37
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-compute1.yml | 66
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller1.yml | 34
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller2.yml | 93
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/account-server.conf | 200
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/container-server.conf | 229
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/object-server.conf | 347
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/proxy-server.conf | 771
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/rsyncd.conf | 23
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/swift.conf | 183
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/Debian.yml | 27
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml | 15
41 files changed, 3819 insertions, 47 deletions
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
index 82888e36..61cf1c69 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
@@ -64,25 +64,11 @@
- apache
- database
- mq
- - keystone
+# - keystone
+ - moon-controller
- nova-controller
- neutron-controller
- - cinder-controller
- - glance
- - neutron-common
- - neutron-network
- - ceilometer_controller
-# - ext-network
- - dashboard
- - heat
- - aodh
-
-- hosts: all
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles:
- - storage
+# - cinder-controller
- hosts: compute
remote_user: root
@@ -90,52 +76,88 @@
max_fail_percentage: 0
roles:
- nova-compute
- - neutron-compute
- - cinder-volume
- - ceilometer_compute
- hosts: all
remote_user: root
accelerate: true
max_fail_percentage: 0
roles:
- - secgroup
-
-- hosts: ceph_adm
- remote_user: root
- accelerate: true
- max_fail_percentage: 0
- roles: []
- # - ceph-deploy
+ - swift
+ - moon-post
-- hosts: ceph
+- hosts: controller
remote_user: root
accelerate: true
max_fail_percentage: 0
roles:
- - ceph-purge
- - ceph-config
+ - glance
+ - neutron-common
+ - neutron-network
+ - ceilometer_controller
+# - ext-network
+ - dashboard
+ - heat
+ - aodh
-- hosts: ceph_mon
+- hosts: all
remote_user: root
accelerate: true
max_fail_percentage: 0
roles:
- - ceph-mon
+ - storage
-- hosts: ceph_osd
+- hosts: compute
remote_user: root
accelerate: true
max_fail_percentage: 0
roles:
- - ceph-osd
+# - nova-compute
+ - neutron-compute
+# - cinder-volume
+ - ceilometer_compute
-- hosts: ceph
+- hosts: all
remote_user: root
accelerate: true
max_fail_percentage: 0
roles:
- - ceph-openstack
+ - secgroup
+
+#- hosts: ceph_adm
+# remote_user: root
+# accelerate: true
+# max_fail_percentage: 0
+# roles: []
+# # - ceph-deploy
+#
+#- hosts: ceph
+# remote_user: root
+# accelerate: true
+# max_fail_percentage: 0
+# roles:
+# - ceph-purge
+# - ceph-config
+#
+#- hosts: ceph_mon
+# remote_user: root
+# accelerate: true
+# max_fail_percentage: 0
+# roles:
+# - ceph-mon
+#
+#- hosts: ceph_osd
+# remote_user: root
+# accelerate: true
+# max_fail_percentage: 0
+# roles:
+# - ceph-osd
+#
+#- hosts: ceph
+# remote_user: root
+# accelerate: true
+# max_fail_percentage: 0
+# roles:
+# - ceph-openstack
- hosts: all
remote_user: root
@@ -228,13 +250,6 @@
roles:
- odl_cluster_post
-#- hosts: controller
-# remote_user: root
-# accelerate: true
-# max_fail_percentage: 0
-# roles:
-# - moon
-
- hosts: controller
remote_user: root
accelerate: true
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml
index b749ffaa..2a3c3249 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ceilometer_controller/vars/Debian.yml
@@ -28,9 +28,6 @@ ceilometer_configs_templates:
- src: ceilometer.j2
dest:
- /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- src: glance.j2
dest:
- /etc/glance/glance-api.conf
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ha/templates/haproxy.cfg
new file mode 100644
index 00000000..4632c25d
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/ha/templates/haproxy.cfg
@@ -0,0 +1,217 @@
+
+global
+ #chroot /var/run/haproxy
+ daemon
+ user haproxy
+ group haproxy
+ maxconn 4000
+ pidfile /var/run/haproxy/haproxy.pid
+ #log 127.0.0.1 local0
+ tune.bufsize 1000000
+ stats socket /var/run/haproxy.sock
+ stats timeout 2m
+
+defaults
+ log global
+ maxconn 8000
+ option redispatch
+ option dontlognull
+ option splice-auto
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 50s
+ timeout server 50s
+ timeout check 10s
+ retries 3
+
+listen proxy-mysql
+ bind {{ internal_vip.ip }}:3306
+ option tcpka
+ option tcplog
+ balance source
+{% for host, ip in haproxy_hosts.items() %}
+{% if loop.index == 1 %}
+ server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5
+{% else %}
+ server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5 backup
+{% endif %}
+{% endfor %}
+
+listen proxy-rabbit
+ bind {{ internal_vip.ip }}:5672
+ bind {{ public_vip.ip }}:5672
+
+ option tcpka
+ option tcplog
+ timeout client 3h
+ timeout server 3h
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:5672 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-glance_registry_cluster
+ bind {{ internal_vip.ip }}:9191
+ bind {{ public_vip.ip }}:9191
+ option tcpka
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:9191 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-glance_api_cluster
+ bind {{ internal_vip.ip }}:9292
+ bind {{ public_vip.ip }}:9292
+ option tcpka
+ option tcplog
+ option httpchk
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:9292 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova-novncproxy
+ bind {{ internal_vip.ip }}:6080
+ bind {{ public_vip.ip }}:6080
+ option tcpka
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:6080 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-network
+ bind {{ internal_vip.ip }}:9696
+ bind {{ public_vip.ip }}:9696
+ option tcpka
+ option tcplog
+ balance source
+ option httpchk
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:9696 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-volume
+ bind {{ internal_vip.ip }}:8776
+ bind {{ public_vip.ip }}:8776
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-keystone_admin_cluster
+ bind {{ internal_vip.ip }}:35357
+ bind {{ public_vip.ip }}:35357
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:35357 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-keystone_public_internal_cluster
+ bind {{ internal_vip.ip }}:5000
+ bind {{ public_vip.ip }}:5000
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:5000 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova_compute_api_cluster
+ bind {{ internal_vip.ip }}:8774
+ bind {{ public_vip.ip }}:8774
+ mode tcp
+ option httpchk
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:8774 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova_metadata_api_cluster
+ bind {{ internal_vip.ip }}:8775
+ bind {{ public_vip.ip }}:8775
+ option tcpka
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:8775 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-cinder_api_cluster
+ bind {{ internal_vip.ip }}:8776
+ bind {{ public_vip.ip }}:8776
+ mode tcp
+ option httpchk
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-swift-proxy
+ bind {{ internal_vip.ip }}:8080
+ bind {{ public_vip.ip }}:8080
+ mode tcp
+ option httpchk
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:8080 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-ceilometer_api_cluster
+ bind {{ internal_vip.ip }}:8777
+ bind {{ public_vip.ip }}:8777
+ mode tcp
+ option tcp-check
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:8777 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-aodh_api_cluster
+ bind {{ internal_vip.ip }}:8042
+ bind {{ public_vip.ip }}:8042
+ mode tcp
+ option tcp-check
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:8042 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-dashboarad
+ bind {{ public_vip.ip }}:80
+ mode http
+ balance source
+ capture cookie vgnvisitor= len 32
+ cookie SERVERID insert indirect nocache
+ option forwardfor
+ option httpchk
+ option httpclose
+ rspidel ^Set-cookie:\ IP=
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:80 cookie {{ host }} weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen stats
+ mode http
+ bind 0.0.0.0:9999
+ stats enable
+ stats refresh 30s
+ stats uri /
+ stats realm Global\ statistics
+ stats auth admin:admin
+
+
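[Editor's sketch] The server lines in each listen block above come from the same Jinja2 loop; in the proxy-mysql block the first entry of haproxy_hosts becomes the primary backend and every later entry is tagged "backup". A minimal sketch of how that loop renders, assuming the jinja2 library and two hypothetical hosts:

from jinja2 import Template

# Same loop shape as the proxy-mysql block; host names and IPs are made up.
tmpl = Template(
    "{% for host, ip in haproxy_hosts.items() %}"
    "server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5"
    "{% if loop.index > 1 %} backup{% endif %}\n"
    "{% endfor %}"
)
print(tmpl.render(haproxy_hosts={"host1": "10.1.0.50", "host2": "10.1.0.51"}))
# server host1 10.1.0.50:3306 weight 1 check inter 2000 rise 2 fall 5
# server host2 10.1.0.51:3306 weight 1 check inter 2000 rise 2 fall 5 backup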
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/deb.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/deb.conf
new file mode 100644
index 00000000..6e1159a1
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/deb.conf
@@ -0,0 +1,11 @@
+keystone/admin-password: password
+keystone/auth-token: password
+keystone/admin-password-confirm: password
+keystone/admin-email: root@localhost
+keystone/admin-role-name: admin
+keystone/admin-user: admin
+keystone/create-admin-tenant: false
+keystone/region-name: Orange
+keystone/admin-tenant-name: admin
+keystone/register-endpoint: false
+keystone/configure_db: false
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/deb.conf.bak b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/deb.conf.bak
new file mode 100644
index 00000000..6e1159a1
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/deb.conf.bak
@@ -0,0 +1,11 @@
+keystone/admin-password: password
+keystone/auth-token: password
+keystone/admin-password-confirm: password
+keystone/admin-email: root@localhost
+keystone/admin-role-name: admin
+keystone/admin-user: admin
+keystone/create-admin-tenant: false
+keystone/region-name: Orange
+keystone/admin-tenant-name: admin
+keystone/register-endpoint: false
+keystone/configure_db: false
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/get_deb_depends.py b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/get_deb_depends.py
new file mode 100644
index 00000000..05fc5d46
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/files/get_deb_depends.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import subprocess
+
+pkts = []
+
+for arg in sys.argv[1:]:
+ proc = subprocess.Popen(["dpkg-deb", "--info", arg], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out = proc.stdout.read()
+ err = proc.stderr.read()
+ if err:
+ print("An error occurred with {} ({})".format(arg, err))
+ continue
+ for line in out.splitlines():
+ line = line.decode('utf-8')
+ if " Depends:" in line:
+ line = line.replace(" Depends:", "")
+ for _dep in line.split(','):
+ pkts.append(_dep.split()[0])
+
+print(" ".join(pkts))
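[Editor's sketch] get_deb_depends.py feeds the "apt-get install" task in the role: it prints the first token of every entry in each package's Depends: field. An equivalent sketch of the same logic with subprocess.run (Python 3.7+), which collects output and exit status in one call instead of reading from live Popen pipes:

import subprocess
import sys

pkgs = []
for deb in sys.argv[1:]:
    # dpkg-deb --info prints the control file, including the Depends: field
    proc = subprocess.run(["dpkg-deb", "--info", deb],
                          capture_output=True, text=True)
    if proc.returncode != 0:
        print("An error occurred with {} ({})".format(deb, proc.stderr.strip()))
        continue
    for line in proc.stdout.splitlines():
        if " Depends:" in line:
            for dep in line.replace(" Depends:", "").split(","):
                # keep only the package name, dropping version constraints
                pkgs.append(dep.split()[0])
print(" ".join(pkgs))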
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/handlers/main.yml
new file mode 100755
index 00000000..608a8a09
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/handlers/main.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart keystone services
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: services | union(services_noarch)
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/tasks/main.yml
new file mode 100644
index 00000000..437a63c2
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/tasks/main.yml
@@ -0,0 +1,212 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+# install all packages
+- name: install keystone packages
+ shell: apt-get install -y python-pip unzip
+
+# download master.zip
+- name: get image http server
+ shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
+ register: http_server
+
+- name: download keystone-moon packages
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/master.zip" dest=/tmp/master.zip mode=0444
+
+- name: extract keystone-moon packages
+ unarchive: src=/tmp/master.zip dest=/tmp copy=no
+
+# install all dependencies
+- name: copy scripts
+ copy: src=get_deb_depends.py dest=/tmp/get_deb_depends.py
+
+- name: install keystone-moon dependencies
+ shell: "apt-get install `python /tmp/get_deb_depends.py /tmp/moon-bin-master/*.deb`"
+ when: ansible_os_family == "Debian"
+
+
+# install keystone moon
+- name: copy scripts
+ copy: src=deb.conf dest=/tmp/deb.conf
+
+- name: install keystone moon
+ shell: >
+ export DEBIAN_FRONTEND="noninteractive";
+ sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
+
+#- name: install keystone moon
+# shell: >
+# export DEBIAN_FRONTEND="noninteractive";
+# sudo -E debconf-set-selections python-keystone < /tmp/deb.conf;
+# sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
+
+- name: stop keystone task
+ shell: >
+ service keystone stop;
+ mv /etc/init.d/keystone /home/;
+ mv /etc/init/keystone.conf /home/;
+ mv /lib/systemd/system/keystone.service /home/;
+
+# config keystone and apache2
+- name: delete sqlite database
+ file:
+ path: /var/lib/keystone/keystone.db
+ state: absent
+
+- name: update keystone conf
+ template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
+
+#- name: initialize fernet keys
+# shell: keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
+
+- name: assure listen port exist
+ lineinfile:
+ dest: '{{ apache_config_dir }}/ports.conf'
+ regexp: '{{ item.regexp }}'
+ line: '{{ item.line}}'
+ with_items:
+ - regexp: "^Listen {{ internal_ip }}:5000"
+ line: "Listen {{ internal_ip }}:5000"
+ - regexp: "^Listen {{ internal_ip }}:35357"
+ line: "Listen {{ internal_ip }}:35357"
+
+- name: update apache2 configs
+ template:
+ src: wsgi-keystone.conf.j2
+ dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
+ when: ansible_os_family == 'Debian'
+
+- name: enable keystone server
+ file:
+ src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
+ dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
+ state: "link"
+ when: ansible_os_family == 'Debian'
+
+- name: keystone source files
+ template: src={{ item }} dest=/opt/{{ item }}
+ with_items:
+ - admin-openrc.sh
+ - demo-openrc.sh
+
+# keystone paste ini
+- name: keystone paste ini 1
+ shell: sudo cp /etc/keystone/keystone-paste.ini /etc/keystone/keystone-paste.ini.bak;
+
+- name: keystone paste ini 2
+ shell: sudo sed "3i[pipeline:moon_pipeline]\npipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service\n\n[app:moon_service]\nuse = egg:keystone#moon_service\n" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini;
+
+- name: keystone paste ini 3
+ shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini;
+
+- name: keystone paste ini 4
+ shell: sudo sed "s/use = egg:Paste#urlmap/use = egg:Paste#urlmap\n\/moon = moon_pipeline/" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini;
+
+- name: keystone paste ini 5
+ shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini;
+
+# moon log
+- name: moon log
+ shell: >
+ sudo mkdir /var/log/moon/;
+ sudo chown keystone /var/log/moon/;
+ sudo addgroup moonlog;
+ sudo chgrp moonlog /var/log/moon/;
+ sudo touch /var/log/moon/keystonemiddleware.log;
+ sudo touch /var/log/moon/system.log;
+ sudo chgrp moonlog /var/log/moon/keystonemiddleware.log;
+ sudo chgrp moonlog /var/log/moon/system.log;
+ sudo chmod g+rw /var/log/moon;
+ sudo chmod g+rw /var/log/moon/keystonemiddleware.log;
+ sudo chmod g+rw /var/log/moon/system.log;
+ sudo adduser keystone moonlog;
+
+
+# keystone db sync
+- name: keystone db sync
+ shell: >
+ sudo /usr/bin/keystone-manage db_sync;
+ sudo /usr/bin/keystone-manage db_sync --extension moon;
+ when: inventory_hostname == haproxy_hosts.keys()[0]
+
+
+#############################################
+- name: wait for keystone ready
+ wait_for: port=35357 delay=3 timeout=10 host={{ internal_vip.ip }}
+
+- name: cron job to purge expired tokens hourly
+ cron:
+ name: 'purge expired tokens'
+ special_time: hourly
+ job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1'
+
+#############################################
+
+
+# apache2 restart
+- name: restart apache2
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: services | union(services_noarch)
+
+# install moonclient
+- name: install moon client
+ shell: sudo pip install /tmp/moon-bin-master/python-moonclient-0.1.tar.gz
+
+###################################################
+
+
+- name: add tenants
+ keystone_user:
+ token: "{{ ADMIN_TOKEN }}"
+ endpoint: "http://{{ internal_ip }}:35357/v2.0"
+ tenant: "{{ item.tenant }}"
+ tenant_description: "{{ item.tenant_description }}"
+ with_items: "{{ os_users }}"
+ when: inventory_hostname == groups['controller'][0]
+
+- name: add users
+ keystone_user:
+ token: "{{ ADMIN_TOKEN }}"
+ endpoint: "http://{{ internal_ip }}:35357/v2.0"
+ user: "{{ item.user }}"
+ tenant: "{{ item.tenant }}"
+ password: "{{ item.password }}"
+ email: "{{ item.email }}"
+ with_items: "{{ os_users }}"
+ when: inventory_hostname == groups['controller'][0]
+
+- name: grant roles
+ keystone_user:
+ token: "{{ ADMIN_TOKEN }}"
+ endpoint: "http://{{ internal_ip }}:35357/v2.0"
+ user: "{{ item.user }}"
+ role: "{{ item.role }}"
+ tenant: "{{ item.tenant }}"
+ with_items: "{{ os_users }}"
+ when: inventory_hostname == groups['controller'][0]
+
+- name: add endpoints
+ keystone_service:
+ token: "{{ ADMIN_TOKEN }}"
+ endpoint: "http://{{ internal_ip }}:35357/v2.0"
+ name: "{{ item.name }}"
+ type: "{{ item.type }}"
+ region: "{{ item.region}}"
+ description: "{{ item.description }}"
+ publicurl: "{{ item.publicurl }}"
+ internalurl: "{{ item.internalurl }}"
+ adminurl: "{{ item.adminurl }}"
+ with_items: "{{ os_services }}"
+ when: inventory_hostname == groups['controller'][0]
+
+
+###################################################
+
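[Editor's sketch] The five "keystone paste ini" tasks above shell out to sed to splice the moon pipeline into /etc/keystone/keystone-paste.ini: insert the pipeline/app stanza at line 3, then map /moon in both Paste urlmap composites. The same edit written as plain Python (same file path as the tasks; assumes root privileges):

import shutil

PASTE = "/etc/keystone/keystone-paste.ini"
STANZA = (
    "[pipeline:moon_pipeline]\n"
    "pipeline = sizelimit url_normalize request_id build_auth_context "
    "token_auth admin_token_auth json_body ec2_extension_v3 s3_extension "
    "moon_service\n"
    "\n"
    "[app:moon_service]\n"
    "use = egg:keystone#moon_service\n"
    "\n"
)

shutil.copy(PASTE, PASTE + ".bak")       # task 1: keep a backup
with open(PASTE) as f:
    lines = f.readlines()
lines[2:2] = [STANZA]                    # task 2: sed "3i..." inserts at line 3
text = "".join(lines).replace(           # task 4: route /moon in both urlmaps
    "use = egg:Paste#urlmap",
    "use = egg:Paste#urlmap\n/moon = moon_pipeline")
with open(PASTE, "w") as f:
    f.write(text)                        # tasks 3 and 5: write the result back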
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/admin-openrc.sh b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/admin-openrc.sh
new file mode 100644
index 00000000..6ba620ff
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/admin-openrc.sh
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Verify the Identity Service installation
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
+export OS_USERNAME=admin
+export OS_VOLUME_API_VERSION=2
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/demo-openrc.sh b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/demo-openrc.sh
new file mode 100644
index 00000000..5807e868
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/demo-openrc.sh
@@ -0,0 +1,13 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+export OS_USERNAME=demo
+export OS_PASSWORD={{ DEMO_PASS }}
+export OS_TENANT_NAME=demo
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/keystone-paste.ini b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/keystone-paste.ini
new file mode 100644
index 00000000..cd9ebede
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/keystone-paste.ini
@@ -0,0 +1,96 @@
+# Keystone PasteDeploy configuration file.
+
+[pipeline:moon_pipeline]
+pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service
+
+[app:moon_service]
+use = egg:keystone#moon_service
+
+[filter:debug]
+use = egg:oslo.middleware#debug
+
+[filter:request_id]
+use = egg:oslo.middleware#request_id
+
+[filter:build_auth_context]
+use = egg:keystone#build_auth_context
+
+[filter:token_auth]
+use = egg:keystone#token_auth
+
+[filter:admin_token_auth]
+# This is deprecated in the M release and will be removed in the O release.
+# Use `keystone-manage bootstrap` and remove this from the pipelines below.
+use = egg:keystone#admin_token_auth
+
+[filter:json_body]
+use = egg:keystone#json_body
+
+[filter:cors]
+use = egg:oslo.middleware#cors
+oslo_config_project = keystone
+
+[filter:ec2_extension]
+use = egg:keystone#ec2_extension
+
+[filter:ec2_extension_v3]
+use = egg:keystone#ec2_extension_v3
+
+[filter:s3_extension]
+use = egg:keystone#s3_extension
+
+[filter:url_normalize]
+use = egg:keystone#url_normalize
+
+[filter:sizelimit]
+use = egg:oslo.middleware#sizelimit
+
+[app:public_service]
+use = egg:keystone#public_service
+
+[app:service_v3]
+use = egg:keystone#service_v3
+
+[app:admin_service]
+use = egg:keystone#admin_service
+
+[pipeline:public_api]
+# The last item in this pipeline must be public_service or an equivalent
+# application. It cannot be a filter.
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension public_service
+
+[pipeline:admin_api]
+# The last item in this pipeline must be admin_service or an equivalent
+# application. It cannot be a filter.
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension s3_extension admin_service
+
+[pipeline:api_v3]
+# The last item in this pipeline must be service_v3 or an equivalent
+# application. It cannot be a filter.
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3
+
+[app:public_version_service]
+use = egg:keystone#public_version_service
+
+[app:admin_version_service]
+use = egg:keystone#admin_version_service
+
+[pipeline:public_version_api]
+pipeline = cors sizelimit url_normalize public_version_service
+
+[pipeline:admin_version_api]
+pipeline = cors sizelimit url_normalize admin_version_service
+
+[composite:main]
+use = egg:Paste#urlmap
+/moon = moon_pipeline
+/v2.0 = public_api
+/v3 = api_v3
+/ = public_version_api
+
+[composite:admin]
+use = egg:Paste#urlmap
+/moon = moon_pipeline
+/v2.0 = admin_api
+/v3 = api_v3
+/ = admin_version_api
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/keystone.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/keystone.conf
new file mode 100644
index 00000000..649fc32c
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/keystone.conf
@@ -0,0 +1,59 @@
+{% set memcached_servers = [] %}
+{% set rabbitmq_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
+[DEFAULT]
+admin_token={{ ADMIN_TOKEN }}
+debug={{ DEBUG }}
+log_dir = /var/log/keystone
+
+[cache]
+backend=keystone.cache.memcache_pool
+memcache_servers={{ memcached_servers}}
+enabled=true
+
+[revoke]
+driver=sql
+expiration_buffer=3600
+caching=true
+
+[database]
+connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8
+idle_timeout=30
+min_pool_size=5
+max_pool_size=120
+pool_timeout=30
+
+
+[identity]
+default_domain_id=default
+driver=sql
+
+[assignment]
+driver=sql
+
+[resource]
+driver=sql
+caching=true
+cache_time=3600
+
+[token]
+enforce_token_bind=permissive
+expiration=43200
+provider=uuid
+driver=sql
+caching=true
+cache_time=3600
+
+[eventlet_server]
+public_bind_host= {{ identity_host }}
+admin_bind_host= {{ identity_host }}
+
+[oslo_messaging_rabbit]
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_hosts = {{ rabbitmq_servers }}
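[Editor's sketch] The set/append preamble at the top of this template is the usual Jinja2 idiom for accumulating a list inside a loop: {% set _ = list.append(...) %} discards the None that append returns. Rendered against two hypothetical hosts:

from jinja2 import Template

tmpl = Template(
    "{% set memcached_servers = [] %}"
    "{% for host in haproxy_hosts.values() %}"
    "{% set _ = memcached_servers.append('%s:11211' % host) %}"
    "{% endfor %}"
    "memcache_servers={{ memcached_servers | join(',') }}"
)
print(tmpl.render(haproxy_hosts={"host1": "10.1.0.50", "host2": "10.1.0.51"}))
# memcache_servers=10.1.0.50:11211,10.1.0.51:11211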
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/wsgi-keystone.conf.j2
new file mode 100644
index 00000000..64d864af
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/templates/wsgi-keystone.conf.j2
@@ -0,0 +1,46 @@
+ {% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
+<VirtualHost {{ internal_ip }}:5000>
+ WSGIDaemonProcess keystone-public processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIProcessGroup keystone-public
+ WSGIScriptAlias / /usr/bin/keystone-wsgi-public
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/{{ http_service_name }}/keystone.log
+ CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
+
+<VirtualHost {{ internal_ip }}:35357>
+ WSGIDaemonProcess keystone-admin processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIProcessGroup keystone-admin
+ WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/{{ http_service_name }}/keystone.log
+ CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
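[Editor's sketch] work_threads sizes both the WSGI process count and thread count at roughly half the vCPU count, rounded up. A quick check of the template's expression for a few sample values:

# (vcpus + 1) // 2, as in the template's first line
for vcpus in (1, 2, 4, 8, 16):
    print(vcpus, "->", (vcpus + 1) // 2)
# 1 -> 1, 2 -> 1, 4 -> 2, 8 -> 4, 16 -> 8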
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/vars/Debian.yml
new file mode 100644
index 00000000..0da81179
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/vars/Debian.yml
@@ -0,0 +1,168 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+packages:
+ - adduser
+ - dbconfig-common
+ - init-system-helpers
+ - python-keystone
+ - q-text-as-data
+ - sqlite3
+ - ssl-cert
+ - debconf
+ - lsb-base
+ - python:any
+ - libjs-sphinxdoc
+ - python-pip
+ - unzip
+ - apache2
+ - libapache2-mod-wsgi
+
+dependency_packages:
+ - python-cryptography
+ - python-dateutil
+ - python-dogpile.cache
+ - python-eventlet
+ - python-greenlet
+ - python-jsonschema
+ - python-keystoneclient
+ - python-keystonemiddleware
+ - python-ldap
+ - python-ldappool
+ - python-lxml
+ - python-memcache
+ - python-migrate
+ - python-msgpack
+ - python-mysqldb
+ - python-oauthlib
+ - python-openstackclient
+ - python-oslo.cache
+ - python-oslo.concurrency
+ - python-oslo.config
+ - python-oslo.context
+ - python-oslo.db
+ - python-oslo.i18n
+ - python-oslo.log
+ - python-oslo.messaging
+ - python-oslo.middleware
+ - python-oslo.policy
+ - python-oslo.serialization
+ - python-oslo.service
+ - python-oslo.utils
+ - python-pam
+ - python-passlib
+ - python-paste
+ - python-pastedeploy
+ - python-pbr
+ - python-pycadf
+ - python-pymysql
+ - python-pysaml2
+ - python-pysqlite2
+ - python-routes
+ - python-six
+ - python-sqlalchemy
+ - python-stevedore
+ - python-webob
+ - unzip
+ - python3-keystoneauth1
+ - python3-keystoneclient
+ - python3-oslo.config
+ - python3-oslo.context
+ - python3-oslo.i18n
+ - python3-oslo.serialization
+ - python-oslo.service
+ - python-oslo.utils
+ - python-pam
+ - python-passlib
+ - python-paste
+ - python-pastedeploy
+ - python-pbr
+ - python-pycadf
+ - python-pymysql
+ - python-pysaml2
+ - python-pysqlite2
+ - python-routes
+ - python-six
+ - python-sqlalchemy
+ - python-stevedore
+ - python-webob
+ - unzip
+ - python3-keystoneauth1
+ - python3-keystoneclient
+ - python3-oslo.config
+ - python3-oslo.context
+ - python3-oslo.i18n
+ - python3-oslo.serialization
+ - python3-oslo.utils
+ - apache2
+ - libapache2-mod-wsgi
+ - python3-cryptography
+ - python3-dateutil
+ - python3-dogpile.cache
+ - python3-eventlet
+ - python3-greenlet
+ - python3-jsonschema
+ - python3-keystoneclient
+ - python3-keystonemiddleware
+ - python3-lxml
+ - python3-memcache
+ - python3-migrate
+ - python3-msgpack
+ - python3-mysqldb
+ - python3-oauthlib
+ - python3-openstackclient
+ - python3-oslo.cache
+ - python3-oslo.concurrency
+ - python3-oslo.config
+ - python3-oslo.context
+ - python3-oslo.db
+ - python3-oslo.i18n
+ - python3-oslo.log
+ - python3-oslo.messaging
+ - python3-oslo.middleware
+ - python3-oslo.policy
+ - python3-oslo.serialization
+ - python3-oslo.service
+ - python3-oslo.utils
+ - python3-pam
+ - python3-passlib
+ - python3-paste
+ - python3-pastedeploy
+ - python3-pbr
+ - python3-pycadf
+ - python3-pymysql
+ - python3-pysaml2
+ - python3-routes
+ - python3-six
+ - python3-sqlalchemy
+ - python3-stevedore
+ - python3-webob
+ - python3-oslo.service
+ - python3-oslo.utils
+ - python3-pam
+ - python3-passlib
+ - python3-paste
+ - python3-pastedeploy
+ - python3-pbr
+ - python3-pycadf
+ - python3-pymysql
+ - python3-pysaml2
+ - python3-routes
+ - python3-six
+ - python3-sqlalchemy
+ - python3-stevedore
+ - python3-webob
+
+services:
+ - apache2
+
+
+apache_config_dir: /etc/apache2
+http_service_name: apache2
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/vars/main.yml
new file mode 100644
index 00000000..9db404b9
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-controller/vars/main.yml
@@ -0,0 +1,172 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages_noarch: []
+
+services_noarch: []
+
+os_services:
+ - name: keystone
+ type: identity
+ region: regionOne
+ description: "OpenStack Identity"
+ publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
+ internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
+ adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
+
+ - name: glance
+ type: image
+ region: regionOne
+ description: "OpenStack Image Service"
+ publicurl: "http://{{ public_vip.ip }}:9292"
+ internalurl: "http://{{ internal_vip.ip }}:9292"
+ adminurl: "http://{{ internal_vip.ip }}:9292"
+
+ - name: nova
+ type: compute
+ region: regionOne
+ description: "OpenStack Compute"
+ publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+
+ - name: neutron
+ type: network
+ region: regionOne
+ description: "OpenStack Networking"
+ publicurl: "http://{{ public_vip.ip }}:9696"
+ internalurl: "http://{{ internal_vip.ip }}:9696"
+ adminurl: "http://{{ internal_vip.ip }}:9696"
+
+ - name: ceilometer
+ type: metering
+ region: regionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8777"
+ internalurl: "http://{{ internal_vip.ip }}:8777"
+ adminurl: "http://{{ internal_vip.ip }}:8777"
+
+ - name: aodh
+ type: alarming
+ region: regionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8042"
+ internalurl: "http://{{ internal_vip.ip }}:8042"
+ adminurl: "http://{{ internal_vip.ip }}:8042"
+
+# - name: cinder
+# type: volume
+# region: regionOne
+# description: "OpenStack Block Storage"
+# publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
+# internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+# adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+#
+# - name: cinderv2
+# type: volumev2
+# region: regionOne
+# description: "OpenStack Block Storage v2"
+# publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
+# internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+# adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+
+ - name: heat
+ type: orchestration
+ region: regionOne
+ description: "OpenStack Orchestration"
+ publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+
+ - name: heat-cfn
+ type: cloudformation
+ region: regionOne
+ description: "OpenStack CloudFormation Orchestration"
+ publicurl: "http://{{ public_vip.ip }}:8000/v1"
+ internalurl: "http://{{ internal_vip.ip }}:8000/v1"
+ adminurl: "http://{{ internal_vip.ip }}:8000/v1"
+
+ - name: swift
+ type: object-store
+ region: regionOne
+ description: "OpenStack Object Storage"
+ publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+
+os_users:
+ - user: admin
+ password: "{{ ADMIN_PASS }}"
+ email: admin@admin.com
+ role: admin
+ tenant: admin
+ tenant_description: "Admin Tenant"
+
+ - user: glance
+ password: "{{ GLANCE_PASS }}"
+ email: glance@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: nova
+ password: "{{ NOVA_PASS }}"
+ email: nova@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: keystone
+ password: "{{ KEYSTONE_PASS }}"
+ email: keystone@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: neutron
+ password: "{{ NEUTRON_PASS }}"
+ email: neutron@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: ceilometer
+ password: "{{ CEILOMETER_PASS }}"
+ email: ceilometer@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: cinder
+ password: "{{ CINDER_PASS }}"
+ email: cinder@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: heat
+ password: "{{ HEAT_PASS }}"
+ email: heat@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: demo
+ password: ""
+ email: heat@demo.com
+ role: heat_stack_user
+ tenant: demo
+ tenant_description: "Demo Tenant"
+
+ - user: swift
+ password: "{{ CINDER_PASS }}"
+ email: swift@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
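[Editor's sketch] The %(tenant_id)s markers in the nova, heat, and swift URLs above are not Jinja2 — they survive templating and are later expanded with Python %-style mapping substitution when Keystone builds a concrete endpoint. VIP and tenant id below are hypothetical:

url = "http://10.1.0.222:8774/v2/%(tenant_id)s"
print(url % {"tenant_id": "f3a7c9d2"})
# http://10.1.0.222:8774/v2/f3a7c9d2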
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/files/deb.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/files/deb.conf
new file mode 100644
index 00000000..6e1159a1
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/files/deb.conf
@@ -0,0 +1,11 @@
+keystone/admin-password: password
+keystone/auth-token: password
+keystone/admin-password-confirm: password
+keystone/admin-email: root@localhost
+keystone/admin-role-name: admin
+keystone/admin-user: admin
+keystone/create-admin-tenant: false
+keystone/region-name: Orange
+keystone/admin-tenant-name: admin
+keystone/register-endpoint: false
+keystone/configure_db: false
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/files/get_deb_depends.py b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/files/get_deb_depends.py
new file mode 100644
index 00000000..05fc5d46
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/files/get_deb_depends.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import subprocess
+
+pkts = []
+
+for arg in sys.argv[1:]:
+ proc = subprocess.Popen(["dpkg-deb", "--info", arg], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out = proc.stdout.read()
+ err = proc.stderr.read()
+ if err:
+ print("An error occurred with {} ({})".format(arg, err))
+ continue
+ for line in out.splitlines():
+ line = line.decode('utf-8')
+ if " Depends:" in line:
+ line = line.replace(" Depends:", "")
+ for _dep in line.split(','):
+ pkts.append(_dep.split()[0])
+
+print(" ".join(pkts))
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/handlers/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/handlers/main.yml
new file mode 100755
index 00000000..608a8a09
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/handlers/main.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart keystone services
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: services | union(services_noarch)
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/main.yml
new file mode 100644
index 00000000..d0809ef0
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/main.yml
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- include: moon-controller.yml
+ when: inventory_hostname in groups['controller']
+
+- include: moon-compute.yml
+ when: inventory_hostname in groups['compute']
+
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/moon-compute.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/moon-compute.yml
new file mode 100644
index 00000000..c5fcf1a7
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/moon-compute.yml
@@ -0,0 +1,17 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart nova task
+ service: name={{ item }} state=restarted enabled=yes
+ with_items:
+ - nova-compute
+
+- name: restart swift task
+ shell: swift-init all start
+ ignore_errors: True
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/moon-controller.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/moon-controller.yml
new file mode 100644
index 00000000..bff6397e
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/tasks/moon-controller.yml
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# moon log
+- name: moon log
+ shell: >
+ sudo adduser swift moonlog;
+ sudo adduser nova moonlog;
+
+# nova api paste
+#- name: nova api paste 1
+# shell: sudo cp /etc/nova/api-paste.ini /etc/nova/api-paste.ini.bak2
+#
+#- name: nova api paste 2
+# shell: sudo sed "/^keystone = / s/keystonecontext/keystonecontext moon/" /etc/nova/api-paste.ini > /tmp/api-paste.ini
+#
+#- name: nova api paste 3
+# shell: sudo cp /tmp/api-paste.ini /etc/nova/api-paste.ini
+#
+#- name: nova api paste 4
+# shell: echo -e "\n[filter:moon]\npaste.filter_factory = keystonemiddleware.moon_agent:filter_factory\nauthz_login=admin\nauthz_password=password\nlogfile=/var/log/moon/keystonemiddleware.log\n" | sudo tee -a /etc/nova/api-paste.ini
+
+- name: update api-paste.ini
+ template: src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes
+
+# restart nova
+- name: restart nova
+ service: name={{ item }} state=restarted enabled=yes
+ with_items:
+ - nova-api
+ - nova-cert
+ - nova-conductor
+ - nova-consoleauth
+ - nova-scheduler
+
+## swift proxy server
+#- name: swift proxy server 1
+# shell: sudo cp /etc/swift/proxy-server.conf /etc/swift/proxy-server.conf.bak2
+#
+#- name: swift proxy server 2
+# shell: sudo sed "/^pipeline = / s/proxy-server/moon proxy-server/" /etc/swift/proxy-server.conf > /tmp/proxy-server.conf
+#
+#- name: swift proxy server 3
+# shell: sudo cp /tmp/proxy-server.conf /etc/swift/proxy-server.conf
+#
+#- name: swift proxy server 4
+# shell: echo -e "\n[filter:moon]\npaste.filter_factory = keystonemiddleware.moon_agent:filter_factory\nauthz_login=admin\nauthz_password=password\nlogfile=/var/log/moon/keystonemiddleware.log\n" | sudo tee -a /etc/swift/proxy-server.conf
+
+# restart swift
+- name: restart swift
+ service: name={{ item }} state=restarted enabled=yes
+ with_items:
+ - swift-proxy
+ - memcached
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/admin-openrc.sh b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/admin-openrc.sh
new file mode 100644
index 00000000..6ba620ff
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/admin-openrc.sh
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Verify the Identity Service installation
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
+export OS_USERNAME=admin
+export OS_VOLUME_API_VERSION=2
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/api-paste.ini b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/api-paste.ini
new file mode 100644
index 00000000..f99689b7
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/api-paste.ini
@@ -0,0 +1,106 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = cors metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+# starting in Liberty the v21 implementation replaces the v2
+# implementation and is suggested that you use it as the default. If
+# this causes issues with your clients you can rollback to the
+# *frozen* v2 api by commenting out the above stanza and using the
+# following instead::
+# /v2: openstack_compute_api_legacy_v2
+# if rolling back to v2 fixes your issue please file a critical bug
+# at - https://bugs.launchpad.net/nova/+bugs
+#
+# v21 is an exactly feature match for v2, except it has more stringent
+# input validation on the wsgi surface (prevents fuzzing early on the
+# API). It also provides new features via API microversions which are
+# opt into for clients. Unaware clients will receive the same frozen
+# v2 API feature set, but with some relaxed validation
+/v2: openstack_compute_api_v21_legacy_v2_compatible
+/v2.1: openstack_compute_api_v21
+
+# NOTE: this is deprecated in favor of openstack_compute_api_v21_legacy_v2_compatible
+[composite:openstack_compute_api_legacy_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_ratelimit osapi_compute_app_legacy_v2
+keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext moon legacy_ratelimit osapi_compute_app_legacy_v2
+keystone_nolimit = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_legacy_v2
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
+keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
+
+[composite:openstack_compute_api_v21_legacy_v2_compatible]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
+keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth2]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:legacy_ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory
+
+[filter:legacy_v2_compatible]
+paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory
+
+[app:osapi_compute_app_legacy_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = nova
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:moon]
+paste.filter_factory = keystonemiddleware.moon_agent:filter_factory
+authz_login=admin
+authz_password=password
+logfile=/var/log/moon/keystonemiddleware.log
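[Editor's sketch] The [filter:moon] section is wired in through paste.filter_factory, the standard PasteDeploy entry point. A generic sketch of that shape — illustrative only, not the actual keystonemiddleware.moon_agent implementation:

def filter_factory(global_conf, **local_conf):
    """Return a function that wraps a WSGI app in this filter's middleware."""
    logfile = local_conf.get("logfile", "/var/log/moon/keystonemiddleware.log")

    def _factory(app):
        def middleware(environ, start_response):
            # an authorization decision against the moon service would go here
            return app(environ, start_response)
        return middleware

    return _factory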
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/demo-openrc.sh b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/demo-openrc.sh
new file mode 100644
index 00000000..5807e868
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/demo-openrc.sh
@@ -0,0 +1,13 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+export OS_USERNAME=demo
+export OS_PASSWORD={{ DEMO_PASS }}
+export OS_TENANT_NAME=demo
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/keystone-paste.ini b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/keystone-paste.ini
new file mode 100644
index 00000000..cd9ebede
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/keystone-paste.ini
@@ -0,0 +1,96 @@
+# Keystone PasteDeploy configuration file.
+
+[pipeline:moon_pipeline]
+pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service
+
+[app:moon_service]
+use = egg:keystone#moon_service
+
+[filter:debug]
+use = egg:oslo.middleware#debug
+
+[filter:request_id]
+use = egg:oslo.middleware#request_id
+
+[filter:build_auth_context]
+use = egg:keystone#build_auth_context
+
+[filter:token_auth]
+use = egg:keystone#token_auth
+
+[filter:admin_token_auth]
+# This is deprecated in the M release and will be removed in the O release.
+# Use `keystone-manage bootstrap` and remove this from the pipelines below.
+use = egg:keystone#admin_token_auth
+
+[filter:json_body]
+use = egg:keystone#json_body
+
+[filter:cors]
+use = egg:oslo.middleware#cors
+oslo_config_project = keystone
+
+[filter:ec2_extension]
+use = egg:keystone#ec2_extension
+
+[filter:ec2_extension_v3]
+use = egg:keystone#ec2_extension_v3
+
+[filter:s3_extension]
+use = egg:keystone#s3_extension
+
+[filter:url_normalize]
+use = egg:keystone#url_normalize
+
+[filter:sizelimit]
+use = egg:oslo.middleware#sizelimit
+
+[app:public_service]
+use = egg:keystone#public_service
+
+[app:service_v3]
+use = egg:keystone#service_v3
+
+[app:admin_service]
+use = egg:keystone#admin_service
+
+[pipeline:public_api]
+# The last item in this pipeline must be public_service or an equivalent
+# application. It cannot be a filter.
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension public_service
+
+[pipeline:admin_api]
+# The last item in this pipeline must be admin_service or an equivalent
+# application. It cannot be a filter.
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension s3_extension admin_service
+
+[pipeline:api_v3]
+# The last item in this pipeline must be service_v3 or an equivalent
+# application. It cannot be a filter.
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3
+
+[app:public_version_service]
+use = egg:keystone#public_version_service
+
+[app:admin_version_service]
+use = egg:keystone#admin_version_service
+
+[pipeline:public_version_api]
+pipeline = cors sizelimit url_normalize public_version_service
+
+[pipeline:admin_version_api]
+pipeline = cors sizelimit url_normalize admin_version_service
+
+[composite:main]
+use = egg:Paste#urlmap
+/moon = moon_pipeline
+/v2.0 = public_api
+/v3 = api_v3
+/ = public_version_api
+
+[composite:admin]
+use = egg:Paste#urlmap
+/moon = moon_pipeline
+/v2.0 = admin_api
+/v3 = api_v3
+/ = admin_version_api
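The [composite:main] and [composite:admin] sections use Paste's urlmap to dispatch by URL prefix: /moon routes to the moon pipeline, /v2.0 and /v3 to the regular Keystone APIs, and / to the version document. A quick routing sanity check (a sketch; host and ports depend on the deployment):

    curl -si http://127.0.0.1:5000/ | head -n 1      # version document via public_version_api
    curl -si http://127.0.0.1:5000/v3/ | head -n 1   # Identity v3 via api_v3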
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/keystone.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/keystone.conf
new file mode 100644
index 00000000..649fc32c
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/keystone.conf
@@ -0,0 +1,59 @@
+{% set memcached_servers = [] %}
+{% set rabbitmq_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211' % host) %}
+{% set _ = rabbitmq_servers.append('%s:5672' % host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
+[DEFAULT]
+admin_token={{ ADMIN_TOKEN }}
+debug={{ DEBUG }}
+log_dir = /var/log/keystone
+
+[cache]
+backend=keystone.cache.memcache_pool
+memcache_servers={{ memcached_servers }}
+enabled=true
+
+[revoke]
+driver=sql
+expiration_buffer=3600
+caching=true
+
+[database]
+connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8
+idle_timeout=30
+min_pool_size=5
+max_pool_size=120
+pool_timeout=30
+
+[identity]
+default_domain_id=default
+driver=sql
+
+[assignment]
+driver=sql
+
+[resource]
+driver=sql
+caching=true
+cache_time=3600
+
+[token]
+enforce_token_bind=permissive
+expiration=43200
+provider=uuid
+driver=sql
+caching=true
+cache_time=3600
+
+[eventlet_server]
+public_bind_host = {{ identity_host }}
+admin_bind_host = {{ identity_host }}
+
+[oslo_messaging_rabbit]
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_hosts = {{ rabbitmq_servers }}
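The Jinja2 block at the top turns the haproxy host map into comma-separated memcached and RabbitMQ server lists. As a sketch, assuming three controllers at the hypothetical addresses 10.1.0.50-52, the rendered file should contain:

    grep -E 'memcache_servers|rabbit_hosts' /etc/keystone/keystone.conf
    # memcache_servers=10.1.0.50:11211,10.1.0.51:11211,10.1.0.52:11211
    # rabbit_hosts = 10.1.0.50:5672,10.1.0.51:5672,10.1.0.52:5672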
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/wsgi-keystone.conf.j2
new file mode 100644
index 00000000..64d864af
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/templates/wsgi-keystone.conf.j2
@@ -0,0 +1,46 @@
+{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
+<VirtualHost {{ internal_ip }}:5000>
+ WSGIDaemonProcess keystone-public processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIProcessGroup keystone-public
+ WSGIScriptAlias / /usr/bin/keystone-wsgi-public
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/{{ http_service_name }}/keystone.log
+ CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
+
+<VirtualHost {{ internal_ip }}:35357>
+ WSGIDaemonProcess keystone-admin processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIProcessGroup keystone-admin
+ WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/{{ http_service_name }}/keystone.log
+ CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
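work_threads is computed as (vcpus + 1) // 2, so an 8-vCPU controller gets 4 WSGI processes with 4 threads each for both the public (5000) and admin (35357) vhosts. After deployment the template can be sanity-checked with (a sketch, assuming a Debian-family host; substitute the real internal_ip):

    apache2ctl configtest
    curl -si http://<internal_ip>:5000/ | head -n 1
    curl -si http://<internal_ip>:35357/ | head -n 1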
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/vars/Debian.yml
new file mode 100644
index 00000000..6697ec09
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/vars/Debian.yml
@@ -0,0 +1,33 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+cron_path: "/var/spool/cron/crontabs"
+
+packages:
+ - adduser
+ - dbconfig-common
+ - init-system-helpers
+ - python-keystone
+ - q-text-as-data
+ - sqlite3
+ - ssl-cert
+ - debconf
+ - lsb-base
+ - python:any
+ - libjs-sphinxdoc
+ - python-pip
+ - unzip
+
+services:
+ - apache2
+
+apache_config_dir: /etc/apache2
+http_service_name: apache2
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/vars/main.yml
new file mode 100644
index 00000000..9db404b9
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/moon-post/vars/main.yml
@@ -0,0 +1,172 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages_noarch: []
+
+services_noarch: []
+
+os_services:
+ - name: keystone
+ type: identity
+ region: regionOne
+ description: "OpenStack Identity"
+ publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
+ internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
+ adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
+
+ - name: glance
+ type: image
+ region: regionOne
+ description: "OpenStack Image Service"
+ publicurl: "http://{{ public_vip.ip }}:9292"
+ internalurl: "http://{{ internal_vip.ip }}:9292"
+ adminurl: "http://{{ internal_vip.ip }}:9292"
+
+ - name: nova
+ type: compute
+ region: regionOne
+ description: "OpenStack Compute"
+ publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+
+ - name: neutron
+ type: network
+ region: regionOne
+ description: "OpenStack Networking"
+ publicurl: "http://{{ public_vip.ip }}:9696"
+ internalurl: "http://{{ internal_vip.ip }}:9696"
+ adminurl: "http://{{ internal_vip.ip }}:9696"
+
+ - name: ceilometer
+ type: metering
+ region: regionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8777"
+ internalurl: "http://{{ internal_vip.ip }}:8777"
+ adminurl: "http://{{ internal_vip.ip }}:8777"
+
+ - name: aodh
+ type: alarming
+ region: regionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8042"
+ internalurl: "http://{{ internal_vip.ip }}:8042"
+ adminurl: "http://{{ internal_vip.ip }}:8042"
+
+# - name: cinder
+# type: volume
+# region: regionOne
+# description: "OpenStack Block Storage"
+# publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
+# internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+# adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+#
+# - name: cinderv2
+# type: volumev2
+# region: regionOne
+# description: "OpenStack Block Storage v2"
+# publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
+# internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+# adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+
+ - name: heat
+ type: orchestration
+ region: regionOne
+ description: "OpenStack Orchestration"
+ publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+
+ - name: heat-cfn
+ type: cloudformation
+ region: regionOne
+ description: "OpenStack CloudFormation Orchestration"
+ publicurl: "http://{{ public_vip.ip }}:8000/v1"
+ internalurl: "http://{{ internal_vip.ip }}:8000/v1"
+ adminurl: "http://{{ internal_vip.ip }}:8000/v1"
+
+ - name: swift
+ type: object-store
+ region: regionOne
+ description: "OpenStack Object Storage"
+ publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+
+os_users:
+ - user: admin
+ password: "{{ ADMIN_PASS }}"
+ email: admin@admin.com
+ role: admin
+ tenant: admin
+ tenant_description: "Admin Tenant"
+
+ - user: glance
+ password: "{{ GLANCE_PASS }}"
+ email: glance@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: nova
+ password: "{{ NOVA_PASS }}"
+ email: nova@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: keystone
+ password: "{{ KEYSTONE_PASS }}"
+ email: keystone@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: neutron
+ password: "{{ NEUTRON_PASS }}"
+ email: neutron@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: ceilometer
+ password: "{{ CEILOMETER_PASS }}"
+ email: ceilometer@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: cinder
+ password: "{{ CINDER_PASS }}"
+ email: cinder@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: heat
+ password: "{{ HEAT_PASS }}"
+ email: heat@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: demo
+ password: ""
+ email: heat@demo.com
+ role: heat_stack_user
+ tenant: demo
+ tenant_description: "Demo Tenant"
+
+ - user: swift
+ password: "{{ CINDER_PASS }}"
+ email: swift@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
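os_services and os_users are plain data consumed elsewhere in the role to register endpoints and accounts. As a hedged sketch of what one swift entry amounts to in Mitaka-era CLI terms (the URLs and the SWIFT_PASS variable are illustrative):

    openstack service create --name swift --description "OpenStack Object Storage" object-store
    openstack endpoint create --region regionOne \
      --publicurl "http://<public_vip>:8080/v1/AUTH_%(tenant_id)s" \
      --internalurl "http://<internal_vip>:8080/v1/AUTH_%(tenant_id)s" \
      --adminurl "http://<internal_vip>:8080/v1/AUTH_%(tenant_id)s" \
      object-store
    openstack user create --password "$SWIFT_PASS" --email swift@admin.com swift
    openstack role add --project service --user swift admin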
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/main.yml
new file mode 100644
index 00000000..b50126ae
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/main.yml
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- include: swift-controller1.yml
+ when: inventory_hostname in groups['controller']
+
+- include: swift-compute1.yml
+ when: inventory_hostname in groups['compute']
+
+- include: swift-controller2.yml
+ when: inventory_hostname == haproxy_hosts.keys()[0]
+
+- name: copy swift.conf
+ template: src=swift.conf dest=/etc/swift/swift.conf backup=yes
+
+- name: chown /etc/swift
+ shell: chown -R root:swift /etc/swift
+
+- name: restart swift services on controller
+ service: name={{ item }} state=restarted enabled=yes
+ with_items:
+ - memcached
+ - swift-proxy
+ when: inventory_hostname in groups['controller']
+
+- name: start swift services on compute
+ shell: swift-init all start
+ when: inventory_hostname in groups['compute']
+ ignore_errors: True
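The role fans out by group: per-node setup runs on controllers and computes, the rings are built once on the first haproxy host, and services are then (re)started. A quick post-run health check might be (a sketch):

    swift-init all status          # on storage (compute) nodes
    service swift-proxy status     # on controllers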
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-compute1.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-compute1.yml
new file mode 100644
index 00000000..06711d3c
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-compute1.yml
@@ -0,0 +1,66 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install swift-compute packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: compute_packages | union(compute_packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: format devices
+ shell: >
+ dd if=/dev/zero of=/var/swift1 bs=1G count=10;
+ dd if=/dev/zero of=/var/swift2 bs=1G count=10;
+ mkfs.xfs /var/swift1;
+ mkfs.xfs /var/swift2;
+
+- name: create mount point directories
+ shell: >
+ mkdir -p /srv/node/swift1;
+ mkdir -p /srv/node/swift2;
+
+- name: edit /etc/fstab
+ shell: >
+ echo "/var/swift1 /srv/node/swift1/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
+ echo "/var/swift2 /srv/node/swift2/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
+ mount /srv/node/swift1;
+ mount /srv/node/swift2;
+
+- name: edit /etc/default/rsync
+ shell: sed -i 's/RSYNC_ENABLE=false/RSYNC_ENABLE=true/g' /etc/default/rsync
+
+- name: restart rsync service
+ service: name=rsync state=restarted enabled=yes
+
+- name: copy server configs
+ template: src={{ item }} dest=/etc/swift/ backup=yes
+ with_items:
+ - account-server.conf
+ - container-server.conf
+ - object-server.conf
+
+- name: set ownership and create cache directory
+ shell: >
+ chown -R swift:swift /srv/node;
+ mkdir -p /var/cache/swift;
+ chown -R root:swift /var/cache/swift;
+ chmod -R 775 /var/cache/swift;
+
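The format step fakes two 10 GiB disks as XFS-formatted loop files (dd writes ten 1 GiB blocks per file), which is fine for a demo cluster but not for production I/O; note the shell tasks are not idempotent, so reruns re-zero the files and append duplicate fstab entries. Whether the mounts came up can be checked with (a sketch):

    mount | grep /srv/node
    df -h /srv/node/swift1 /srv/node/swift2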
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller1.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller1.yml
new file mode 100644
index 00000000..36d05040
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller1.yml
@@ -0,0 +1,34 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install swift-controller packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: controller_packages | union(controller_packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: make swift directory
+ file: path=/etc/swift state=directory mode=0755
+
+- name: update proxy-server conf
+ template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
+
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller2.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller2.yml
new file mode 100644
index 00000000..92d4ab22
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift-controller2.yml
@@ -0,0 +1,93 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: create account.builder file
+ shell: >
+ cd /etc/swift ;
+ swift-ring-builder account.builder create 10 3 1;
+
+- name: add each storage node to the account ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift1 --weight 100 ;
+ swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift2 --weight 100 ;
+ with_indexed_items: groups['compute']
+
+- name: verify the account ring contents
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder account.builder;
+
+- name: rebalance the account ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder account.builder rebalance;
+
+
+#####################
+- name: create container builder file
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder container.builder create 10 3 1;
+
+- name: add each storage node to the container ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift1 --weight 100;
+ swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift2 --weight 100;
+ with_indexed_items: groups['compute']
+
+- name: verify the container ring contents
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder container.builder;
+
+- name: rebalance the container ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder container.builder rebalance;
+
+#############################
+
+- name: create object builder file
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder object.builder create 10 3 1;
+
+- name: add each storage node to the object ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift1 --weight 100;
+ swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift2 --weight 100;
+ with_indexed_items: groups['compute']
+
+- name: verify the object ring contents
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder object.builder;
+
+- name: rebalance the object ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder object.builder rebalance;
+
+##########################
+
+- name: distribute ring configuration files to the other controllers
+ shell: >
+ cd /etc/swift;
+ scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
+ with_indexed_items: groups['controller']
+
+- name: distribute ring configuration files to all compute nodes
+ shell: >
+ cd /etc/swift;
+ scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
+ with_indexed_items: groups['compute']
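swift-ring-builder's create arguments are part power, replica count, and min_part_hours: "10 3 1" yields 2^10 = 1024 partitions, 3 replicas, and at least one hour between moves of any given partition. After rebalancing, each builder can be inspected:

    cd /etc/swift
    swift-ring-builder account.builder    # prints partition/replica counts and the devices added above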
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/account-server.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/account-server.conf
new file mode 100644
index 00000000..ea84799f
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/account-server.conf
@@ -0,0 +1,200 @@
+[DEFAULT]
+bind_ip = {{ internal_ip }}
+bind_port = 6002
+# bind_timeout = 30
+# backlog = 4096
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+mount_check = true
+# disable_fallocate = false
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# If you don't mind the extra disk space usage in overhead, you can turn this
+# on to preallocate disk space with SQLite databases to decrease fragmentation.
+# db_preallocation = off
+#
+# eventlet_debug = false
+#
+# You can set fallocate_reserve to the number of bytes you'd like fallocate to
+# reserve, whether there is space for the given file size or not.
+# fallocate_reserve = 0
+
+[pipeline:main]
+pipeline = healthcheck recon account-server
+
+[app:account-server]
+use = egg:swift#account
+# You can override the default log routing for this app here:
+# set log_name = account-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# auto_create_account_prefix = .
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server". Default is empty.
+# replication_server = false
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+
+[account-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Maximum number of database rows that will be sync'd in a single HTTP
+# replication request. Databases with less than or equal to this number of
+# differing rows will always be sync'd using an HTTP replication request rather
+# than using rsync.
+# per_diff = 1000
+#
+# Maximum number of HTTP replication requests attempted on each replication
+# pass for any one container. This caps how long the replicator will spend
+# trying to sync a given database per pass so the other databases don't get
+# starved.
+# max_diffs = 100
+#
+# Number of replication workers to spawn.
+# concurrency = 8
+#
+# Time in seconds to wait between replication passes
+# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# rsync_compress = no
+#
+# Format of the rsync module where the replicator will send data. See
+# etc/rsyncd.conf-sample for some usage examples.
+# rsync_module = {replication_ip}::account
+#
+# recon_cache_path = /var/cache/swift
+
+[account-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Will audit each account at most once per interval
+# interval = 1800
+#
+# accounts_per_second = 200
+# recon_cache_path = /var/cache/swift
+
+[account-reaper]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-reaper
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# concurrency = 25
+# interval = 3600
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# Normally, the reaper begins deleting account information for deleted accounts
+# immediately; you can set this to delay its work however. The value is in
+# seconds; 2592000 = 30 days for example.
+# delay_reaping = 0
+#
+# If the account fails to be reaped due to a persistent error, the
+# account reaper will log a message such as:
+# Account <name> has not been reaped since <date>
+# You can search logs for this message if space is not being reclaimed
+# after you delete account(s).
+# Default is 2592000 seconds (30 days). This is in addition to any time
+# requested by delay_reaping.
+# reap_warn_after = 2592000
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers, which should inherit from the
+# python standard profiler. Currently the supported values include 'cProfile',
+# 'eventlet.green.profile', etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/account.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# the profile data will be dumped to local disk based on above naming rule
+# in this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable profiler to dump data into the file with
+# time stamp which means there will be lots of files piled up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shutdown.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
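With the recon filter enabled and recon_cache_path set, the account server caches metrics that the swift-recon CLI can aggregate from any node holding the rings, for example (a sketch):

    swift-recon account -r    # replication stats across account servers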
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/container-server.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/container-server.conf
new file mode 100644
index 00000000..88cd2ebb
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/container-server.conf
@@ -0,0 +1,229 @@
+[DEFAULT]
+bind_ip = {{ internal_ip }}
+bind_port = 6001
+# bind_timeout = 30
+# backlog = 4096
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+mount_check = true
+# disable_fallocate = false
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# This is a comma separated list of hosts allowed in the X-Container-Sync-To
+# field for containers. This is the old-style of using container sync. It is
+# strongly recommended to use the new style of a separate
+# container-sync-realms.conf -- see container-sync-realms.conf-sample
+# allowed_sync_hosts = 127.0.0.1
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# If you don't mind the extra disk space usage in overhead, you can turn this
+# on to preallocate disk space with SQLite databases to decrease fragmentation.
+# db_preallocation = off
+#
+# eventlet_debug = false
+#
+# You can set fallocate_reserve to the number of bytes you'd like fallocate to
+# reserve, whether there is space for the given file size or not.
+# fallocate_reserve = 0
+
+[pipeline:main]
+pipeline = healthcheck recon container-server
+
+[app:container-server]
+use = egg:swift#container
+# You can override the default log routing for this app here:
+# set log_name = container-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# node_timeout = 3
+# conn_timeout = 0.5
+# allow_versions = false
+# auto_create_account_prefix = .
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+
+[container-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Maximum number of database rows that will be sync'd in a single HTTP
+# replication request. Databases with less than or equal to this number of
+# differing rows will always be sync'd using an HTTP replication request rather
+# than using rsync.
+# per_diff = 1000
+#
+# Maximum number of HTTP replication requests attempted on each replication
+# pass for any one container. This caps how long the replicator will spend
+# trying to sync a given database per pass so the other databases don't get
+# starved.
+# max_diffs = 100
+#
+# Number of replication workers to spawn.
+# concurrency = 8
+#
+# Time in seconds to wait between replication passes
+# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# rsync_compress = no
+#
+# Format of the rsync module where the replicator will send data. See
+# etc/rsyncd.conf-sample for some usage examples.
+# rsync_module = {replication_ip}::container
+#
+# recon_cache_path = /var/cache/swift
+
+[container-updater]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-updater
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# interval = 300
+# concurrency = 4
+# node_timeout = 3
+# conn_timeout = 0.5
+#
+# slowdown will sleep that amount between containers
+# slowdown = 0.01
+#
+# Seconds to suppress updating an account that has generated an error
+# account_suppression_time = 60
+#
+# recon_cache_path = /var/cache/swift
+
+[container-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Will audit each container at most once per interval
+# interval = 1800
+#
+# containers_per_second = 200
+# recon_cache_path = /var/cache/swift
+
+[container-sync]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-sync
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
+# You can also set this to a comma separated list of HTTP Proxies and they will
+# be randomly used (simple load balancing).
+# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888
+#
+# Will sync each container at most once per interval
+# interval = 300
+#
+# Maximum amount of time to spend syncing each container per pass
+# container_time = 60
+#
+# Maximum amount of time in seconds for the connection attempt
+# conn_timeout = 5
+# Server errors from requests will be retried by default
+# request_tries = 3
+#
+# Internal client config file path
+# internal_client_conf_path = /etc/swift/internal-client.conf
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers, which should inherit from the
+# python standard profiler. Currently the supported values include 'cProfile',
+# 'eventlet.green.profile', etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/container.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# the profile data will be dumped to local disk based on above naming rule
+# in this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable profiler to dump data into the file with
+# time stamp which means there will be lots of files piled up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shutdown.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/object-server.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/object-server.conf
new file mode 100644
index 00000000..effd4f22
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/object-server.conf
@@ -0,0 +1,347 @@
+[DEFAULT]
+bind_ip = {{ internal_ip }}
+bind_port = 6000
+# bind_timeout = 30
+# backlog = 4096
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+mount_check = true
+# disable_fallocate = false
+# expiring_objects_container_divisor = 86400
+# expiring_objects_account_name = expiring_objects
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections. NOTE: if servers_per_port is set, this setting is
+# ignored.
+# workers = auto
+#
+# Make object-server run this many worker processes per unique port of
+# "local" ring devices across all storage policies. This can help provide
+# the isolation of threads_per_disk without the severe overhead. The default
+# value of 0 disables this feature.
+# servers_per_port = 0
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# eventlet_debug = false
+#
+# You can set fallocate_reserve to the number of bytes you'd like fallocate to
+# reserve, whether there is space for the given file size or not.
+# fallocate_reserve = 0
+#
+# Time to wait while attempting to connect to another backend node.
+# conn_timeout = 0.5
+# Time to wait while sending each chunk of data to another backend node.
+# node_timeout = 3
+# Time to wait while sending a container update on object update.
+# container_update_timeout = 1.0
+# Time to wait while receiving each chunk of data from a client or another
+# backend node.
+# client_timeout = 60
+#
+# network_chunk_size = 65536
+# disk_chunk_size = 65536
+
+[pipeline:main]
+pipeline = healthcheck recon object-server
+
+[app:object-server]
+use = egg:swift#object
+# You can override the default log routing for this app here:
+# set log_name = object-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# max_upload_time = 86400
+#
+# slow is the total amount of seconds an object PUT/DELETE request takes at
+# least. If it is faster, the object server will sleep this amount of time minus
+# the already passed transaction time. This is only useful for simulating slow
+# devices on storage nodes during testing and development.
+# slow = 0
+#
+# Objects smaller than this are not evicted from the buffercache once read
+# keep_cache_size = 5242880
+#
+# If true, objects for authenticated GET requests may be kept in buffer cache
+# if small enough
+# keep_cache_private = false
+#
+# on PUTs, sync data every n MB
+# mb_per_sync = 512
+#
+# Comma separated list of headers that can be set in metadata on an object.
+# This list is in addition to X-Object-Meta-* headers and cannot include
+# Content-Type, etag, Content-Length, or deleted
+# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
+#
+# auto_create_account_prefix = .
+#
+# A value of 0 means "don't use thread pools". A reasonable starting point is
+# 4.
+# threads_per_disk = 0
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+#
+# Set to restrict the number of concurrent incoming SSYNC requests
+# Set to 0 for unlimited
+# Note that SSYNC requests are only used by the object reconstructor or the
+# object replicator when configured to use ssync.
+# replication_concurrency = 4
+#
+# Restricts incoming SSYNC requests to one per device,
+# replication_concurrency above allowing. This can help control I/O to each
+# device, but you may wish to set this to False to allow multiple SSYNC
+# requests (up to the above replication_concurrency setting) per device.
+# replication_one_per_device = True
+#
+# Number of seconds to wait for an existing replication device lock before
+# giving up.
+# replication_lock_timeout = 15
+#
+# These next two settings control when the SSYNC subrequest handler will
+# abort an incoming SSYNC attempt. An abort will occur if there are at
+# least threshold number of failures and the value of failures / successes
+# exceeds the ratio. The defaults of 100 and 1.0 means that at least 100
+# failures have to occur and there have to be more failures than successes for
+# an abort to occur.
+# replication_failure_threshold = 100
+# replication_failure_ratio = 1.0
+#
+# Use splice() for zero-copy object GETs. This requires Linux kernel
+# version 3.0 or greater. If you set "splice = yes" but the kernel
+# does not support it, error messages will appear in the object server
+# logs at startup, but your object servers should continue to function.
+#
+# splice = no
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+recon_lock_path = /var/lock
+
+[object-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# daemonize = on
+#
+# Time in seconds to wait between replication passes
+# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
+# concurrency = 1
+# stats_interval = 300
+#
+# default is rsync, alternative is ssync
+# sync_method = rsync
+#
+# max duration of a partition rsync
+# rsync_timeout = 900
+#
+# bandwidth limit for rsync in kB/s. 0 means unlimited
+# rsync_bwlimit = 0
+#
+# passed to rsync for io op timeout
+# rsync_io_timeout = 30
+#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might
+# slow down the syncing process.
+# rsync_compress = no
+#
+# Format of the rsync module where the replicator will send data. See
+# etc/rsyncd.conf-sample for some usage examples.
+# rsync_module = {replication_ip}::object
+#
+# node_timeout = <whatever's in the DEFAULT section or 10>
+# max duration of an http request; this is for REPLICATE finalization calls and
+# so should be longer than node_timeout
+# http_timeout = 60
+#
+# attempts to kill all workers if nothing replicates for lockup_timeout seconds
+# lockup_timeout = 1800
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# ring_check_interval = 15
+# recon_cache_path = /var/cache/swift
+#
+# limits how long rsync error log lines are
+# 0 means to log the entire line
+# rsync_error_log_line_length = 0
+#
+# handoffs_first and handoff_delete are options for a special case
+# such as disk full in the cluster. These two options SHOULD NOT BE
+# CHANGED, except for such extreme situations (e.g. disks filled up
+# or about to fill up; in any case, DO NOT let your drives fill up).
+# handoffs_first is the flag to replicate handoffs prior to canonical
+# partitions. It forces handoffs to be synced and deleted quickly.
+# If set to a True value(e.g. "True" or "1"), partitions
+# that are not supposed to be on the node will be replicated first.
+# handoffs_first = False
+#
+# handoff_delete is the number of replicas which are ensured in swift.
+# If a number less than the number of replicas is set, object-replicator
+# could delete local handoffs even if all replicas are not ensured in the
+# cluster. Object-replicator would remove local handoff partition directories
+# after syncing partition when the number of successful responses is greater
+# than or equal to this number. By default(auto), handoff partitions will be
+# removed when it has successfully replicated to all the canonical nodes.
+# handoff_delete = auto
+
+[object-reconstructor]
+# You can override the default log routing for this app here (don't use set!):
+# Unless otherwise noted, each setting below has the same meaning as described
+# in the [object-replicator] section, however these settings apply to the EC
+# reconstructor
+#
+# log_name = object-reconstructor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# daemonize = on
+#
+# Time in seconds to wait between reconstruction passes
+# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
+# concurrency = 1
+# stats_interval = 300
+# node_timeout = 10
+# http_timeout = 60
+# lockup_timeout = 1800
+# reclaim_age = 604800
+# ring_check_interval = 15
+# recon_cache_path = /var/cache/swift
+# handoffs_first = False
+
+[object-updater]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-updater
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# interval = 300
+# concurrency = 1
+# node_timeout = <whatever's in the DEFAULT section or 10>
+# slowdown will sleep that amount between objects
+# slowdown = 0.01
+#
+# recon_cache_path = /var/cache/swift
+
+[object-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Time in seconds to wait between auditor passes
+# interval = 30
+#
+# You can set the disk chunk size that the auditor uses making it larger if
+# you like for more efficient local auditing of larger objects
+# disk_chunk_size = 65536
+# files_per_second = 20
+# concurrency = 1
+# bytes_per_second = 10000000
+# log_time = 3600
+# zero_byte_files_per_second = 50
+# recon_cache_path = /var/cache/swift
+
+# Takes a comma separated list of ints. If set, the object auditor will
+# increment a counter for every object whose size is <= to the given break
+# points and report the result after a full scan.
+# object_size_stats =
+
+# The auditor will cleanup old rsync tempfiles after they are "old
+# enough" to delete. You can configure the time elapsed in seconds
+# before rsync tempfiles will be unlinked, or the default value of
+# "auto" try to use object-replicator's rsync_timeout + 900 and fallback
+# to 86400 (1 day).
+# rsync_tempfile_timeout = auto
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers, which should inherit from the
+# python standard profiler. Currently the supported values include 'cProfile',
+# 'eventlet.green.profile', etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/object.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# the profile data will be dumped to local disk based on above naming rule
+# in this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable profiler to dump data into the file with
+# time stamp which means there will be lots of files piled up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shutdown.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/proxy-server.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/proxy-server.conf
new file mode 100644
index 00000000..4efdfcee
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/proxy-server.conf
@@ -0,0 +1,771 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211' % host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
+[DEFAULT]
+# bind_ip = 0.0.0.0
+bind_port = 8080
+# bind_timeout = 30
+# backlog = 4096
+swift_dir = /etc/swift
+user = swift
+
+# Enables exposing configuration settings via HTTP GET /info.
+# expose_info = true
+
+# Key to use for admin calls that are HMAC signed. Default is empty,
+# which will disable admin calls to /info.
+# admin_key = secret_admin_key
+#
+# Allows the ability to withhold sections from showing up in the public calls
+# to /info. You can withhold subsections by separating the dict level with a
+# ".". The following would cause the sections 'container_quotas' and 'tempurl'
+# to not be listed, and the key max_failed_deletes would be removed from
+# bulk_delete. Default value is 'swift.valid_api_versions' which allows all
+# registered features to be listed via HTTP GET /info except
+# swift.valid_api_versions information
+# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl
+
+# Use an integer to override the number of pre-forked processes that will
+# accept connections. Should default to the number of effective cpu
+# cores in the system. It's worth noting that individual workers will
+# use many eventlet co-routines to service multiple concurrent requests.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# Set the following two lines to enable SSL. This is for testing only.
+# cert_file = /etc/swift/proxy.crt
+# key_file = /etc/swift/proxy.key
+#
+# expiring_objects_container_divisor = 86400
+# expiring_objects_account_name = expiring_objects
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_headers = false
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# This optional suffix (default is empty) is appended to the swift transaction
+# id and makes it easy to figure out which cluster an X-Trans-Id belongs to.
+# This is very useful when one is managing more than one swift cluster.
+# trans_id_suffix =
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
+# cors_allow_origin =
+# strict_cors_mode = True
+#
+# client_timeout = 60
+# eventlet_debug = false
+
+[pipeline:main]
+# This sample pipeline uses tempauth and is used for SAIO dev work and
+# testing. See below for a pipeline using keystone.
+#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
+pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging moon proxy-server
+
+# The following pipeline shows keystone integration. Comment out the one
+# above and uncomment this one. Additional steps for integrating keystone are
+# covered further below in the filter sections for authtoken and keystoneauth.
+#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
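+# NOTE: the active pipeline above differs from both samples: it swaps
+# tempauth for authtoken/keystoneauth and slots a "moon" filter between
+# proxy-logging and proxy-server. Paste requires every name in a pipeline
+# to be defined, so a matching [filter:moon] section must exist further
+# down in this file for the proxy to start.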
+
+[app:proxy-server]
+use = egg:swift#proxy
+account_autocreate = True
+# You can override the default log routing for this app here:
+# set log_name = proxy-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_address = /dev/log
+#
+# log_handoffs = true
+# recheck_account_existence = 60
+# recheck_container_existence = 60
+# object_chunk_size = 65536
+# client_chunk_size = 65536
+#
+# How long the proxy server will wait on responses from the a/c/o servers.
+# node_timeout = 10
+#
+# How long the proxy server will wait for an initial response and to read a
+# chunk of data from the object servers while serving GET / HEAD requests.
+# Timeouts from these requests can be recovered from so setting this to
+# something lower than node_timeout would provide quicker error recovery
+# while allowing for a longer timeout for non-recoverable requests (PUTs).
+# Defaults to node_timeout, should be overridden if node_timeout is set to a
+# high number to prevent client timeouts from firing before the proxy server
+# has a chance to retry.
+# recoverable_node_timeout = node_timeout
+#
+# conn_timeout = 0.5
+#
+# How long to wait for requests to finish after a quorum has been established.
+# post_quorum_timeout = 0.5
+#
+# How long without an error before a node's error count is reset. This will
+# also be how long before a node is reenabled after suppression is triggered.
+# error_suppression_interval = 60
+#
+# How many errors can accumulate before a node is temporarily ignored.
+# error_suppression_limit = 10
+#
+# If set to 'true' any authorized user may create and delete accounts; if
+# 'false' no one, even authorized, can.
+# allow_account_management = false
+#
+# Set object_post_as_copy = false to turn on fast posts where only the metadata
+# changes are stored anew and the original data file is kept in place. This
+# makes for quicker posts.
+# object_post_as_copy = true
+#
+# If set to 'true' authorized accounts that do not yet exist within the Swift
+# cluster will be automatically created.
+# account_autocreate = false
+#
+# If set to a positive value, trying to create a container when the account
+# already has at least this maximum containers will result in a 403 Forbidden.
+# Note: This is a soft limit, meaning a user might exceed the cap for
+# recheck_account_existence before the 403s kick in.
+# max_containers_per_account = 0
+#
+# This is a comma separated list of account hashes that ignore the
+# max_containers_per_account cap.
+# max_containers_whitelist =
+#
+# Comma separated list of Host headers to which the proxy will deny requests.
+# deny_host_headers =
+#
+# Prefix used when automatically creating accounts.
+# auto_create_account_prefix = .
+#
+# Depth of the proxy put queue.
+# put_queue_depth = 10
+#
+# Storage nodes can be chosen at random (shuffle), by using timing
+# measurements (timing), or by using an explicit match (affinity).
+# Using timing measurements may allow for lower overall latency, while
+# using affinity allows for finer control. In both the timing and
+# affinity cases, equally-sorting nodes are still randomly chosen to
+# spread load.
+# The valid values for sorting_method are "affinity", "shuffle", or "timing".
+# sorting_method = shuffle
+#
+# If the "timing" sorting_method is used, the timings will only be valid for
+# the number of seconds configured by timing_expiry.
+# timing_expiry = 300
+#
+# By default on a GET/HEAD swift will connect to the storage nodes one at a
+# time, in a single thread; the order in which they are tried is deliberate,
+# however. If you turn on concurrent_gets below, replica-count threads will
+# be used. With the addition of the concurrency_timeout option, swift will
+# send the GET/HEAD requests to the storage nodes concurrently and answer
+# with the first to respond. With an EC policy the parameter only affects
+# HEAD requests.
+# concurrent_gets = off
+#
+# This parameter controls how long to wait before firing off the next
+# concurrent_get thread. A value of 0 would be fully concurrent, any other
+# number will stagger the firing of the threads. This number should be
+# between 0 and node_timeout. The default is whatever you set for the
+# conn_timeout parameter.
+# concurrency_timeout = 0.5
+#
+# Set to the number of nodes to contact for a normal request. You can use
+# '* replicas' at the end to have it use the number given times the number of
+# replicas for the ring being used for the request.
+# request_node_count = 2 * replicas
+#
+# Which backend servers to prefer on reads. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. The value after the equals is
+# the priority; lower numbers are higher priority.
+#
+# Example: first read from region 1 zone 1, then region 1 zone 2, then
+# anything in region 2, then everything else:
+# read_affinity = r1z1=100, r1z2=200, r2=300
+# Default is empty, meaning no preference.
+# read_affinity =
+#
+# Which backend servers to prefer on writes. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. If this is set, then when
+# handling an object PUT request, some number (see setting
+# write_affinity_node_count) of local backend servers will be tried
+# before any nonlocal ones.
+#
+# Example: try to write to regions 1 and 2 before writing to any other
+# nodes:
+# write_affinity = r1, r2
+# Default is empty, meaning no preference.
+# write_affinity =
+#
+# The number of local (as governed by the write_affinity setting)
+# nodes to attempt to contact first, before any non-local ones. You
+# can use '* replicas' at the end to have it use the number given
+# times the number of replicas for the ring being used for the
+# request.
+# write_affinity_node_count = 2 * replicas
+#
+# These are the headers whose values will only be shown to swift_owners. The
+# exact definition of a swift_owner is up to the auth system in use, but
+# usually indicates administrative responsibilities.
+# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control
+
+[filter:tempauth]
+use = egg:swift#tempauth
+# You can override the default log routing for this filter here:
+# set log_name = tempauth
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# The reseller prefix will verify a token begins with this prefix before even
+# attempting to validate it. Also, with authorization, only Swift storage
+# accounts with this prefix will be authorized by this middleware. Useful if
+# multiple auth systems are in use for one Swift cluster.
+# The reseller_prefix may contain a comma separated list of items. The first
+# item is used for the token as mentioned above. If second and subsequent
+# items exist, the middleware will handle authorization for an account with
+# that prefix. For example, for prefixes "AUTH, SERVICE", a path of
+# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty
+# (blank) reseller prefix is required, it must be first in the list. Two
+# single quote characters indicate an empty (blank) reseller prefix.
+# reseller_prefix = AUTH
+
+#
+# The require_group parameter names a group that must be presented by
+# either X-Auth-Token or X-Service-Token. Usually this parameter is
+# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah).
+# By default, no group is needed. Do not use .admin.
+# require_group =
+
+# The auth prefix will cause requests beginning with this prefix to be routed
+# to the auth subsystem, for granting tokens, etc.
+# auth_prefix = /auth/
+# token_life = 86400
+#
+# This allows middleware higher in the WSGI pipeline to override auth
+# processing, useful for middleware such as tempurl and formpost. If you know
+# you're not going to use such middleware and you want a bit of extra security,
+# you can set this to false.
+# allow_overrides = true
+#
+# This specifies what scheme to return with storage urls:
+# http, https, or default (chooses based on what the server is running as)
+# This can be useful with an SSL load balancer in front of a non-SSL server.
+# storage_url_scheme = default
+#
+# Lastly, you need to list all the accounts/users you want here. The format is:
+# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
+# or if you want underscores in <account> or <user>, you can base64 encode them
+# (with no equal signs) and use this format:
+# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
+# There are special groups of:
+# .reseller_admin = can do anything to any account for this auth
+# .admin = can do anything within the account
+# If neither of these groups is specified, the user can only access containers
+# that have been explicitly allowed for them by a .admin or .reseller_admin.
+# The trailing optional storage_url allows you to specify an alternate url to
+# hand back to the user upon authentication. If not specified, this defaults to
+# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
+# to what the requester would need to use to reach this host.
+# Here are example entries, required for running the tests:
+user_admin_admin = admin .admin .reseller_admin
+user_test_tester = testing .admin
+user_test2_tester2 = testing2 .admin
+user_test_tester3 = testing3
+user_test5_tester5 = testing5 service
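+#
+# For illustration only (hypothetical account and user for the user64 form
+# above): base64('test_acct') with the padding stripped is dGVzdF9hY2N0 and
+# base64('tester') is dGVzdGVy, so an equivalent entry would be:
+# user64_dGVzdF9hY2N0_dGVzdGVy = testing .admin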
+
+# To enable Keystone authentication you first need to configure the auth
+# token middleware. An example is given below; please refer to the
+# keystone documentation for details about the different settings.
+#
+# You'll also need to have the keystoneauth middleware enabled and have it in
+# your main pipeline, as shown in the sample pipeline at the top of this file.
+#
+# The following parameters are known to work with keystonemiddleware v2.3.0
+# (v2.0.0 or later), but checking the latest information in the documentation
+# page[1] is recommended.
+# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration
+#
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+#auth_plugin = password
+auth_type = password
+project_domain_id = default
+user_domain_id = default
+project_name = service
+username = swift
+password = {{ CINDER_PASS }}
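+# NOTE: the 'swift' user above is assigned the CINDER_PASS template variable;
+# this only works if the deployment gives both services the same value, so it
+# is worth verifying against the playbook's group variables.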
+delay_auth_decision = True
+#
+# delay_auth_decision defaults to False, but leaving it as false will
+# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from
+# working. This value must be explicitly set to True.
+# delay_auth_decision = False
+#
+# cache = swift.cache
+# include_service_catalog = False
+#
+[filter:keystoneauth]
+use = egg:swift#keystoneauth
+operator_roles = admin,user
+# The reseller_prefix option lists account namespaces that this middleware is
+# responsible for. The prefix is placed before the Keystone project id.
+# For example, for project 12345678, and prefix AUTH, the account is
+# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...).
+# Several prefixes are allowed by specifying a comma-separated list
+# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a
+# single blank/empty prefix. If an empty prefix is required in a list of
+# prefixes, a value of '' (two single quote characters) indicates a
+# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_')
+# character is appended to the value unless already present.
+# reseller_prefix = AUTH
+#
+# The user must have at least one role named by operator_roles on a
+# project in order to create, delete and modify containers and objects
+# and to set and read privileged headers such as ACLs.
+# If there are several reseller prefix items, you can prefix the
+# parameter so it applies only to those accounts (for example
+# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project>
+# path). If you omit the prefix, the option applies to all reseller
+# prefix items. For the blank/empty prefix, prefix with '' (do not put
+# underscore after the two single quote characters).
+# operator_roles = admin, swiftoperator
+#
+# The reseller admin role has the ability to create and delete accounts
+# reseller_admin_role = ResellerAdmin
+#
+# This allows middleware higher in the WSGI pipeline to override auth
+# processing, useful for middleware such as tempurl and formpost. If you know
+# you're not going to use such middleware and you want a bit of extra security,
+# you can set this to false.
+# allow_overrides = true
+#
+# If the service_roles parameter is present, an X-Service-Token must be
+# present in the request that when validated, grants at least one role listed
+# in the parameter. The X-Service-Token may be scoped to any project.
+# If there are several reseller prefix items, you can prefix the
+# parameter so it applies only to those accounts (for example
+# the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project>
+# path). If you omit the prefix, the option applies to all reseller
+# prefix items. For the blank/empty prefix, prefix with '' (do not put
+# underscore after the two single quote characters).
+# By default, no service_roles are required.
+# service_roles =
+#
+# For backwards compatibility, keystoneauth will match names in cross-tenant
+# access control lists (ACLs) when both the requesting user and the tenant
+# are in the default domain, i.e. the domain to which existing tenants are
+# migrated. The default_domain_id value configured here should be the same as
+# the value used during migration of tenants to keystone domains.
+# default_domain_id = default
+#
+# For a new installation, or an installation in which keystone projects may
+# move between domains, you should disable backwards compatible name matching
+# in ACLs by setting allow_names_in_acls to false:
+# allow_names_in_acls = true
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
+# This facility may be used to temporarily remove a Swift node from a load
+# balancer pool during maintenance or upgrade (remove the file to allow the
+# node back into the load balancer pool).
+# disable_path =
+
+[filter:cache]
+use = egg:swift#memcache
+memcache_servers = {{ memcached_servers }}
+# You can override the default log routing for this filter here:
+# set log_name = cache
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# If not set here, the value for memcache_servers will be read from
+# memcache.conf (see memcache.conf-sample) or lacking that file, it will
+# default to the value below. You can specify multiple servers separated with
+# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
+# follow rfc3986 section-3.2.2, i.e. [::1]:11211)
+# memcache_servers = 127.0.0.1:11211
+#
+# Sets how memcache values are serialized and deserialized:
+# 0 = older, insecure pickle serialization
+# 1 = json serialization but pickles can still be read (still insecure)
+# 2 = json serialization only (secure and the default)
+# If not set here, the value for memcache_serialization_support will be read
+# from /etc/swift/memcache.conf (see memcache.conf-sample).
+# To avoid an instant full cache flush, existing installations should
+# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
+# set to 2 and reload.
+# In the future, the ability to use pickle serialization will be removed.
+# memcache_serialization_support = 2
+#
+# Sets the maximum number of connections to each memcached server per worker
+# memcache_max_connections = 2
+#
+# More options documented in memcache.conf-sample
+
+[filter:ratelimit]
+use = egg:swift#ratelimit
+# You can override the default log routing for this filter here:
+# set log_name = ratelimit
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# clock_accuracy should represent how accurate the proxy servers' system clocks
+# are with each other. 1000 means that all the proxies' clocks are accurate to
+# each other within 1 millisecond. No ratelimit should be higher than the
+# clock accuracy.
+# clock_accuracy = 1000
+#
+# max_sleep_time_seconds = 60
+#
+# log_sleep_time_seconds of 0 means disabled
+# log_sleep_time_seconds = 0
+#
+# allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
+# rate_buffer_seconds = 5
+#
+# account_ratelimit of 0 means disabled
+# account_ratelimit = 0
+
+# DEPRECATED- these will continue to work but will be replaced
+# by the X-Account-Sysmeta-Global-Write-Ratelimit flag.
+# Please see ratelimiting docs for details.
+# these are comma separated lists of account names
+# account_whitelist = a,b
+# account_blacklist = c,d
+
+# with container_ratelimit_x = r
+# for containers of size x limit write requests per second to r. The container
+# rate will be linearly interpolated from the values given. With the values
+# below, a container of size 5 will get a rate of 75.
+# container_ratelimit_0 = 100
+# container_ratelimit_10 = 50
+# container_ratelimit_50 = 20
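+#
+# For illustration, the size-5 rate quoted above interpolates linearly
+# between the size-0 and size-10 values:
+# 100 + (50 - 100) * (5 - 0) / (10 - 0) = 75 requests per second.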
+
+# Similarly to the above container-level write limits, the following will limit
+# container GET (listing) requests.
+# container_listing_ratelimit_0 = 100
+# container_listing_ratelimit_10 = 50
+# container_listing_ratelimit_50 = 20
+
+[filter:domain_remap]
+use = egg:swift#domain_remap
+# You can override the default log routing for this filter here:
+# set log_name = domain_remap
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# storage_domain = example.com
+# path_root = v1
+
+# Browsers can convert a host header to lowercase, so check that reseller
+# prefix on the account is the correct case. This is done by comparing the
+# items in the reseller_prefixes config option to the found prefix. If they
+# match except for case, the item from reseller_prefixes will be used
+# instead of the found reseller prefix. When none match, the default reseller
+# prefix is used. When no default reseller prefix is configured, any request
+# with an account prefix not in that list will be ignored by this middleware.
+# reseller_prefixes = AUTH
+# default_reseller_prefix =
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+# You can override the default log routing for this filter here:
+# set log_name = catch_errors
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+[filter:cname_lookup]
+# Note: this middleware requires python-dnspython
+use = egg:swift#cname_lookup
+# You can override the default log routing for this filter here:
+# set log_name = cname_lookup
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# Specify the storage_domain that matches your cloud; multiple domains
+# can be specified, separated by commas.
+# storage_domain = example.com
+#
+# lookup_depth = 1
+
+# Note: Put staticweb just after your auth filter(s) in the pipeline
+[filter:staticweb]
+use = egg:swift#staticweb
+# You can override the default log routing for this filter here:
+# set log_name = staticweb
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
+[filter:tempurl]
+use = egg:swift#tempurl
+# The methods allowed with Temp URLs.
+# methods = GET HEAD PUT POST DELETE
+#
+# The headers to remove from incoming requests. Simply a whitespace delimited
+# list of header names and names can optionally end with '*' to indicate a
+# prefix match. incoming_allow_headers is a list of exceptions to these
+# removals.
+# incoming_remove_headers = x-timestamp
+#
+# The headers allowed as exceptions to incoming_remove_headers. Simply a
+# whitespace delimited list of header names and names can optionally end with
+# '*' to indicate a prefix match.
+# incoming_allow_headers =
+#
+# The headers to remove from outgoing responses. Simply a whitespace delimited
+# list of header names and names can optionally end with '*' to indicate a
+# prefix match. outgoing_allow_headers is a list of exceptions to these
+# removals.
+# outgoing_remove_headers = x-object-meta-*
+#
+# The headers allowed as exceptions to outgoing_remove_headers. Simply a
+# whitespace delimited list of header names and names can optionally end with
+# '*' to indicate a prefix match.
+# outgoing_allow_headers = x-object-meta-public-*
+
+# Note: Put formpost just before your auth filter(s) in the pipeline
+[filter:formpost]
+use = egg:swift#formpost
+
+# Note: Just needs to be placed before the proxy-server in the pipeline.
+[filter:name_check]
+use = egg:swift#name_check
+# forbidden_chars = '"`<>
+# maximum_length = 255
+# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
+
+[filter:list-endpoints]
+use = egg:swift#list_endpoints
+# list_endpoints_path = /endpoints/
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+# If not set, logging directives from [DEFAULT] without "access_" will be used
+# access_log_name = swift
+# access_log_facility = LOG_LOCAL0
+# access_log_level = INFO
+# access_log_address = /dev/log
+#
+# If set, access_log_udp_host will override access_log_address
+# access_log_udp_host =
+# access_log_udp_port = 514
+#
+# You can use log_statsd_* from [DEFAULT] or override them here:
+# access_log_statsd_host =
+# access_log_statsd_port = 8125
+# access_log_statsd_default_sample_rate = 1.0
+# access_log_statsd_sample_rate_factor = 1.0
+# access_log_statsd_metric_prefix =
+# access_log_headers = false
+#
+# If access_log_headers is True and access_log_headers_only is set, only
+# these headers are logged. Multiple headers can be defined as a comma-separated
+# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
+# access_log_headers_only =
+#
+# By default, the X-Auth-Token is logged. To obscure the value,
+# set reveal_sensitive_prefix to the number of characters to log.
+# For example, if set to 12, only the first 12 characters of the
+# token appear in the log. An unauthorized access of the log file
+# won't allow unauthorized usage of the token. However, the first
+# 12 or so characters is unique enough that you can trace/debug
+# token usage. Set to 0 to suppress the token completely (replaced
+# by '...' in the log).
+# Note: reveal_sensitive_prefix will not affect the value
+# logged with access_log_headers=True.
+# reveal_sensitive_prefix = 16
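+#
+# For illustration (hypothetical token value): with reveal_sensitive_prefix
+# set to 12, a token of AUTH_tk0123456789abcdef would be logged as
+# AUTH_tk01234...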
+#
+# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
+# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
+# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
+#
+# Note: The double proxy-logging in the pipeline is not a mistake. The
+# left-most proxy-logging is there to log requests that were handled in
+# middleware and never made it through to the right-most middleware (and
+# proxy server). Double logging is prevented for normal requests. See
+# proxy-logging docs.
+
+# Note: Put before both ratelimit and auth in the pipeline.
+[filter:bulk]
+use = egg:swift#bulk
+# max_containers_per_extraction = 10000
+# max_failed_extractions = 1000
+# max_deletes_per_request = 10000
+# max_failed_deletes = 1000
+
+# In order to keep a connection active during a potentially long bulk request,
+# Swift may return whitespace prepended to the actual response body. This
+# whitespace will be yielded no more than every yield_frequency seconds.
+# yield_frequency = 10
+
+# Note: The following parameter is used during a bulk delete of objects and
+# their container. Such a delete can fail at first because it is very likely
+# that not all replicated objects have been deleted by the time the middleware
+# gets a successful response. The number of retries can be configured below;
+# the wait between retries is 1.5**retry seconds.
+
+# delete_container_retry_count = 0
+
+# Note: Put after auth and staticweb in the pipeline.
+[filter:slo]
+use = egg:swift#slo
+# max_manifest_segments = 1000
+# max_manifest_size = 2097152
+#
+# Rate limiting applies only to segments smaller than this size (bytes).
+# rate_limit_under_size = 1048576
+#
+# Start rate-limiting SLO segment serving after the Nth small segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 1
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
+
+# Note: Put after auth and staticweb in the pipeline.
+# If you don't put it in the pipeline, it will be inserted for you.
+[filter:dlo]
+use = egg:swift#dlo
+# Start rate-limiting DLO segment serving after the Nth segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 1
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
+
+# Note: Put after auth in the pipeline.
+[filter:container-quotas]
+use = egg:swift#container_quotas
+
+# Note: Put after auth in the pipeline.
+[filter:account-quotas]
+use = egg:swift#account_quotas
+
+[filter:gatekeeper]
+use = egg:swift#gatekeeper
+# Set this to false if you want to allow clients to set arbitrary X-Timestamps
+# on uploaded objects. This may be used to preserve timestamps when migrating
+# from a previous storage system, but risks allowing users to upload
+# difficult-to-delete data.
+# shunt_inbound_x_timestamp = true
+#
+# You can override the default log routing for this filter here:
+# set log_name = gatekeeper
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+[filter:container_sync]
+use = egg:swift#container_sync
+# Set this to false if you want to disallow any full url values to be set for
+# any new X-Container-Sync-To headers. This will keep any new full urls from
+# coming in, but won't change any existing values already in the cluster.
+# Updating those will have to be done manually, as knowing what the true realm
+# endpoint should be cannot always be guessed.
+# allow_full_urls = true
+# Set this to specify this cluster's //realm/cluster as "current" in /info
+# current = //REALM/CLUSTER
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after catch_errors, gatekeeper and healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers; the profiler should inherit
+# from the Python standard profiler. Currently supported values are
+# 'cProfile' and 'eventlet.green.profile'.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/proxy.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# The profile data will be dumped to local disk, using the naming rule above,
+# at this interval.
+# dump_interval = 5.0
+#
+# Be careful: this option makes the profiler dump data into timestamped
+# files, which means many files will pile up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shuts down.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
+
+# Note: Put after slo, dlo in the pipeline.
+# If you don't put it in the pipeline, it will be inserted automatically.
+[filter:versioned_writes]
+use = egg:swift#versioned_writes
+# Enables using versioned writes middleware and exposing configuration
+# settings via HTTP GET /info.
+# WARNING: Setting this option bypasses the "allow_versions" option
+# in the container configuration file, which will eventually be
+# deprecated. See the documentation for more details.
+# allow_versioned_writes = false
+
+
+[filter:moon]
+paste.filter_factory = keystonemiddleware.moon_agent:filter_factory
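+# There is no upstream sample documentation for this filter; judging by the
+# option names, authz_login/authz_password are the credentials the moon agent
+# uses for its authorization calls, and logfile is where the agent writes its
+# log (the directory must exist and be writable by the proxy user).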
+authz_login = admin
+authz_password = password
+logfile = /var/log/moon/keystonemiddleware.log
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/rsyncd.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/rsyncd.conf
new file mode 100644
index 00000000..703c55eb
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/rsyncd.conf
@@ -0,0 +1,23 @@
+uid = swift
+gid = swift
+log file = /var/log/rsyncd.log
+pid file = /var/run/rsyncd.pid
+address = {{ internal_ip }}
+
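+# One rsync module per ring: the swift account, container and object
+# replicators push data through the modules below, all rooted at /srv/node/.
+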
+[account]
+max connections = 2
+path = /srv/node/
+read only = False
+lock file = /var/lock/account.lock
+
+[container]
+max connections = 2
+path = /srv/node/
+read only = False
+lock file = /var/lock/container.lock
+
+[object]
+max connections = 2
+path = /srv/node/
+read only = False
+lock file = /var/lock/object.lock
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/swift.conf b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/swift.conf
new file mode 100644
index 00000000..9a31501b
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/templates/swift.conf
@@ -0,0 +1,183 @@
+[swift-hash]
+
+# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
+# hashing algorithm when determining data placement in the cluster.
+# These values should remain secret and MUST NOT change
+# once a cluster has been deployed.
+# Use only printable chars (python -c "import string; print(string.printable)")
+
+swift_hash_path_suffix = 7c6a7cd34d07aed5
+swift_hash_path_prefix = 0c4629166f4de441
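+
+# For illustration only: values of the same shape as the ones above can be
+# generated with e.g. `openssl rand -hex 8` (run once per value), but never
+# regenerate them on a cluster that already holds data.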
+
+# storage policies are defined here and determine various characteristics
+# about how objects are stored and treated. Policies are specified by name on
+# a per container basis. Names are case-insensitive. The policy index is
+# specified in the section header and is used internally. The policy with
+# index 0 is always used for legacy containers and can be given a name for use
+# in metadata; however, the ring file name will always be 'object.ring.gz' for
+# backwards compatibility. If no policies are defined a policy with index 0
+# will be automatically created for backwards compatibility and given the name
+# Policy-0. A default policy is used when creating new containers when no
+# policy is specified in the request. If no other policies are defined the
+# policy with index 0 will be declared the default. If multiple policies are
+# defined you must define a policy with index 0 and you must specify a
+# default. It is recommended you always define a section for
+# storage-policy:0. Aliases are not required when defining a storage policy.
+#
+# A 'policy_type' argument is also supported but is not mandatory. Default
+# policy type 'replication' is used when 'policy_type' is unspecified.
+[storage-policy:0]
+name = Policy-0
+default = yes
+#policy_type = replication
+aliases = yellow, orange
+
+# The following section would declare a policy called 'silver'; the number of
+# replicas will be determined by how the ring is built. In this example the
+# 'silver' policy could have a lower or higher number of replicas than the
+# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You
+# may only specify one storage policy section as the default. If you changed
+# this section to specify 'silver' as the default, a client creating a new
+# container without a policy specified would get the 'silver' policy, because
+# this config specifies it as the default. However if a legacy container
+# (one created with a pre-policy version of swift) is accessed, it is known
+# implicitly to be assigned to the policy with index 0 as opposed to the
+# current default. Note that even without specifying any aliases, a policy
+# always has at least the default name stored in aliases because this field is
+# used to contain all human readable names for a storage policy.
+#
+#[storage-policy:1]
+#name = silver
+#policy_type = replication
+
+# The following declares a storage policy of type 'erasure_coding' which uses
+# Erasure Coding for data reliability. Please refer to Swift documentation for
+# details on how the 'erasure_coding' storage policy is implemented.
+#
+# Swift uses PyECLib, a Python Erasure coding API library, for encode/decode
+# operations. Please refer to Swift documentation for details on how to
+# install PyECLib.
+#
+# When defining an EC policy, 'policy_type' needs to be 'erasure_coding' and
+# EC configuration parameters 'ec_type', 'ec_num_data_fragments' and
+# 'ec_num_parity_fragments' must be specified. 'ec_type' is chosen from the
+# list of EC backends supported by PyECLib. The ring configured for the
+# storage policy must have its "replica" count configured to
+# 'ec_num_data_fragments' + 'ec_num_parity_fragments' - this requirement is
+# validated when services start. 'ec_object_segment_size' is the amount of
+# data that will be buffered up before feeding a segment into the
+# encoder/decoder. More information about these configuration options and
+# supported `ec_type` schemes is available in the Swift documentation. Please
+# refer to Swift documentation for details on how to configure EC policies.
+#
+# The example 'deepfreeze10-4' policy defined below is a _sample_
+# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity'
+# fragments. 'ec_type' defines the Erasure Coding scheme.
+# 'liberasurecode_rs_vand' (Reed-Solomon Vandermonde) is used as an example
+# below.
+#
+#[storage-policy:2]
+#name = deepfreeze10-4
+#aliases = df10-4
+#policy_type = erasure_coding
+#ec_type = liberasurecode_rs_vand
+#ec_num_data_fragments = 10
+#ec_num_parity_fragments = 4
+#ec_object_segment_size = 1048576
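+#
+# For illustration, with the sample 'deepfreeze10-4' policy above the ring
+# for storage-policy:2 (object-2.ring.gz) would have to be built with a
+# replica count of 10 + 4 = 14 (ec_num_data_fragments +
+# ec_num_parity_fragments); as noted above, services validate this at startup.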
+
+
+# The swift-constraints section sets the basic constraints on data
+# saved in the swift cluster. These constraints are automatically
+# published by the proxy server in responses to /info requests.
+
+[swift-constraints]
+
+# max_file_size is the largest "normal" object that can be saved in
+# the cluster. This is also the limit on the size of each segment of
+# a "large" object when using the large object manifest support.
+# This value is set in bytes. Setting it to lower than 1MiB will cause
+# some tests to fail. It is STRONGLY recommended to leave this value at
+# the default (5 * 2**30 + 2).
+
+#max_file_size = 5368709122
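+# (For reference: 5 * 2**30 + 2 = 5368709122 bytes, i.e. 5 GiB plus 2 bytes.)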
+
+
+# max_meta_name_length is the max number of bytes in the utf8 encoding
+# of the name portion of a metadata header.
+
+#max_meta_name_length = 128
+
+
+# max_meta_value_length is the max number of bytes in the utf8 encoding
+# of a metadata value
+
+#max_meta_value_length = 256
+
+
+# max_meta_count is the max number of metadata keys that can be stored
+# on a single account, container, or object
+
+#max_meta_count = 90
+
+
+# max_meta_overall_size is the max number of bytes in the utf8 encoding
+# of the metadata (keys + values)
+
+#max_meta_overall_size = 4096
+
+# max_header_size is the max number of bytes in the utf8 encoding of each
+# header. The default is 8192 because eventlet uses 8192 as the max size of a
+# header line. This value may need to be increased when using identity
+# v3 API tokens including more than 7 catalog entries.
+# See also include_service_catalog in proxy-server.conf-sample
+# (documented in overview_auth.rst)
+
+#max_header_size = 8192
+
+
+# By default the maximum number of allowed headers depends on the number of max
+# allowed metadata settings plus a default value of 32 for regular http
+# headers. If for some reason this is not enough (custom middleware for
+# example) it can be increased with the extra_header_count constraint.
+
+#extra_header_count = 0
+
+
+# max_object_name_length is the max number of bytes in the utf8 encoding
+# of an object name
+
+#max_object_name_length = 1024
+
+
+# container_listing_limit is the default (and max) number of items
+# returned for a container listing request
+
+#container_listing_limit = 10000
+
+
+# account_listing_limit is the default (and max) number of items returned
+# for an account listing request
+#account_listing_limit = 10000
+
+
+# max_account_name_length is the max number of bytes in the utf8 encoding
+# of an account name
+
+#max_account_name_length = 256
+
+
+# max_container_name_length is the max number of bytes in the utf8 encoding
+# of a container name
+
+#max_container_name_length = 256
+
+
+# By default all REST API calls should use "v1" or "v1.0" as the version string,
+# for example "/v1/account". This can be manually overridden to make this
+# backward-compatible, in case a different version string has been used before.
+# Use a comma-separated list in case of multiple allowed versions, for example
+# valid_api_versions = v0,v1,v2
+# This is only enforced for account, container and object requests. The allowed
+# api versions are by default excluded from /info.
+
+# valid_api_versions = v1,v1.0
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/Debian.yml
new file mode 100644
index 00000000..39aea32d
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/Debian.yml
@@ -0,0 +1,27 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+controller_packages:
+ - swift
+ - swift-proxy
+ - python-swiftclient
+ - python-keystoneclient
+ - memcached
+
+compute_packages:
+ - xfsprogs
+ - rsync
+ - swift
+ - swift-account
+ - swift-container
+ - swift-object
+
+
+services: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml
new file mode 100644
index 00000000..540068da
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages_noarch: []
+
+services_noarch: []
+
+controller_packages_noarch: []
+compute_packages_noarch: []