Diffstat (limited to 'deploy/adapters/ansible/roles')
-rw-r--r--  deploy/adapters/ansible/roles/aodh/handlers/main.yml | 13
-rw-r--r--  deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml | 14
-rw-r--r--  deploy/adapters/ansible/roles/aodh/tasks/aodh_install.yml | 28
-rw-r--r--  deploy/adapters/ansible/roles/aodh/tasks/main.yml | 23
-rw-r--r--  deploy/adapters/ansible/roles/aodh/templates/aodh.conf.j2 | 56
-rw-r--r--  deploy/adapters/ansible/roles/aodh/vars/Debian.yml | 22
-rw-r--r--  deploy/adapters/ansible/roles/aodh/vars/RedHat.yml | 22
-rw-r--r--  deploy/adapters/ansible/roles/aodh/vars/main.yml | 12
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml | 6
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml | 37
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml | 35
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml | 42
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.conf.j2 | 38
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/templates/nova.conf.j2 | 7
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml | 10
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml | 10
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml | 12
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_config.yml | 71
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml | 35
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml | 51
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.conf.j2 | 52
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.conf.j2 | 2
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-api.conf.j2 | 2
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-registry.conf.j2 | 2
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2 | 25
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml | 17
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml | 31
-rw-r--r--  deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/ceph-mon/templates/ceph-mon.service | 22
-rw-r--r--  deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml | 19
-rw-r--r--  deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml | 7
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf | 63
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf | 45
-rw-r--r--  deploy/adapters/ansible/roles/common/templates/pip.conf | 2
-rw-r--r--  deploy/adapters/ansible/roles/common/vars/Debian.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/congress/files/congress.service | 19
-rw-r--r--  deploy/adapters/ansible/roles/congress/handlers/main.yml | 12
-rw-r--r--  deploy/adapters/ansible/roles/congress/tasks/congress_config.yml | 15
-rw-r--r--  deploy/adapters/ansible/roles/congress/tasks/congress_db.yml | 28
-rw-r--r--  deploy/adapters/ansible/roles/congress/tasks/congress_install.yml | 37
-rw-r--r--  deploy/adapters/ansible/roles/congress/tasks/main.yml | 16
-rw-r--r--  deploy/adapters/ansible/roles/congress/templates/api-paste.ini | 34
-rw-r--r--  deploy/adapters/ansible/roles/congress/templates/congress.conf | 510
-rw-r--r--  deploy/adapters/ansible/roles/congress/templates/policy.json | 6
-rw-r--r--  deploy/adapters/ansible/roles/congress/vars/Debian.yml | 21
-rw-r--r--  deploy/adapters/ansible/roles/congress/vars/main.yml | 12
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/tasks/main.yml | 16
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/templates/local_settings.py.j2 | 326
-rwxr-xr-x  deploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2 | 5
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/vars/Debian.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml | 43
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/database/templates/data.j2 | 6
-rw-r--r--  deploy/adapters/ansible/roles/database/vars/Debian.yml | 18
-rw-r--r--  deploy/adapters/ansible/roles/database/vars/main.yml | 7
-rw-r--r--  deploy/adapters/ansible/roles/ext-network/handlers/main.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/ext-network/tasks/main.yml | 58
-rw-r--r--  deploy/adapters/ansible/roles/ext-network/vars/Debian.yml | 18
-rw-r--r--  deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml | 17
-rw-r--r--  deploy/adapters/ansible/roles/ext-network/vars/main.yml | 10
-rw-r--r--  deploy/adapters/ansible/roles/glance/tasks/nfs.yml | 6
-rw-r--r--  deploy/adapters/ansible/roles/glance/templates/glance-api.conf | 28
-rw-r--r--  deploy/adapters/ansible/roles/glance/templates/glance-registry.conf | 12
-rw-r--r--  deploy/adapters/ansible/roles/glance/vars/Debian.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/glance/vars/RedHat.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/ha/templates/haproxy.cfg | 23
-rw-r--r--  deploy/adapters/ansible/roles/heat/tasks/heat_install.yml | 13
-rw-r--r--  deploy/adapters/ansible/roles/heat/templates/heat.j2 | 39
-rw-r--r--  deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml | 131
-rw-r--r--  deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml | 93
-rw-r--r--  deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml | 20
-rw-r--r--  deploy/adapters/ansible/roles/keystone/tasks/main.yml | 7
-rw-r--r--  deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v2.sh | 15
-rw-r--r--  deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh | 11
-rw-r--r--  deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh | 10
-rw-r--r--  deploy/adapters/ansible/roles/keystone/templates/keystone.conf | 51
-rw-r--r--  deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 | 10
-rw-r--r--  deploy/adapters/ansible/roles/keystone/vars/Debian.yml | 5
-rw-r--r--  deploy/adapters/ansible/roles/keystone/vars/main.yml | 76
-rw-r--r--  deploy/adapters/ansible/roles/moon/files/controllers.py | 1062
-rw-r--r--  deploy/adapters/ansible/roles/moon/files/deb.conf | 11
-rw-r--r--  deploy/adapters/ansible/roles/moon/files/get_deb_depends.py | 27
-rwxr-xr-x  deploy/adapters/ansible/roles/moon/handlers/main.yml | 12
-rw-r--r--  deploy/adapters/ansible/roles/moon/tasks/main.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml | 20
-rw-r--r--  deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml | 238
-rw-r--r--  deploy/adapters/ansible/roles/moon/tasks/moon.yml | 16
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh | 15
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/api-paste.ini | 106
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh | 13
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini | 96
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/keystone.conf | 59
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/proxy-server.conf | 775
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2 | 46
-rw-r--r--  deploy/adapters/ansible/roles/moon/vars/Debian.yml | 168
-rw-r--r--  deploy/adapters/ansible/roles/moon/vars/main.yml | 172
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml | 8
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf | 105
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/tasks/main.yml | 6
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/nova-compute/tasks/main.yml | 21
-rw-r--r--  deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf | 2
-rw-r--r--  deploy/adapters/ansible/roles/nova-compute/templates/nova.conf | 105
-rw-r--r--  deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml | 6
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/files/opendaylight.service | 21
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml | 7
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml | 9
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml | 19
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml | 20
-rw-r--r-- [-rwxr-xr-x]  deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml | 8
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml | 34
-rw-r--r-- [-rwxr-xr-x]  deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml | 61
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml | 20
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh | 2
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/templates/moon-environment | 3
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/templates/settings.xml | 82
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml | 8
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/vars/main.yml | 5
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml | 4
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml | 75
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml | 76
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/vars/main.yml | 13
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml | 10
-rw-r--r--  deploy/adapters/ansible/roles/secgroup/templates/neutron.j2 | 4
-rw-r--r--  deploy/adapters/ansible/roles/secgroup/templates/nova.j2 | 2
-rw-r--r--  deploy/adapters/ansible/roles/secgroup/vars/Debian.yml | 4
-rwxr-xr-x  deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init | 22
-rwxr-xr-x  deploy/adapters/ansible/roles/storage/files/storage | 8
-rw-r--r--  deploy/adapters/ansible/roles/swift/tasks/main.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml | 80
-rw-r--r--  deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml | 34
-rw-r--r--  deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml | 93
-rw-r--r--  deploy/adapters/ansible/roles/swift/tasks/swift.yml | 79
-rw-r--r--  deploy/adapters/ansible/roles/swift/templates/account-server.conf | 200
-rw-r--r--  deploy/adapters/ansible/roles/swift/templates/container-server.conf | 229
-rw-r--r--  deploy/adapters/ansible/roles/swift/templates/object-server.conf | 347
-rw-r--r--  deploy/adapters/ansible/roles/swift/templates/proxy-server.conf | 764
-rw-r--r--  deploy/adapters/ansible/roles/swift/templates/rsyncd.conf | 23
-rw-r--r--  deploy/adapters/ansible/roles/swift/templates/swift.conf | 183
-rw-r--r--  deploy/adapters/ansible/roles/tacker/templates/tacker.j2 | 421
150 files changed, 8191 insertions(+), 626 deletions(-)
diff --git a/deploy/adapters/ansible/roles/aodh/handlers/main.yml b/deploy/adapters/ansible/roles/aodh/handlers/main.yml
new file mode 100644
index 00000000..b3399e0c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/aodh/handlers/main.yml
@@ -0,0 +1,13 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart aodh services
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: services | union(services_noarch)
+
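Bare variable expressions in with_items (as in services | union(services_noarch) above) are accepted by the Ansible 1.x line these roles were written against, but Ansible 2.x deprecates them in favor of fully quoted Jinja2 expressions. A minimal sketch of the same handler in the newer form:

    - name: restart aodh services
      service: name={{ item }} state=restarted enabled=yes
      with_items: "{{ services | union(services_noarch) }}"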
diff --git a/deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml b/deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml
new file mode 100644
index 00000000..e60d5338
--- /dev/null
+++ b/deploy/adapters/ansible/roles/aodh/tasks/aodh_config.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: aodh db sync
+ shell: su -s /bin/sh -c "aodh-dbsync" aodh
+ notify:
+ - restart aodh services
+
diff --git a/deploy/adapters/ansible/roles/aodh/tasks/aodh_install.yml b/deploy/adapters/ansible/roles/aodh/tasks/aodh_install.yml
new file mode 100644
index 00000000..d8a82270
--- /dev/null
+++ b/deploy/adapters/ansible/roles/aodh/tasks/aodh_install.yml
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: install aodh packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: packages | union(packages_noarch)
+
+- name: update aodh conf
+ template: src={{ item }} dest=/etc/aodh/aodh.conf backup=yes
+ with_items:
+ - aodh.conf.j2
+ notify:
+ - restart aodh services
+
+- name: write services to monitor list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: services | union(services_noarch)
+
+- name: remove default sqlite db
+ shell: rm /var/lib/aodh/aodh.sqlite || touch aodh.sqlite.db.removed
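The rm ... || touch guard keeps reruns from failing once the sqlite file is gone, but it always reports changed and drops its marker file in whatever directory the task happens to run from. A sketch of an idempotent equivalent using the stock file module, with the same path:

    - name: remove default sqlite db
      file:
        path: /var/lib/aodh/aodh.sqlite
        state: absent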
diff --git a/deploy/adapters/ansible/roles/aodh/tasks/main.yml b/deploy/adapters/ansible/roles/aodh/tasks/main.yml
new file mode 100644
index 00000000..9b61915f
--- /dev/null
+++ b/deploy/adapters/ansible/roles/aodh/tasks/main.yml
@@ -0,0 +1,23 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include: aodh_install.yml
+ tags:
+ - install
+ - aodh_install
+ - aodh
+
+- include: aodh_config.yml
+ when: inventory_hostname == groups['controller'][0]
+ tags:
+ - config
+ - aodh_config
+ - aodh
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/roles/aodh/templates/aodh.conf.j2
new file mode 100644
index 00000000..d9eb0599
--- /dev/null
+++ b/deploy/adapters/ansible/roles/aodh/templates/aodh.conf.j2
@@ -0,0 +1,56 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
+[DEFAULT]
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
+rpc_backend = rabbit
+
+bind_host = {{ internal_ip }}
+bind_port = 8042
+auth_strategy = keystone
+debug = True
+
+[api]
+host = {{ internal_ip }}
+
+[database]
+connection = mysql://aodh:{{ AODH_DBPASS }}@{{ db_host }}/aodh
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = aodh
+password = {{ AODH_PASS }}
+
+identity_uri = http://{{ internal_vip.ip }}:35357
+auth_plugin = password
+project_domain_id = default
+user_domain_id = default
+token_cache_time = 300
+revocation_cache_time = 60
+
+[oslo_messaging_rabbit]
+rabbit_hosts = {{ internal_vip.ip }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[service_credentials]
+auth_type = password
+auth_url = http://{{ internal_vip.ip }}:5000/v3
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = aodh
+password = {{ AODH_PASS }}
+interface = internalURL
+region_name = RegionOne
+
+endpoint_type = internalURL
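The {% set _ = memcached_servers.append(...) %} loop at the top of this template is the usual Jinja2 idiom for calling a list method without emitting output; with two proxy hosts at, say, 10.1.0.50 and 10.1.0.51 (illustrative addresses), it renders memcached_servers = 10.1.0.50:11211,10.1.0.51:11211. Note also that [keystone_authtoken] carries both the current auth_type/auth_url options and the legacy identity_uri/auth_plugin ones, which lets either generation of keystonemiddleware consume the same file.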
diff --git a/deploy/adapters/ansible/roles/aodh/vars/Debian.yml b/deploy/adapters/ansible/roles/aodh/vars/Debian.yml
new file mode 100644
index 00000000..9bf4ad7a
--- /dev/null
+++ b/deploy/adapters/ansible/roles/aodh/vars/Debian.yml
@@ -0,0 +1,22 @@
+#############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+packages:
+ - aodh-api
+ - aodh-evaluator
+ - aodh-notifier
+ - aodh-listener
+ - aodh-expirer
+ - python-aodhclient
+
+services:
+ - aodh-api
+ - aodh-notifier
+ - aodh-evaluator
+ - aodh-listener
diff --git a/deploy/adapters/ansible/roles/aodh/vars/RedHat.yml b/deploy/adapters/ansible/roles/aodh/vars/RedHat.yml
new file mode 100644
index 00000000..3d25bd6c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/aodh/vars/RedHat.yml
@@ -0,0 +1,22 @@
+#############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+packages:
+ - openstack-aodh-api
+ - openstack-aodh-evaluator
+ - openstack-aodh-notifier
+ - openstack-aodh-listener
+ - openstack-aodh-expirer
+ - python-aodhclient
+
+services:
+ - openstack-aodh-api
+ - openstack-aodh-notifier
+ - openstack-aodh-evaluator
+ - openstack-aodh-listener
diff --git a/deploy/adapters/ansible/roles/aodh/vars/main.yml b/deploy/adapters/ansible/roles/aodh/vars/main.yml
new file mode 100644
index 00000000..b17f6ed0
--- /dev/null
+++ b/deploy/adapters/ansible/roles/aodh/vars/main.yml
@@ -0,0 +1,12 @@
+##############################################################################
+## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+---
+packages_noarch: []
+
+services_noarch: []
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml b/deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml
index c973d7df..10b7c683 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/handlers/main.yml
@@ -7,6 +7,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- name: restart ceilometer relation service
+- name: restart ceilometer service
service: name={{ item }} state=restarted enabled=yes
with_items: ceilometer_services
+
+- name: restart nova service
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: nova_services
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml
new file mode 100644
index 00000000..b429d65b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_config.yml
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: copy configs
+ template:
+ src: "{{ item }}"
+ dest: /opt/os_templates
+ with_items:
+ - ceilometer.conf.j2
+ - nova.conf.j2
+
+- name: update ceilometer configs
+ shell: crudini --merge {{ item.dest }} < /opt/os_templates/{{ item.src }}
+ with_items:
+ - src: nova.conf.j2
+ dest: /etc/nova/nova.conf
+ notify: restart nova service
+
+- name: delete config
+ file:
+ path: /opt/os_templates/nova.conf.j2
+ state: absent
+
+- name: write services to monitor list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: ceilometer_services
+
+- meta: flush_handlers
+
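Unlike template:, which would replace the target file wholesale, crudini --merge overlays only the sections and keys present in the rendered snippet onto the existing /etc/nova/nova.conf; that is why a seven-line template is enough here. For a single key, the stock ini_file module is a crudini-free equivalent, sketched below:

    - name: enable instance usage audit (single-key sketch)
      ini_file:
        dest: /etc/nova/nova.conf
        section: DEFAULT
        option: instance_usage_audit
        value: "True"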
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml
new file mode 100644
index 00000000..0f2ba3d2
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/ceilometer_install.yml
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install ceilometer packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: ceilometer_packages | union(packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: update ceilometer configs
+ template:
+ src: ceilometer.conf.j2
+ dest: /etc/ceilometer/ceilometer.conf
+ backup: yes
+ notify: restart ceilometer service
+
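The policy-rc.d dance around the package install is the standard Debian mechanism for keeping daemons down during provisioning: while /usr/sbin/policy-rc.d exists and exits 101, invoke-rc.d refuses to start any service, so the freshly installed ceilometer agents stay stopped until their configuration is in place; removing the file afterwards restores normal service startup.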
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml
index 864ea97a..1e3c04d7 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml
@@ -7,38 +7,16 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- include_vars: "{{ ansible_os_family }}.yml"
+- include: ceilometer_install.yml
+ tags:
+ - install
+ - ceilometer_install
+ - ceilometer
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install ceilometer packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: ceilometer_packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: copy ceilometer configs
- template: src={{ item.src}} dest=/opt/os_templates
- with_items: "{{ ceilometer_configs_templates }}"
-
-- name: update ceilometer configs
- shell: crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }}
- with_subelements:
- - ceilometer_configs_templates
- - dest
- notify: restart ceilometer relation service
-
-- name: write services to monitor list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: ceilometer_services
+- include: ceilometer_config.yml
+ tags:
+ - config
+ - ceilometer_config
+ - ceilometer
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.conf.j2
new file mode 100644
index 00000000..bffd6068
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.conf.j2
@@ -0,0 +1,38 @@
+[DEFAULT]
+verbose = True
+rpc_backend = rabbit
+auth_strategy = keystone
+
+[publisher]
+metering_secret = {{ metering_secret }}
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = ceilometer
+password = {{ CEILOMETER_PASS }}
+
+auth_plugin = password
+project_domain_id = default
+user_domain_id = default
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[service_credentials]
+auth_url = http://{{ internal_vip.ip }}:5000
+project_domain_id = default
+user_domain_id = default
+auth_type = password
+username = ceilometer
+project_name = service
+password = {{ CEILOMETER_PASS }}
+interface = internalURL
+region_name = RegionOne
+
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/templates/nova.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_compute/templates/nova.conf.j2
new file mode 100644
index 00000000..68ffdc0a
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/templates/nova.conf.j2
@@ -0,0 +1,7 @@
+[DEFAULT]
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+
+[oslo_messaging_notifications]
+driver = messagingv2
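These are the nova-side switches ceilometer's compute meters depend on: instance_usage_audit with instance_usage_audit_period = hour makes nova emit periodic usage audit notifications, notify_on_state_change = vm_and_task_state adds a notification on every VM and task state transition, and driver = messagingv2 publishes all of them on the oslo messaging bus where the ceilometer notification agent listens.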
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml b/deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml
index 550d14f5..1bf3956f 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/vars/Debian.yml
@@ -12,12 +12,6 @@ ceilometer_packages:
ceilometer_services:
- ceilometer-agent-compute
- - nova-compute
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: nova.j2
- dest:
- - /etc/nova/nova.conf
+nova_services:
+ - nova-compute
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml b/deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml
index 5a9128cd..c5778a49 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/vars/RedHat.yml
@@ -14,12 +14,4 @@ ceilometer_packages:
ceilometer_services:
- openstack-ceilometer-compute
- - openstack-nova-compute
-
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: nova.j2
- dest:
- - /etc/nova/nova.conf
+ - openstack-nova-compute
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml b/deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml
index c973d7df..a3bfb85d 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/handlers/main.yml
@@ -7,6 +7,16 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- name: restart ceilometer relation service
+- name: restart ceilometer service
service: name={{ item }} state=restarted enabled=yes
with_items: ceilometer_services
+
+- name: restart glance_cinder service
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: glance_cinder_services
+
+- name: reload apache server
+ service: name=apache2 state=reloaded
+
+- name: restart apache server
+ service: name=apache2 state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_config.yml b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_config.yml
new file mode 100644
index 00000000..7f5209c1
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_config.yml
@@ -0,0 +1,71 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: update apache2 configs
+ template:
+ src: wsgi-ceilometer.conf.j2
+ dest: /etc/apache2/sites-available/ceilometer.conf
+ notify: reload apache server
+
+- name: enable ceilometer server
+ file:
+ src: /etc/apache2/sites-available/ceilometer.conf
+ dest: /etc/apache2/sites-enabled/ceilometer.conf
+ state: "link"
+ when: ansible_os_family == 'Debian'
+ notify: reload apache server
+
+- name: assure listen port exist
+ shell: echo "Listen {{ internal_ip }}:8777" >> /etc/apache2/ports.conf
+ notify:
+ - restart apache server
+
+- name: copy glance & cinder configs
+ template:
+ src: "{{ item }}"
+ dest: /opt/os_templates
+ with_items:
+ - cinder.conf.j2
+ - glance-api.conf.j2
+ - glance-registry.conf.j2
+
+- name: update configs
+ shell: crudini --merge {{ item.dest }} < /opt/os_templates/{{ item.src }}
+ with_items:
+ - src: cinder.conf.j2
+ dest: /etc/cinder/cinder.conf
+ - src: glance-api.conf.j2
+ dest: /etc/glance/glance-api.conf
+ - src: glance-registry.conf.j2
+ dest: /etc/glance/glance-registry.conf
+ notify: restart glance_cinder service
+
+- name: delete configs
+ file:
+ path: /opt/os_templates/{{ item }}
+ state: absent
+ with_items:
+ - cinder.conf.j2
+ - glance-api.conf.j2
+ - glance-registry.conf.j2
+
+- name: change meter polling interval to 300s
+ replace:
+ dest: /etc/ceilometer/pipeline.yaml
+ regexp: 'interval: .+'
+ replace: 'interval: 300'
+ notify: restart ceilometer service
+
+- name: write services to monitor list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: ceilometer_services
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml
new file mode 100644
index 00000000..0f2ba3d2
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/ceilometer_install.yml
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install ceilometer packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: ceilometer_packages | union(packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: update ceilometer configs
+ template:
+ src: ceilometer.conf.j2
+ dest: /etc/ceilometer/ceilometer.conf
+ backup: yes
+ notify: restart ceilometer service
+
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml
index 6b1882cc..1e3c04d7 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml
@@ -7,45 +7,16 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: disable auto start
- copy:
- content: "#!/bin/sh\nexit 101"
- dest: "/usr/sbin/policy-rc.d"
- mode: 0755
- when: ansible_os_family == "Debian"
-
-- name: install ceilometer packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: ceilometer_packages | union(packages_noarch)
-
-- name: enable auto start
- file:
- path=/usr/sbin/policy-rc.d
- state=absent
- when: ansible_os_family == "Debian"
-
-- name: copy ceilometer configs
- template: src={{ item.src}} dest=/opt/os_templates
- with_items: "{{ ceilometer_configs_templates }}"
-
-- name: update ceilometer configs
- shell: crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }}
- with_subelements:
- - ceilometer_configs_templates
- - dest
- notify: restart ceilometer relation service
-
-- name: change meter polling interval to 300s
- replace:
- dest: /etc/ceilometer/pipeline.yaml
- regexp: 'interval: .+'
- replace: 'interval: 300'
- notify: restart ceilometer relation service
-
-- name: write services to monitor list
- lineinfile: dest=/opt/service create=yes line='{{ item }}'
- with_items: ceilometer_services
+- include: ceilometer_install.yml
+ tags:
+ - install
+ - ceilometer_install
+ - ceilometer
+
+- include: ceilometer_config.yml
+ tags:
+ - config
+ - ceilometer_config
+ - ceilometer
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.conf.j2
new file mode 100644
index 00000000..50271732
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.conf.j2
@@ -0,0 +1,52 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
+[DEFAULT]
+rpc_backend = rabbit
+auth_strategy = keystone
+verbose = True
+
+[api]
+host = {{ internal_ip }}
+
+[database]
+connection = mongodb://ceilometer:{{ CEILOMETER_DBPASS }}@{{ internal_vip.ip }}:27017/ceilometer
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = ceilometer
+password = {{ CEILOMETER_PASS }}
+
+identity_uri = http://{{ internal_vip.ip }}:35357
+auth_plugin = password
+project_domain_id = default
+user_domain_id = default
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[publisher]
+metering_secret = {{ metering_secret }}
+
+[service_credentials]
+auth_type = password
+auth_url = http://{{ internal_vip.ip }}:5000/v3
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = ceilometer
+password = {{ CEILOMETER_PASS }}
+interface = internalURL
+region_name = RegionOne
+
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.conf.j2
new file mode 100644
index 00000000..e2d19cc3
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.conf.j2
@@ -0,0 +1,2 @@
+[oslo_messaging_notifications]
+driver = messagingv2
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-api.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-api.conf.j2
new file mode 100644
index 00000000..e2d19cc3
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-api.conf.j2
@@ -0,0 +1,2 @@
+[oslo_messaging_notifications]
+driver = messagingv2
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-registry.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-registry.conf.j2
new file mode 100644
index 00000000..e2d19cc3
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance-registry.conf.j2
@@ -0,0 +1,2 @@
+[oslo_messaging_notifications]
+driver = messagingv2
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2
new file mode 100644
index 00000000..9909f800
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/wsgi-ceilometer.conf.j2
@@ -0,0 +1,25 @@
+{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
+{% if work_threads > 10 %}
+{% set work_threads = 10 %}
+{% endif %}
+
+<VirtualHost {{ internal_ip }}:8777>
+ WSGIDaemonProcess ceilometer-api processes=4 threads={{ work_threads }} user=ceilometer group=ceilometer display-name=%{GROUP}
+ WSGIProcessGroup ceilometer-api
+ WSGIScriptAlias / /usr/lib/python2.7/dist-packages/ceilometer/api/app.wsgi
+ WSGIApplicationGroup %{GLOBAL}
+ ErrorLog /var/log/apache2/ceilometer_error.log
+ CustomLog /var/log/apache2/ceilometer_access.log combined
+
+ <Directory /usr/lib/python2.7/dist-packages/ceilometer/api/>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
+
+WSGISocketPrefix /var/run/apache2
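The thread-pool sizing at the top works out to roughly half the vCPUs, capped at ten: (ansible_processor_vcpus + 1) // 2 gives 4 threads on an 8-core controller and 12 on a 24-core one, which the cap then clamps to 10.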
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml b/deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml
index 55f5aa19..de860533 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/vars/Debian.yml
@@ -17,17 +17,10 @@ ceilometer_packages:
ceilometer_services:
- ceilometer-agent-central
- ceilometer-agent-notification
- - ceilometer-api
- ceilometer-collector
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
+glance_cinder_services:
+ - glance-registry
+ - glance-api
+ - cinder-api
+ - cinder-scheduler
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml b/deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml
index 86f464a5..de860533 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/vars/RedHat.yml
@@ -8,26 +8,19 @@
##############################################################################
---
ceilometer_packages:
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
+ - ceilometer-api
+ - ceilometer-collector
+ - ceilometer-agent-central
+ - ceilometer-agent-notification
- python-ceilometerclient
ceilometer_services:
- - openstack-ceilometer-central
- - openstack-ceilometer-notification
- - openstack-ceilometer-api
- - openstack-ceilometer-collector
+ - ceilometer-agent-central
+ - ceilometer-agent-notification
+ - ceilometer-collector
-ceilometer_configs_templates:
- - src: ceilometer.j2
- dest:
- - /etc/ceilometer/ceilometer.conf
- - src: cinder.j2
- dest:
- - /etc/cinder/cinder.conf
- - src: glance.j2
- dest:
- - /etc/glance/glance-api.conf
- - /etc/glance/glance-registry.conf
+glance_cinder_services:
+ - glance-registry
+ - glance-api
+ - cinder-api
+ - cinder-scheduler
diff --git a/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
index 658d109e..1d14c2d2 100644
--- a/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
+++ b/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
@@ -15,6 +15,17 @@
- name: Populate the monitor daemon
shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring"
+- name: Change ceph/mon dir owner to ceph
+ shell: "chown -R ceph:ceph /var/lib/ceph/mon"
+ when: ansible_os_family == "Debian"
+
+- name: copy templates
+ template:
+ src: ceph-mon.service
+ dest: /lib/systemd/system/ceph-mon.service
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
- name: Touch the done and auto start file
file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch"
with_items:
diff --git a/deploy/adapters/ansible/roles/ceph-mon/templates/ceph-mon.service b/deploy/adapters/ansible/roles/ceph-mon/templates/ceph-mon.service
new file mode 100644
index 00000000..5a3cf753
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-mon/templates/ceph-mon.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=Ceph cluster monitor daemon
+Documentation=man:ceph-mon
+
+After=network-online.target local-fs.target ceph-create-keys.service
+Wants=network-online.target local-fs.target ceph-create-keys.service
+
+PartOf=ceph.target
+
+[Service]
+LimitNOFILE=1048576
+LimitNPROC=1048576
+EnvironmentFile=-/etc/default/ceph
+Environment=CLUSTER=ceph
+ExecStart=/usr/bin/ceph-mon -f --cluster ${CLUSTER} --id {{ inventory_hostname }} --setuser ceph --setgroup ceph
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=on-failure
+RestartSec=30
+TasksMax=infinity
+
+[Install]
+WantedBy=multi-user.target
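The unit starts the monitor with --setuser ceph --setgroup ceph, in line with Ceph's Jewel-era move to a dedicated ceph user; that is also why this patch adds the chown -R ceph:ceph tasks for the mon and osd directories, and why vars/Debian.yml below switches the start method from upstart to systemd.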
diff --git a/deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml b/deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml
index 16b7989b..a792acad 100644
--- a/deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ceph-mon/vars/Debian.yml
@@ -8,5 +8,5 @@
##############################################################################
---
-ceph_start_script: "start ceph-mon id={{ inventory_hostname }}"
-ceph_start_type: "upstart"
+ceph_start_script: "service ceph-mon start"
+ceph_start_type: "systemd"
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml
new file mode 100644
index 00000000..2097ca57
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_post.yml
@@ -0,0 +1,19 @@
+##############################################################################
+## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+---
+- name: get mount info
+ command: mount
+ register: mount_info
+
+- name: try unmount image nfs directory
+ shell: |
+ umount /var/lib/glance/images
+ sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
+ when: mount_info.stdout.find('images') != -1
+
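mount_info.stdout.find('images') != -1 matches any mount line that merely contains the substring images, not just the glance NFS export. A sketch of the same task with a tighter guard:

    - name: try unmount image nfs directory
      shell: |
        umount /var/lib/glance/images
        sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
      when: "'/var/lib/glance/images' in mount_info.stdout"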
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
index 8c9734d7..06c3acb6 100644
--- a/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
@@ -11,6 +11,7 @@
- ceph_deploy
- ceph_openstack_pre
- ceph_openstack_conf
+ - ceph_openstack_post
- ceph_openstack
- include: ceph_openstack_pre.yml
@@ -24,3 +25,9 @@
- ceph_deploy
- ceph_openstack_conf
- ceph_openstack
+
+- include: ceph_openstack_post.yml
+ tags:
+ - ceph_deploy
+ - ceph_openstack_post
+ - ceph_openstack
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml b/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml
index 1da42323..db10bd14 100755
--- a/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml
@@ -11,9 +11,9 @@ packages:
- ceph-deploy
- python-flask
- libgoogle-perftools4
- - libleveldb1
+ - libleveldb1v5
- liblttng-ust0
- - libsnappy1
+ - libsnappy1v5
- librbd1
- librados2
- python-ceph
diff --git a/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
index ff99d68a..363e5e6d 100644
--- a/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
+++ b/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
@@ -22,6 +22,10 @@
- name: prepare osd disk
shell: ceph-disk prepare --fs-type xfs /var/local/osd
+- name: change local/osd dir owner to ceph
+ shell: chown -R ceph:ceph /var/local/osd
+ when: ansible_os_family == "Debian"
+
- name: activate osd node
shell: ceph-disk activate /var/local/osd
diff --git a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
index 66d9948a..d428a078 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
+++ b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
@@ -1,3 +1,9 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
@@ -9,17 +15,13 @@ debug = {{ DEBUG }}
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
-notification_driver=cinder.openstack.common.notifier.rpc_notifier
+notification_driver = cinder.openstack.common.notifier.rpc_notifier
volumes_dir = /var/lib/cinder/volumes
-
-log_file=/var/log/cinder/cinder.log
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
+log_file = /var/log/cinder/cinder.log
control_exchange = cinder
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ storage_controller_host }}
glance_host = {{ internal_vip.ip }}
@@ -28,8 +30,8 @@ api_rate_limit = False
storage_availability_zone = nova
quota_volumes = 10
-quota_gigabytes=1000
-quota_driver=cinder.quota.DbQuotaDriver
+quota_gigabytes = 1000
+quota_driver = cinder.quota.DbQuotaDriver
osapi_volume_listen = {{ storage_controller_host }}
osapi_volume_listen_port = 8776
@@ -38,29 +40,46 @@ db_backend = sqlalchemy
volume_name_template = volume-%s
snapshot_name_template = snapshot-%s
-max_gigabytes=10000
+max_gigabytes = 10000
-volume_clear=zero
-volume_clear_size=10
+volume_clear = zero
+volume_clear_size = 10
-iscsi_ip_address={{ storage_controller_host }}
-iscsi_port=3260
-iscsi_helper=tgtadm
+iscsi_ip_address = {{ storage_controller_host }}
+iscsi_port = 3260
+iscsi_helper = tgtadm
-volumes_dir=/var/lib/cinder/volumes
+volumes_dir = /var/lib/cinder/volumes
+volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
-volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
+idle_timeout = 30
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v3
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = cinder
+password = {{ CINDER_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = cinder
admin_password = {{ CINDER_PASS }}
-[database]
-connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
-idle_timeout = 30
-
[keymgr]
encryption_auth_url=http://{{ internal_vip.ip }}:5000/v3
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[oslo_concurrency]
+lock_path = /var/lib/cinder/tmp
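Note that [keystone_authtoken] now carries both generations of middleware options, the new auth_type = password block and the legacy identity_uri/admin_* keys, so the file keeps working whichever keystonemiddleware release a node happens to run; the legacy keys become dead weight once everything reads the new ones.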
diff --git a/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf b/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
index 0660cba9..e7946b5c 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
+++ b/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
@@ -10,25 +10,22 @@ state_path = /var/lib/cinder
lock_path = /var/lib/cinder/tmp
notification_driver=cinder.openstack.common.notifier.rpc_notifier
volumes_dir = /var/lib/cinder/volumes
-
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
log_file=/var/log/cinder/cinder.log
control_exchange = cinder
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ storage_controller_host }}
glance_host = {{ internal_vip.ip }}
glance_port = 9292
+glance_api_servers = http://{{ internal_vip.ip }}:9292
api_rate_limit = False
storage_availability_zone = nova
quota_volumes = 10
-quota_gigabytes=1000
-quota_driver=cinder.quota.DbQuotaDriver
+quota_gigabytes = 1000
+quota_driver = cinder.quota.DbQuotaDriver
osapi_volume_listen = {{ storage_controller_host }}
osapi_volume_listen_port = 8776
@@ -37,26 +34,42 @@ db_backend = sqlalchemy
volume_name_template = volume-%s
snapshot_name_template = snapshot-%s
-max_gigabytes=10000
+max_gigabytes = 10000
-volume_clear=zero
-volume_clear_size=10
+volume_clear = zero
+volume_clear_size = 10
-iscsi_ip_address={{ storage_controller_host }}
+iscsi_ip_address = {{ storage_controller_host }}
iscsi_port=3260
iscsi_helper=tgtadm
volumes_dir=/var/lib/cinder/volumes
-
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
+idle_timeout = 30
+
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v3
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = cinder
+password = {{ CINDER_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = cinder
admin_password = {{ CINDER_PASS }}
-[database]
-connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
-idle_timeout = 30
+[lvm]
+volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
+volume_group = cinder-volumes
+iscsi_protocol = iscsi
+iscsi_helper = tgtadm
+
+[oslo_concurrency]
+lock_path = /var/lib/cinder/tmp
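The new [lvm] section is staged but inert until enabled_backends = lvm is set under [DEFAULT]: without enabled_backends, cinder-volume keeps reading volume_driver and the iSCSI settings from [DEFAULT], exactly as the unchanged lines above still provide.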
diff --git a/deploy/adapters/ansible/roles/common/templates/pip.conf b/deploy/adapters/ansible/roles/common/templates/pip.conf
index 7bb3e43e..59981258 100644
--- a/deploy/adapters/ansible/roles/common/templates/pip.conf
+++ b/deploy/adapters/ansible/roles/common/templates/pip.conf
@@ -1,5 +1,5 @@
[global]
-find-links = http://{{ COMPASS_SERVER.stdout_lines[0] }}/pip
+find-links = http://{{ COMPASS_SERVER.stdout_lines[0] }}/pip-openstack
no-index = true
[install]
trusted-host={{ COMPASS_SERVER.stdout_lines[0] }}
diff --git a/deploy/adapters/ansible/roles/common/vars/Debian.yml b/deploy/adapters/ansible/roles/common/vars/Debian.yml
index 1d7972eb..46e0374f 100644
--- a/deploy/adapters/ansible/roles/common/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/common/vars/Debian.yml
@@ -10,11 +10,12 @@
packages:
- ubuntu-cloud-keyring
- python-dev
- - openvswitch-datapath-dkms
- openvswitch-switch
+ - openvswitch-switch-dpdk
- python-memcache
- python-iniparse
- python-lxml
+ - python-crypto
#- python-d* #TODO, need remove
pip_packages:
diff --git a/deploy/adapters/ansible/roles/congress/files/congress.service b/deploy/adapters/ansible/roles/congress/files/congress.service
new file mode 100644
index 00000000..4ec26c8c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/files/congress.service
@@ -0,0 +1,19 @@
+[Unit]
+Description=OpenStack Congress server
+After=
+
+[Service]
+User=root
+Group=root
+Type=simple
+WorkingDirectory=/var/lib/congress
+PermissionsStartOnly=true
+ExecStartPre=/bin/mkdir -p /var/lock/congress /var/log/congress /var/lib/congress
+ExecStartPre=/usr/bin/touch /var/log/congress/congress.log
+ExecStart=/usr/local/bin/congress-server --config-file /etc/congress/congress.conf
+Restart=on-failure
+LimitNOFILE=65535
+TimeoutStopSec=15
+
+[Install]
+WantedBy=multi-user.target
diff --git a/deploy/adapters/ansible/roles/congress/handlers/main.yml b/deploy/adapters/ansible/roles/congress/handlers/main.yml
new file mode 100644
index 00000000..b4ea8e90
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/handlers/main.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart congress services
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: "{{ services | union(services_noarch) }}"
diff --git a/deploy/adapters/ansible/roles/congress/tasks/congress_config.yml b/deploy/adapters/ansible/roles/congress/tasks/congress_config.yml
new file mode 100644
index 00000000..f40d4c22
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/tasks/congress_config.yml
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: congress db sync
+ shell: /usr/local/bin/congress-db-manage --config-file /etc/congress/congress.conf upgrade head
+ when: inventory_hostname == haproxy_hosts.keys()[0]
+
+- name: start congress service
+ shell: systemctl start congress.service
diff --git a/deploy/adapters/ansible/roles/congress/tasks/congress_db.yml b/deploy/adapters/ansible/roles/congress/tasks/congress_db.yml
new file mode 100644
index 00000000..1883509b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/tasks/congress_db.yml
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: create congress db
+ mysql_db:
+ login_unix_socket: /var/run/mysqld/mysqld.sock
+ name: "{{ item.db }}"
+ state: present
+ with_items: "{{ credentials }}"
+
+- name: create congress db user
+ mysql_user:
+ login_unix_socket: /var/run/mysqld/mysqld.sock
+ name: "{{ item[0].user }}"
+ password: "{{ item[0].password }}"
+ priv: "*.*:ALL,GRANT"
+ host: "{{ item[1] }}"
+ state: present
+ with_nested:
+ - "{{ credentials }}"
+ - ['%', 'localhost']
+
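The with_nested loop above produces one mysql_user invocation per (credential, host) pair. With the single credential defined in vars/Debian.yml it expands, schematically, to:

    # item[0] = { user: congress, db: congress, password: "{{ CONGRESS_DBPASS }}" }
    # item[1] = '%'         -> grants to 'congress'@'%'
    # item[1] = 'localhost' -> grants to 'congress'@'localhost'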
diff --git a/deploy/adapters/ansible/roles/congress/tasks/congress_install.yml b/deploy/adapters/ansible/roles/congress/tasks/congress_install.yml
new file mode 100644
index 00000000..1e620783
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/tasks/congress_install.yml
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: install congress packages
+ pip: name={{ item }} state=present
+ with_items: "{{ packages }}"
+
+- name: create congress etc directory
+ file: path=/etc/congress state=directory
+
+- name: update congress conf
+ template: src={{ item }} dest=/etc/congress/{{ item }}
+ backup=yes
+ with_items:
+ - congress.conf
+ - api-paste.ini
+ - policy.json
+
+- name: create congress service
+ copy: src=congress.service dest=/lib/systemd/system/
+
+- name: create congress service work dir
+ file: path=/var/lib/congress state=directory
+
+- name: link the congress service
+ file:
+ src: /lib/systemd/system/congress.service
+ dest: /etc/systemd/system/multi-user.target.wants/congress.service
+ state: link
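Note that the pip task above installs whatever versions are current on PyPI at deploy time. A hedged sketch of the same task with explicit pins; the version numbers here are illustrative placeholders, not tested values:

    - name: install congress packages
      pip: name={{ item.name }} version={{ item.version }} state=present
      with_items:
        - { name: congress, version: "4.0.0" }               # placeholder
        - { name: python-congressclient, version: "1.5.0" }  # placeholder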
diff --git a/deploy/adapters/ansible/roles/congress/tasks/main.yml b/deploy/adapters/ansible/roles/congress/tasks/main.yml
new file mode 100644
index 00000000..f8056d15
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/tasks/main.yml
@@ -0,0 +1,16 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include: congress_install.yml
+
+- include: congress_db.yml
+ when:
+ - inventory_hostname == haproxy_hosts.keys()[0]
+
+- include: congress_config.yml
diff --git a/deploy/adapters/ansible/roles/congress/templates/api-paste.ini b/deploy/adapters/ansible/roles/congress/templates/api-paste.ini
new file mode 100644
index 00000000..39be570b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/templates/api-paste.ini
@@ -0,0 +1,34 @@
+[composite:congress]
+use = egg:Paste#urlmap
+/: congressversions
+/v1: congress_api_v1
+
+[pipeline:congressversions]
+pipeline = cors catch_errors congressversionapp
+
+[app:congressversionapp]
+paste.app_factory = congress.api.versions:Versions.factory
+
+[composite:congress_api_v1]
+use = call:congress.auth:pipeline_factory
+keystone = cors request_id catch_errors authtoken keystonecontext congress_api
+noauth = cors request_id catch_errors congress_api
+
+[app:congress_api]
+paste.app_factory = congress.service:congress_app_factory
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:catch_errors]
+paste.filter_factory = oslo_middleware:CatchErrors.factory
+
+[filter:keystonecontext]
+paste.filter_factory = congress.auth:CongressKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = congress
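congress.auth:pipeline_factory picks one of the two pipelines at load time based on the auth_strategy option in congress.conf: keystone (the value set in the template below) routes requests through authtoken and keystonecontext, while noauth skips authentication entirely; cors, request_id and catch_errors wrap both paths.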
diff --git a/deploy/adapters/ansible/roles/congress/templates/congress.conf b/deploy/adapters/ansible/roles/congress/templates/congress.conf
new file mode 100644
index 00000000..0305b418
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/templates/congress.conf
@@ -0,0 +1,510 @@
+{% set memcached_servers = [] %}
+{% set rabbitmq_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
+[DEFAULT]
+
+#
+# From congress
+#
+# The host IP to bind to (string value)
+bind_host = {{ internal_ip }}
+
+# The port to bind to (port value)
+# Minimum value: 0
+# Maximum value: 65535
+bind_port = 1789
+
+# Thread pool size for eventlet. (integer value)
+#max_simultaneous_requests = 1024
+
+# Set this to true to enable TCP_KEEPALIVE socket option on connections received
+# by the API server. (boolean value)
+#tcp_keepalive = false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Only
+# applies if tcp_keepalive is true. Not supported on OS X. (integer value)
+#tcp_keepidle = 600
+
+# The path to the latest policy dump (string value)
+policy_path = /etc/congress/policy.json
+
+# The file containing datasource configuration (string value)
+#datasource_file = <None>
+
+# The absolute path to the congress repo (string value)
+#root_path = <None>
+
+# The number of worker processes to serve the congress API application.
+# (integer value)
+#api_workers = 1
+
+# The API paste config file to use (string value)
+#api_paste_config = api-paste.ini
+
+# The type of authentication to use (string value)
+auth_strategy = keystone
+
+# List of driver class paths to import. (list value)
+drivers = congress.datasources.neutronv2_driver.NeutronV2Driver,congress.datasources.glancev2_driver.GlanceV2Driver,congress.datasources.nova_driver.NovaDriver,congress.datasources.keystone_driver.KeystoneDriver,congress.datasources.ceilometer_driver.CeilometerDriver,congress.datasources.cinder_driver.CinderDriver,congress.datasources.swift_driver.SwiftDriver,congress.datasources.plexxi_driver.PlexxiDriver,congress.datasources.vCenter_driver.VCenterDriver,congress.datasources.cloudfoundryv2_driver.CloudFoundryV2Driver,congress.datasources.murano_driver.MuranoDriver,congress.datasources.ironic_driver.IronicDriver
+
+
+# The number of seconds to wait between synchronizing datasource config from
+# the database (integer value)
+#datasource_sync_period = 0
+
+# Sets the flag to False if you don't want the congress to execute actions.
+# (boolean value)
+#enable_execute_action = true
+
+# The flag to use congress's new distributed architecture. Don't set it to True in
+# the L release, since the new architecture is still under implementation. (boolean value)
+#distributed_architecture = false
+
+# Explicitly specify the temporary working directory (string value)
+#tempdir = <None>
+
+# Make exception message format errors fatal (boolean value)
+#fatal_exception_format_errors = false
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+# Note: This option can be changed without restarting.
+debug = True
+
+# DEPRECATED: If set to false, the logging level will be set to WARNING instead
+# of the default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Note: This option can be changed without restarting.
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+log_file = congress.log
+
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+log_dir = /var/log/congress
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
+# slash. Example: https://horizon.example.com (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Subject-Token,X-Service-Token
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
+# slash. Example: https://horizon.example.com (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Subject-Token,X-Service-Token
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-OpenStack-Request-ID,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# DEPRECATED: The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Should use config option connection or slave_connection to connect
+# the database.
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection = mysql+pymysql://congress:{{ CONGRESS_DBPASS }}@{{ db_host }}/congress
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. Setting a value of
+# 0 indicates no limit. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = 5
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Minimum value: 0
+# Maximum value: 100
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete "public" Identity API endpoint. This endpoint should not be an
+# "admin" endpoint, as it should be accessible by all end users.
+# Unauthenticated clients are redirected to this endpoint to authenticate.
+# Although this endpoint should ideally be unversioned, client support in the
+# wild varies. If you're using a versioned v2 endpoint here, then this should
+# *not* be the same endpoint the service user utilizes for validating tokens,
+# because normal end users may not be able to reach that endpoint. (string
+# value)
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+project_name = service
+password = {{ CONGRESS_PASS }}
+username = congress
+auth_type = password
+# API version of the admin Identity API endpoint. (string value)
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout = <None>
+
+# How many times are we trying to reconnect when communicating with Identity
+# API Server. (integer value)
+#http_request_max_retries = 3
+
+# Request environment key where the Swift cache object is stored. When
+# auth_token middleware is deployed with a Swift cache, use this option to have
+# the middleware share a caching backend with swift. Otherwise, use the
+# ``memcached_servers`` option instead. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process. (list value)
+# Deprecated group/name - [keystone_authtoken]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the middleware
+# caches previously-seen tokens for a configurable duration (in seconds). Set
+# to -1 to disable caching completely. (integer value)
+#token_cache_time = 300
+
+# Determines the frequency at which the list of revoked tokens is retrieved
+# from the Identity service (in seconds). A high number of revocation events
+# combined with a low cache duration may significantly reduce performance. Only
+# valid for PKI tokens. (integer value)
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be authenticated or
+# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
+# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
+# cache. If the value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This string is
+# used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead before it is
+# tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every memcached
+# server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a memcached
+# server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a memcached
+# client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool. The
+# advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
+# middleware will not ask for service catalog on token validation and will not
+# set the X-Service-Catalog header. (boolean value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to: "disabled"
+# to not check token binding. "permissive" (default) to validate binding
+# information if the bind type is of a form known to the server and ignore it
+# if not. "strict" like "permissive" but if the bind type is unknown the token
+# will be rejected. "required" any form of token binding is needed to be
+# allowed. Finally the name of a binding method that must be present in tokens.
+# (string value)
+#enforce_token_bind = permissive
+
+# If true, the revocation list will be checked for cached tokens. This requires
+# that PKI tokens are configured on the identity server. (boolean value)
+#check_revocations_for_cached = false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
+# or multiple. The algorithms are those supported by Python standard
+# hashlib.new(). The hashes will be tried in the order given, so put the
+# preferred one first for performance. The result of the first hash will be
+# stored in the cache. This will typically be set to multiple values only while
+# migrating from a less secure algorithm to a more secure one. Once all the old
+# tokens are expired this option should be set to a single value for better
+# performance. (list value)
+#hash_algorithms = md5
+
+# Authentication type to load (string value)
+# Deprecated group/name - [keystone_authtoken]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (string value)
+#auth_section = <None>
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched. Missing or empty directories are ignored. (multi
+# valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+[oslo_messaging_rabbit]
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_hosts = {{ rabbitmq_servers }}
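The Jinja preamble at the top of this template folds every haproxy host into comma-separated endpoint lists that are reused for both memcached and RabbitMQ. With an assumed inventory of two controllers at 10.1.0.50 and 10.1.0.51, the relevant rendered lines would read:

    memcached_servers = 10.1.0.50:11211,10.1.0.51:11211
    rabbit_hosts = 10.1.0.50:5672,10.1.0.51:5672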
diff --git a/deploy/adapters/ansible/roles/congress/templates/policy.json b/deploy/adapters/ansible/roles/congress/templates/policy.json
new file mode 100644
index 00000000..4476051d
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/templates/policy.json
@@ -0,0 +1,6 @@
+{
+ "context_is_admin": "role:admin",
+ "admin_only": "rule:context_is_admin",
+ "regular_user": "",
+ "default": "rule:admin_only"
+}
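With this rule set, any API action without an explicit entry falls through to default, which resolves to admin_only and in turn to role:admin; in other words, the Congress API is admin-only except where a rule states otherwise.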
diff --git a/deploy/adapters/ansible/roles/congress/vars/Debian.yml b/deploy/adapters/ansible/roles/congress/vars/Debian.yml
new file mode 100644
index 00000000..1cc4645e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/vars/Debian.yml
@@ -0,0 +1,21 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages:
+ - congress
+ - python-congressclient
+ - python-cloudfoundryclient
+
+services:
+ - congress
+
+credentials:
+ - user: congress
+ db: congress
+ password: "{{ CONGRESS_DBPASS }}"
diff --git a/deploy/adapters/ansible/roles/congress/vars/main.yml b/deploy/adapters/ansible/roles/congress/vars/main.yml
new file mode 100644
index 00000000..f6fef749
--- /dev/null
+++ b/deploy/adapters/ansible/roles/congress/vars/main.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages_noarch: []
+
+services_noarch: []
diff --git a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
index 229e3cfe..9be6fd6c 100644
--- a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
@@ -71,19 +71,9 @@
- restart dashboard services
- name: update ubuntu horizon settings
- lineinfile:
- dest: /etc/openstack-dashboard/local_settings.py
- regexp: '{{ item.regexp }}'
- line: '{{ item.line }}'
- with_items:
- - regexp: '^WEBROOT[ \t]*=.*'
- line: 'WEBROOT = "/horizon"'
- - regexp: '^COMPRESS_OFFLINE[ \t]*=.*'
- line: 'COMPRESS_OFFLINE=True'
- - regexp: '^ALLOWED_HOSTS[ \t]*=.*'
- line: 'ALLOWED_HOSTS = ["*"]'
- - regexp: '^OPENSTACK_HOST[ \t]*=.*'
- line: 'OPENSTACK_HOST = "{{ internal_ip }}"'
+ template:
+ src: local_settings.py.j2
+ dest: "/etc/openstack-dashboard/local_settings.py"
when: ansible_os_family == 'Debian'
notify:
- restart dashboard services
diff --git a/deploy/adapters/ansible/roles/dashboard/templates/local_settings.py.j2 b/deploy/adapters/ansible/roles/dashboard/templates/local_settings.py.j2
new file mode 100644
index 00000000..7278d5c2
--- /dev/null
+++ b/deploy/adapters/ansible/roles/dashboard/templates/local_settings.py.j2
@@ -0,0 +1,326 @@
+# -*- coding: utf-8 -*-
+
+import os
+
+from django.utils.translation import ugettext_lazy as _
+
+from horizon.utils import secret_key
+
+from openstack_dashboard import exceptions
+from openstack_dashboard.settings import HORIZON_CONFIG
+
+DEBUG = False
+
+WEBROOT = '/'
+
+LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+
+SECRET_KEY = secret_key.generate_or_read_from_file('/var/lib/openstack-dashboard/secret_key')
+
+SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
+
+CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION': '{{ internal_vip.ip }}:11211',
+ },
+}
+
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+
+OPENSTACK_HOST = "{{ internal_ip }}"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
+OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
+
+OPENSTACK_API_VERSIONS = {
+ "identity": 3,
+ "image": 2,
+ "volume": 2,
+}
+
+OPENSTACK_KEYSTONE_BACKEND = {
+ 'name': 'native',
+ 'can_edit_user': True,
+ 'can_edit_group': True,
+ 'can_edit_project': True,
+ 'can_edit_domain': True,
+ 'can_edit_role': True,
+}
+
+OPENSTACK_HYPERVISOR_FEATURES = {
+ 'can_set_mount_point': False,
+ 'can_set_password': False,
+ 'requires_keypair': False,
+ 'enable_quotas': True
+}
+
+OPENSTACK_CINDER_FEATURES = {
+ 'enable_backup': False,
+}
+
+OPENSTACK_NEUTRON_NETWORK = {
+ 'enable_router': True,
+ 'enable_quotas': True,
+ 'enable_ipv6': True,
+ 'enable_distributed_router': False,
+ 'enable_ha_router': False,
+ 'enable_lb': True,
+ 'enable_firewall': True,
+ 'enable_vpn': True,
+ 'enable_fip_topology_check': True,
+ 'profile_support': None,
+ 'supported_vnic_types': ['*'],
+}
+
+OPENSTACK_HEAT_STACK = {
+ 'enable_user_pass': True,
+}
+
+IMAGE_CUSTOM_PROPERTY_TITLES = {
+ "architecture": _("Architecture"),
+ "kernel_id": _("Kernel ID"),
+ "ramdisk_id": _("Ramdisk ID"),
+ "image_state": _("Euca2ools state"),
+ "project_id": _("Project ID"),
+ "image_type": _("Image Type"),
+}
+
+IMAGE_RESERVED_CUSTOM_PROPERTIES = []
+
+API_RESULT_LIMIT = 1000
+API_RESULT_PAGE_SIZE = 20
+SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
+INSTANCE_LOG_LENGTH = 35
+DROPDOWN_MAX_ITEMS = 30
+
+TIME_ZONE = "UTC"
+
+LOGGING = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'operation': {
+ 'format': '%(asctime)s %(message)s'
+ },
+ },
+ 'handlers': {
+ 'null': {
+ 'level': 'DEBUG',
+ 'class': 'logging.NullHandler',
+ },
+ 'console': {
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ },
+ 'operation': {
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'operation',
+ },
+ },
+ 'loggers': {
+ 'django.db.backends': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'requests': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'horizon': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'horizon.operation_log': {
+ 'handlers': ['operation'],
+ 'level': 'INFO',
+ 'propagate': False,
+ },
+ 'openstack_dashboard': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'novaclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'cinderclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'keystoneclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'glanceclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'neutronclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'heatclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'ceilometerclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'swiftclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_auth': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'nose.plugins.manager': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'django': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'iso8601': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'scss': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ },
+}
+
+SECURITY_GROUP_RULES = {
+ 'all_tcp': {
+ 'name': _('All TCP'),
+ 'ip_protocol': 'tcp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_udp': {
+ 'name': _('All UDP'),
+ 'ip_protocol': 'udp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_icmp': {
+ 'name': _('All ICMP'),
+ 'ip_protocol': 'icmp',
+ 'from_port': '-1',
+ 'to_port': '-1',
+ },
+ 'ssh': {
+ 'name': 'SSH',
+ 'ip_protocol': 'tcp',
+ 'from_port': '22',
+ 'to_port': '22',
+ },
+ 'smtp': {
+ 'name': 'SMTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '25',
+ 'to_port': '25',
+ },
+ 'dns': {
+ 'name': 'DNS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '53',
+ 'to_port': '53',
+ },
+ 'http': {
+ 'name': 'HTTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '80',
+ 'to_port': '80',
+ },
+ 'pop3': {
+ 'name': 'POP3',
+ 'ip_protocol': 'tcp',
+ 'from_port': '110',
+ 'to_port': '110',
+ },
+ 'imap': {
+ 'name': 'IMAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '143',
+ 'to_port': '143',
+ },
+ 'ldap': {
+ 'name': 'LDAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '389',
+ 'to_port': '389',
+ },
+ 'https': {
+ 'name': 'HTTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '443',
+ 'to_port': '443',
+ },
+ 'smtps': {
+ 'name': 'SMTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '465',
+ 'to_port': '465',
+ },
+ 'imaps': {
+ 'name': 'IMAPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '993',
+ 'to_port': '993',
+ },
+ 'pop3s': {
+ 'name': 'POP3S',
+ 'ip_protocol': 'tcp',
+ 'from_port': '995',
+ 'to_port': '995',
+ },
+ 'ms_sql': {
+ 'name': 'MS SQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '1433',
+ 'to_port': '1433',
+ },
+ 'mysql': {
+ 'name': 'MYSQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3306',
+ 'to_port': '3306',
+ },
+ 'rdp': {
+ 'name': 'RDP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3389',
+ 'to_port': '3389',
+ },
+}
+
+REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
+ 'LAUNCH_INSTANCE_DEFAULTS',
+ 'OPENSTACK_IMAGE_FORMATS']
+
+DEFAULT_THEME = 'ubuntu'
+WEBROOT = '/horizon/'
+ALLOWED_HOSTS = ['*']
+COMPRESS_OFFLINE = True
+ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
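Note that WEBROOT is assigned twice in this template: '/' near the top and '/horizon/' in the closing block. Since the file is plain Python, the last assignment wins, so the dashboard is effectively served under /horizon/, matching the WSGIScriptAlias in openstack-dashboard.conf.j2 below.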
diff --git a/deploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2 b/deploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2
index 403fcc22..664af687 100755
--- a/deploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2
+++ b/deploy/adapters/ansible/roles/dashboard/templates/openstack-dashboard.conf.j2
@@ -1,8 +1,11 @@
{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
+{% if work_threads > 10 %}
+{% set work_threads = 10 %}
+{% endif %}
<VirtualHost {{ internal_ip }}:80>
WSGIScriptAlias /horizon {{ horizon_dir }}/wsgi/django.wsgi
- WSGIDaemonProcess horizon user=horizon group=horizon processes={{ work_threads }} threads={{ work_threads }}
+ WSGIDaemonProcess horizon user=horizon group=horizon processes=4 threads={{ work_threads }}
WSGIProcessGroup horizon
Alias /static {{ horizon_dir }}/static/
Alias /horizon/static {{ horizon_dir }}/static/
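With the cap in place, a 48-vCPU controller that would previously have computed (48 + 1) // 2 = 24 threads is clamped to 10, yielding processes=4 threads=10, i.e. at most 40 WSGI workers; a small 4-vCPU host still gets (4 + 1) // 2 = 2 threads. The fixed processes=4 bounds memory use independently of host size.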
diff --git a/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml b/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
index 5c9b032e..aaeb8cdb 100644
--- a/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
@@ -11,6 +11,7 @@ packages: []
services:
- memcached
+ - apache2
apache_config_dir: /etc/apache2
horizon_dir: /usr/share/openstack-dashboard/openstack_dashboard
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
index f083a40f..442cd18b 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
@@ -7,14 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- name: Register RECOVERY
- set_fact: RECOVERY_ENV={{RECOVERY_ENV | default('False')}}
-
-- name: killall mysqld processes
- shell: sudo killall -9 mysqld
- when: RECOVERY_ENV
- ignore_errors: True
-
- name: get cluster status
shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}'
register: cluster_status
@@ -22,11 +14,9 @@
- inventory_hostname == haproxy_hosts.keys()[0]
- name: start first node to create new cluster
- service:
- name: mysql
- state: restarted
- enabled: yes
- args: "--wsrep-new-cluster"
+ shell: >
+ service mysql bootstrap;
+ service mysql start;
when: |
inventory_hostname == haproxy_hosts.keys()[0]
and not cluster_status.stdout | search("OPERATIONAL")
@@ -47,19 +37,30 @@
register: cluster_nodes
changed_when: false
-- name: restart other nodes and join cluster
- service:
- name: mysql
- state: restarted
- enabled: yes
+- name: restart other nodes and join cluster1
+ shell: service mysql restart;
+ when: |
+ inventory_hostname != haproxy_hosts.keys()[0]
+ and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
+ ignore_errors: True
+
+- name: delay 60 seconds
+ shell: sleep 60
+
+- name: restart other nodes and join cluster2
+ shell: service mysql restart;
when: |
inventory_hostname != haproxy_hosts.keys()[0]
and not cluster_nodes.stdout | search( "{{ internal_ip }}" )
+- name: chmod directory
+ shell: >
+ chmod 755 -R /var/lib/mysql/ ;
+ chmod 755 -R /var/log/mysql/ ;
+ chmod 755 -R /etc/mysql/conf.d/;
+
- name: restart first nodes
- service:
- name: mysql
- state: restarted
+ shell: service mysql restart
when: |
(inventory_hostname == haproxy_hosts.keys()[0]
and haproxy_hosts|length > 1
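The revised flow is: bootstrap mysql on the first haproxy host only when no node reports an OPERATIONAL wsrep state, restart the remaining nodes so they join (with a second pass 60 seconds later for stragglers), then restart the first node so it rejoins as an ordinary cluster member. On the mariadb-galera-server-10.0 packaging, service mysql bootstrap effectively starts mysqld with --wsrep-new-cluster, which is why the service-module restart with args is no longer needed.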
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
index bf9f3464..1b08172d 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
@@ -14,16 +14,16 @@
mode: 0755
when: ansible_os_family == "Debian"
-- name: install python-mysqldb
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: maridb_packages | union(packages_noarch)
-
- name: change open file limit
copy:
content: "* - nofile 65536 }}"
dest: "/etc/security/limits.conf"
mode: 0755
+- name: install python-mysqldb
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: maridb_packages | union(packages_noarch)
+
- name: create conf dir for wsrep
file: path=/etc/my.cnf.d state=directory mode=0755
when: ansible_os_family == "RedHat"
@@ -52,7 +52,7 @@
when: ansible_os_family == "Debian"
- name: set owner
- file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory
+ file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory mode=0755
- name: get logfile stat
stat: path='{{ mysql_data_dir }}/ib_logfile0'
@@ -67,3 +67,4 @@
when: |
logfile_stat.stat.exists
and logfile_stat.stat.size != 1073741824
+
diff --git a/deploy/adapters/ansible/roles/database/templates/data.j2 b/deploy/adapters/ansible/roles/database/templates/data.j2
index 109201ab..66c2fead 100644
--- a/deploy/adapters/ansible/roles/database/templates/data.j2
+++ b/deploy/adapters/ansible/roles/database/templates/data.j2
@@ -6,6 +6,7 @@ drop database if exists neutron;
drop database if exists nova;
drop database if exists cinder;
drop database if exists heat;
+drop database if exists aodh;
CREATE DATABASE keystone;
{% for host in ['%', 'localhost', inventory_hostname] %}
@@ -37,6 +38,11 @@ CREATE DATABASE heat;
GRANT ALL ON heat.* TO 'heat'@'{{ host }}' IDENTIFIED BY '{{ HEAT_DBPASS }}';
{% endfor %}
+CREATE DATABASE aodh;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON aodh.* TO 'aodh'@'{{ host }}' IDENTIFIED BY '{{ AODH_DBPASS }}';
+{% endfor %}
+
{% if WSREP_SST_USER is defined %}
{% for host in ['%', 'localhost', inventory_hostname] %}
GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
diff --git a/deploy/adapters/ansible/roles/database/vars/Debian.yml b/deploy/adapters/ansible/roles/database/vars/Debian.yml
index 621dc492..1021524d 100644
--- a/deploy/adapters/ansible/roles/database/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/database/vars/Debian.yml
@@ -17,11 +17,21 @@ mysql_packages:
- mysql-server
maridb_packages:
- - libaio1
- - libssl0.9.8
+ - apt-transport-https
+ - debconf-utils
+ - libaio1
+ - libc6
+ - libdbd-mysql-perl
+ - libgcc1
+ - libgcrypt20
+ - libstdc++6
+ - python-software-properties
+ - mariadb-client
+ - galera-3
+ - rsync
+ - socat
+ - mariadb-galera-server-10.0
- python-mysqldb
- - mysql-wsrep-server-5.5
- - galera-3
pip_packages: []
diff --git a/deploy/adapters/ansible/roles/database/vars/main.yml b/deploy/adapters/ansible/roles/database/vars/main.yml
index c0538899..a32897f0 100644
--- a/deploy/adapters/ansible/roles/database/vars/main.yml
+++ b/deploy/adapters/ansible/roles/database/vars/main.yml
@@ -23,6 +23,9 @@ credentials:
db: glance
password: "{{ GLANCE_DBPASS }}"
- user: nova
+ db: nova_api
+ password: "{{ NOVA_DBPASS }}"
+ - user: nova
db: nova
password: "{{ NOVA_DBPASS }}"
- user: cinder
@@ -31,4 +34,6 @@ credentials:
- user: heat
db: heat
password: "{{ HEAT_DBPASS }}"
-
+ - user: aodh
+ db: aodh
+ password: "{{ AODH_DBPASS }}"
diff --git a/deploy/adapters/ansible/roles/ext-network/handlers/main.yml b/deploy/adapters/ansible/roles/ext-network/handlers/main.yml
index a7945861..36e39072 100644
--- a/deploy/adapters/ansible/roles/ext-network/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/ext-network/handlers/main.yml
@@ -8,7 +8,7 @@
##############################################################################
---
- name: restart neutron-plugin-openvswitch-agent
- service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+ service: name=neutron-openvswitch-agent state=restarted enabled=yes
when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
- name: restart neutron-l3-agent
diff --git a/deploy/adapters/ansible/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/roles/ext-network/tasks/main.yml
index cb6cb2ce..f68105f1 100644
--- a/deploy/adapters/ansible/roles/ext-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ext-network/tasks/main.yml
@@ -7,40 +7,38 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+# FIXME: temporary workaround for openstack api access random failure
+- name: restart api server
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: api_services | union(api_services_noarch)
+ ignore_errors: True
+
- name: restart neutron server
service: name=neutron-server state=restarted enabled=yes
+- name: wait for neutron ready
+ wait_for: port=9696 delay=10 timeout=60 host={{ internal_ip }}
+
- name: create external net
- neutron_network:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.network }}"
- provider_network_type: "{{ public_net_info.type }}"
- provider_physical_network: "{{ public_net_info.provider_network }}"
- provider_segmentation_id: "{{ public_net_info.segment_id}}"
- shared: false
- router_external: yes
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
+ shell:
+ . /opt/admin-openrc.sh;
+ neutron net-create \
+ {{ public_net_info.network }} \
+ --provider:network_type {{ public_net_info.type }} \
+ --provider:physical_network {{ public_net_info.provider_network }} \
+ --router:external True
+ when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
- name: create external subnet
- neutron_subnet:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.subnet }}"
- network_name: "{{ public_net_info.network }}"
- cidr: "{{ public_net_info.floating_ip_cidr }}"
- enable_dhcp: "{{ public_net_info.enable_dhcp }}"
- no_gateway: "{{ public_net_info.no_gateway }}"
- gateway_ip: "{{ public_net_info.external_gw }}"
- allocation_pool_start: "{{ public_net_info.floating_ip_start }}"
- allocation_pool_end: "{{ public_net_info.floating_ip_end }}"
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
+ shell:
+ . /opt/admin-openrc.sh;
+ neutron subnet-create \
+ --name {{ public_net_info.subnet }} \
+ --gateway {{ public_net_info.external_gw }} \
+ --allocation-pool \
+ start={{ public_net_info.floating_ip_start }},end={{ public_net_info.floating_ip_end }} \
+ {{ public_net_info.network }} {{ public_net_info.floating_ip_cidr }}
+ when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
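Both shell tasks run only on the first controller, but unlike the neutron_network / neutron_subnet modules they replace, the raw CLI calls are not idempotent: a second run would attempt to create the network again and fail. A hedged sketch of an existence guard around the same CLI (illustrative only):

    - name: create external net
      shell: >
        . /opt/admin-openrc.sh;
        neutron net-show {{ public_net_info.network }} ||
        neutron net-create {{ public_net_info.network }}
        --provider:network_type {{ public_net_info.type }}
        --provider:physical_network {{ public_net_info.provider_network }}
        --router:external True
      when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]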
diff --git a/deploy/adapters/ansible/roles/ext-network/vars/Debian.yml b/deploy/adapters/ansible/roles/ext-network/vars/Debian.yml
new file mode 100644
index 00000000..0b5c78b6
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ext-network/vars/Debian.yml
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+api_services:
+ - nova-api
+ - glance-api
+ - ceilometer-api
+ - heat-api
+ - heat-api-cfn
+ - aodh-api
+ - cinder-api
+
diff --git a/deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml b/deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml
new file mode 100644
index 00000000..886401fd
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ext-network/vars/RedHat.yml
@@ -0,0 +1,17 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+api_services:
+ - openstack-nova-api
+ - openstack-glance-api
+ - openstack-ceilometer-api
+ - openstack-heat-api
+ - openstack-heat-api-cfn
+ - openstack-cinder-api
+
diff --git a/deploy/adapters/ansible/roles/ext-network/vars/main.yml b/deploy/adapters/ansible/roles/ext-network/vars/main.yml
new file mode 100644
index 00000000..b19b6ebf
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ext-network/vars/main.yml
@@ -0,0 +1,10 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+api_services_noarch: []
diff --git a/deploy/adapters/ansible/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
index 482be22e..39a49dc1 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
@@ -7,6 +7,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+- name: install nfs packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: nfs_packages
+
- name: install nfs
local_action: yum name={{ item }} state=present
with_items:
@@ -56,7 +60,7 @@
mkdir -p /var/lib/glance/images
mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images
sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
- #echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
+ echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
when: mount_info.stdout.find('images') == -1
retries: 5
delay: 3
diff --git a/deploy/adapters/ansible/roles/glance/templates/glance-api.conf b/deploy/adapters/ansible/roles/glance/templates/glance-api.conf
index 9be29f4f..241f04ce 100644
--- a/deploy/adapters/ansible/roles/glance/templates/glance-api.conf
+++ b/deploy/adapters/ansible/roles/glance/templates/glance-api.conf
@@ -36,31 +36,43 @@ scrub_time = 43200
image_cache_dir = /var/lib/glance/image-cache/
show_image_direct_url = True
-[task]
-task_executor = taskflow
-
[database]
backend = sqlalchemy
connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance?charset=utf8
idle_timeout = 30
+sqlite_db = /var/lib/glance/glance.sqlite
+
+[task]
+task_executor = taskflow
[glance_store]
default_store = file
stores = file,http,cinder,rbd
filesystem_store_datadir = /var/lib/glance/images/
+[image_format]
+disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,root-tar
+
[profiler]
enabled = True
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = glance
+password = {{ GLANCE_PASS }}
+token_cache_time = 300
+revocation_cache_time = 60
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = glance
admin_password = {{ GLANCE_PASS }}
-memcached_servers = {{ memcached_servers }}
-token_cache_time = 300
-revocation_cache_time = 60
[paste_deploy]
flavor= keystone
@@ -74,6 +86,8 @@ rabbit_use_ssl = false
rabbit_userid = {{ RABBIT_USER }}
rabbit_password = {{ RABBIT_PASS }}
rabbit_virtual_host = /
+default_notification_exchange = glance
+
rabbit_notification_exchange = glance
rabbit_notification_topic = notifications
rabbit_durable_queues = False
diff --git a/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf b/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf
index 8453b966..ccd8f1bb 100644
--- a/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf
+++ b/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf
@@ -30,12 +30,20 @@ idle_timeout = 30
enabled = True
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = glance
+password = {{ GLANCE_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = glance
admin_password = {{ GLANCE_PASS }}
-memcached_servers = {{ memcached_servers }}
token_cache_time = 300
revocation_cache_time = 60
diff --git a/deploy/adapters/ansible/roles/glance/vars/Debian.yml b/deploy/adapters/ansible/roles/glance/vars/Debian.yml
index b5b4b6c0..d1825012 100644
--- a/deploy/adapters/ansible/roles/glance/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/glance/vars/Debian.yml
@@ -11,6 +11,9 @@ packages:
- glance
- nfs-common
+nfs_packages:
+ - nfs-common
+
nfs_services: []
services:
diff --git a/deploy/adapters/ansible/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
index 517f347c..2987d0c4 100644
--- a/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
@@ -11,6 +11,10 @@ packages:
- openstack-glance
- rpcbind
+nfs_packages:
+ - nfs-utils
+ - rpcbind
+
nfs_services:
- rpcbind
diff --git a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
index a6876da7..5fbcc9d9 100644
--- a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
+++ b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
@@ -158,6 +158,16 @@ listen proxy-cinder_api_cluster
server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
{% endfor %}
+#listen proxy-swift-proxy
+# bind {{ internal_vip.ip }}:8080
+# bind {{ public_vip.ip }}:8080
+# balance source
+# option tcpka
+# option tcplog
+#{% for host,ip in haproxy_hosts.items() %}
+# server {{ host }} {{ ip }}:8080 weight 1 check inter 2000 rise 2 fall 5
+#{% endfor %}
+
listen proxy-ceilometer_api_cluster
bind {{ internal_vip.ip }}:8777
bind {{ public_vip.ip }}:8777
@@ -180,6 +190,17 @@ listen proxy-aodh_api_cluster
server {{ host }} {{ ip }}:8042 weight 1 check inter 2000 rise 2 fall 5
{% endfor %}
+listen proxy-congress_api_cluster
+ bind {{ internal_vip.ip }}:1789
+ bind {{ public_vip.ip }}:1789
+ mode tcp
+ option tcp-check
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:1789 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
listen proxy-dashboarad
bind {{ public_vip.ip }}:80
mode http
@@ -196,7 +217,7 @@ listen proxy-dashboarad
listen stats
mode http
- bind 0.0.0.0:9998
+ bind 0.0.0.0:9999
stats enable
stats refresh 30s
stats uri /
diff --git a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
index a6e76c74..6a0f1c73 100644
--- a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
+++ b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
@@ -21,13 +21,13 @@
- name: create heat user domain
shell: >
- . /opt/admin-openrc-v3.sh;
+ . /opt/admin-openrc.sh;
openstack domain create --description "Stack projects and users" heat;
openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
openstack role create heat_stack_owner;
openstack role add --project demo --user demo heat_stack_owner;
- when: inventory_hostname == groups['controller'][0] and ansible_os_family == "Debian"
+ when: inventory_hostname == groups['controller'][0]
- name: update heat conf
template: src=heat.j2
@@ -36,13 +36,4 @@
notify:
- restart heat service
- remove heat-sqlite-db
- when: ansible_os_family == "RedHat"
-- name: update heat conf
- template: src=heat_debian.j2
- dest=/etc/heat/heat.conf
- backup=yes
- notify:
- - restart heat service
- - remove heat-sqlite-db
- when: ansible_os_family == "Debian"
diff --git a/deploy/adapters/ansible/roles/heat/templates/heat.j2 b/deploy/adapters/ansible/roles/heat/templates/heat.j2
index aec6b2eb..72d4b61e 100644
--- a/deploy/adapters/ansible/roles/heat/templates/heat.j2
+++ b/deploy/adapters/ansible/roles/heat/templates/heat.j2
@@ -1,11 +1,17 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
[DEFAULT]
heat_metadata_server_url = http://{{ internal_vip.ip }}:8000
heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
log_dir = /var/log/heat
+stack_domain_admin = heat_domain_admin
+stack_domain_admin_password = {{ HEAT_PASS }}
+stack_user_domain_name = heat
[database]
connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat
@@ -14,12 +20,35 @@ use_db_reconnect = True
pool_timeout = 10
[ec2authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+
+[clients_keystone]
+auth_uri = http://{{ internal_vip.ip }}:35357
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = heat
+password = {{ HEAT_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = heat
admin_password = {{ HEAT_PASS }}
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[trustee]
+auth_type = password
+auth_url = http://{{ internal_vip.ip }}:35357
+username = heat
+password = {{ HEAT_PASS }}
+user_domain_name = default
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
index e7e9297e..ea211470 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
@@ -7,55 +7,90 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+- include_vars: "{{ ansible_os_family }}.yml"
+
- name: keystone-manage db-sync
- #keystone_manage: action=dbsync
shell: su -s /bin/sh -c 'keystone-manage db_sync' keystone
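+
+# Fernet tokens need an on-disk key repository: create it on the first run,
+# rotate the keys on later runs (keyed off the existence of key index 0).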
+- name: Check if fernet keys already exist
+ stat:
+ path: "/etc/keystone/fernet-keys/0"
+ register: fernet_keys_0
+
+- name: Create fernet keys for Keystone
+ command:
+ keystone-manage fernet_setup
+ --keystone-user keystone
+ --keystone-group keystone
+ when: not fernet_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Rotate fernet keys for Keystone
+ command:
+ keystone-manage fernet_rotate
+ --keystone-user keystone
+ --keystone-group keystone
+ when: fernet_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
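+# Sync the key repository to the remaining controllers so tokens issued by
+# any node validate everywhere.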
+- name: Distribute the fernet key repository
+  shell: rsync -e 'ssh -o StrictHostKeyChecking=no'
+    -avz
+    --delete
+    /etc/keystone/fernet-keys
+    root@{{ hostvars[item].ansible_eth0.ipv4.address }}:/etc/keystone/
+  with_items: "{{ groups['controller'][1:] }}"
+ notify:
+ - restart keystone services
+
+- name: Check if credential keys already exist
+ stat:
+ path: "/etc/keystone/credential-keys/0"
+ register: credential_keys_0
+
+- name: Create credential keys for Keystone
+ command:
+ keystone-manage credential_setup
+ --keystone-user keystone
+ --keystone-group keystone
+ when: not credential_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Rotate credential keys for Keystone
+ command:
+ keystone-manage credential_rotate
+ --keystone-user keystone
+ --keystone-group keystone
+ when: credential_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Distribute the credential key repository
+  shell: rsync -e 'ssh -o StrictHostKeyChecking=no'
+    -avz
+    --delete
+    /etc/keystone/credential-keys
+    root@{{ hostvars[item].ansible_eth0.ipv4.address }}:/etc/keystone/
+  with_items: "{{ groups['controller'][1:] }}"
+ notify:
+ - restart keystone services
+
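+# `keystone-manage bootstrap` seeds the admin user, role, project and the
+# identity endpoints, replacing the ADMIN_TOKEN provisioning removed below.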
+- name: Bootstrap the Identity service
+ shell:
+    keystone-manage bootstrap
+    --bootstrap-password {{ ADMIN_PASS }}
+    --bootstrap-admin-url http://{{ internal_ip }}:35357/v3/
+    --bootstrap-internal-url http://{{ internal_ip }}:35357/v3/
+    --bootstrap-public-url http://{{ internal_ip }}:5000/v3/
+    --bootstrap-region-id RegionOne
+ notify:
+ - restart keystone services
+
+- meta: flush_handlers
+
- name: wait for keystone ready
- wait_for: port=35357 delay=3 timeout=10 host={{ internal_ip }}
-
-- name: cron job to purge expired tokens hourly
- cron:
- name: 'purge expired tokens'
- special_time: hourly
- job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1'
-
-- name: add tenants
- keystone_user:
- token: "{{ ADMIN_TOKEN }}"
- endpoint: "http://{{ internal_ip }}:35357/v2.0"
- tenant: "{{ item.tenant }}"
- tenant_description: "{{ item.tenant_description }}"
- with_items: "{{ os_users }}"
-
-- name: add users
- keystone_user:
- token: "{{ ADMIN_TOKEN }}"
- endpoint: "http://{{ internal_ip }}:35357/v2.0"
- user: "{{ item.user }}"
- tenant: "{{ item.tenant }}"
- password: "{{ item.password }}"
- email: "{{ item.email }}"
- with_items: "{{ os_users }}"
-
-- name: grant roles
- keystone_user:
- token: "{{ ADMIN_TOKEN }}"
- endpoint: "http://{{ internal_ip }}:35357/v2.0"
- user: "{{ item.user }}"
- role: "{{ item.role }}"
- tenant: "{{ item.tenant }}"
- with_items: "{{ os_users }}"
-
-- name: add endpoints
- keystone_service:
- token: "{{ ADMIN_TOKEN }}"
- endpoint: "http://{{ internal_ip }}:35357/v2.0"
- name: "{{ item.name }}"
- type: "{{ item.type }}"
- region: "{{ item.region}}"
- description: "{{ item.description }}"
- publicurl: "{{ item.publicurl }}"
- internalurl: "{{ item.internalurl }}"
- adminurl: "{{ item.adminurl }}"
- with_items: "{{ os_services }}"
+ wait_for: port=35357 delay=15 timeout=60 host={{ internal_ip }}
+
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml
new file mode 100644
index 00000000..53077776
--- /dev/null
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_create.yml
@@ -0,0 +1,93 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
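+# Provision projects, users, roles, services and endpoints; runs on the first
+# controller only (guarded in tasks/main.yml). Keystone's own endpoints were
+# created by `keystone-manage bootstrap`, so they are updated in place below.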
+- name: set keystone endpoint
+ shell:
+ . /opt/admin-openrc.sh;
+    openstack endpoint set
+    --interface public
+    --url {{ item.publicurl }}
+    $(openstack endpoint list | grep keystone | grep public | awk '{print $2}');
+    openstack endpoint set
+    --interface internal
+    --url {{ item.internalurl }}
+    $(openstack endpoint list | grep keystone | grep internal | awk '{print $2}');
+    openstack endpoint set
+    --interface admin
+    --url {{ item.adminurl }}
+    $(openstack endpoint list | grep keystone | grep admin | awk '{print $2}');
+ with_items: "{{ os_services[0:1] }}"
+
+- name: add service
+ shell:
+ . /opt/admin-openrc.sh;
+    openstack service create
+    --name "{{ item.name }}"
+    --description "{{ item.description }}"
+    {{ item.type }}
+ with_items: "{{ os_services[1:] }}"
+
+- name: add project
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack project create --description "Service Project" service;
+ openstack project create --domain default --description "Demo Project" demo;
+
+- name: set admin user
+ shell:
+ . /opt/admin-openrc.sh;
+    openstack user set
+    --email "{{ item.email }}"
+    --project "{{ item.tenant }}"
+    --description "{{ item.tenant_description }}"
+    --password "{{ item.password }}"
+ {{ item.user }}
+ with_items: "{{ os_users }}"
+ when: item["user"] == "admin"
+
+- name: add user
+ shell:
+ . /opt/admin-openrc.sh;
+    openstack user create
+    --email "{{ item.email }}"
+    --project "{{ item.tenant }}"
+    --description "{{ item.tenant_description }}"
+    --password "{{ item.password }}"
+ {{ item.user }}
+ with_items: "{{ os_users[1:] }}"
+
+- name: add roles
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack role create {{ item.role }}
+ with_items: "{{ os_users }}"
+ when: item["user"] == "demo"
+
+- name: grant roles
+ shell:
+ . /opt/admin-openrc.sh;
+    openstack role add
+    --project "{{ item.tenant }}"
+    --user "{{ item.user }}"
+ {{ item.role }}
+ with_items: "{{ os_users }}"
+
+- name: add endpoints
+ shell:
+ . /opt/admin-openrc.sh;
+    openstack endpoint create
+    --region {{ item.region }}
+    {{ item.name }} public {{ item.publicurl }};
+    openstack endpoint create
+    --region {{ item.region }}
+    {{ item.name }} internal {{ item.internalurl }};
+    openstack endpoint create
+    --region {{ item.region }}
+    {{ item.name }} admin {{ item.adminurl }};
+ with_items: "{{ os_services[1:] }}"
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
index ea6926f4..757349c5 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
@@ -26,6 +26,16 @@
state=absent
when: ansible_os_family == "Debian"
+- name: disable boot auto start
+ file:
+ path={{ item }}
+ state=absent
+ with_items:
+ - /etc/init.d/keystone
+ - /etc/init/keystone.conf
+ - /lib/systemd/system/keystone.service
+ when: ansible_os_family == "Debian"
+
- name: generate keystone service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
@@ -56,7 +66,7 @@
- name: update apache2 configs
template:
src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
+ dest: '{{ apache_config_dir }}/sites-available/keystone.conf'
when: ansible_os_family == 'Debian'
notify:
- restart keystone services
@@ -64,15 +74,15 @@
- name: update apache2 configs
template:
src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/wsgi-keystone.conf'
+ dest: '{{ apache_config_dir }}/keystone.conf'
when: ansible_os_family == 'RedHat'
notify:
- restart keystone services
- name: enable keystone server
file:
- src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
- dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
+ src: "{{ apache_config_dir }}/sites-available/keystone.conf"
+ dest: "{{ apache_config_dir }}/sites-enabled/keystone.conf"
state: "link"
when: ansible_os_family == 'Debian'
notify:
@@ -82,7 +92,7 @@
template: src={{ item }} dest=/opt/{{ item }}
with_items:
- admin-openrc.sh
+ - admin-openrc-v2.sh
- demo-openrc.sh
- - admin-openrc-v3.sh
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/main.yml b/deploy/adapters/ansible/roles/keystone/tasks/main.yml
index 21939fa7..ad619d40 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/main.yml
@@ -20,4 +20,11 @@
- keystone_config
- keystone
+- include: keystone_create.yml
+ when: inventory_hostname == groups['controller'][0]
+ tags:
+ - config
+ - keystone_create
+ - keystone
+
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v2.sh b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v2.sh
new file mode 100644
index 00000000..6ba620ff
--- /dev/null
+++ b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v2.sh
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Verify the Identity Service installation
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
+export OS_USERNAME=admin
+export OS_VOLUME_API_VERSION=2
+
diff --git a/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh
index 6ba620ff..94d5850f 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh
+++ b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh
@@ -7,9 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Verify the Identity Service installation
-export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_PROJECT_DOMAIN_NAME=default
+export OS_USER_DOMAIN_NAME=default
export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
+export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
-export OS_VOLUME_API_VERSION=2
-
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
diff --git a/deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh b/deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh
index 5807e868..920f42ed 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh
+++ b/deploy/adapters/ansible/roles/keystone/templates/demo-openrc.sh
@@ -6,8 +6,12 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+export OS_PROJECT_DOMAIN_NAME=default
+export OS_USER_DOMAIN_NAME=default
+export OS_TENANT_NAME=demo
+export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD={{ DEMO_PASS }}
-export OS_TENANT_NAME=demo
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
-
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
diff --git a/deploy/adapters/ansible/roles/keystone/templates/keystone.conf b/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
index 649fc32c..919be344 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
+++ b/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
@@ -7,51 +7,52 @@
{% set memcached_servers = memcached_servers|join(',') %}
{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
[DEFAULT]
-admin_token={{ ADMIN_TOKEN }}
debug={{ DEBUG }}
log_dir = /var/log/keystone
[cache]
-backend=keystone.cache.memcache_pool
-memcache_servers={{ memcached_servers}}
+backend = keystone.cache.memcache_pool
+memcache_servers = {{ memcached_servers }}
enabled=true
[revoke]
-driver=sql
-expiration_buffer=3600
-caching=true
+driver = sql
+expiration_buffer = 3600
+caching = true
[database]
connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8
-idle_timeout=30
-min_pool_size=5
-max_pool_size=120
-pool_timeout=30
+idle_timeout = 30
+min_pool_size = 5
+max_pool_size = 120
+pool_timeout = 30
+[fernet_tokens]
+key_repository = /etc/keystone/fernet-keys/
[identity]
-default_domain_id=default
-driver=sql
+default_domain_id = default
+driver = sql
[assignment]
-driver=sql
+driver = sql
[resource]
-driver=sql
-caching=true
-cache_time=3600
-
+driver = sql
+caching = true
+cache_time = 3600
+
[token]
-enforce_token_bind=permissive
-expiration=43200
-provider=uuid
-driver=sql
-caching=true
-cache_time=3600
+enforce_token_bind = permissive
+expiration = 43200
+provider = fernet
+driver = sql
+caching = true
+cache_time = 3600
[eventlet_server]
-public_bind_host= {{ identity_host }}
-admin_bind_host= {{ identity_host }}
+public_bind_host = {{ identity_host }}
+admin_bind_host = {{ identity_host }}
[oslo_messaging_rabbit]
rabbit_userid = {{ RABBIT_USER }}
diff --git a/deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
index 64d864af..55c89839 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
+++ b/deploy/adapters/ansible/roles/keystone/templates/wsgi-keystone.conf.j2
@@ -1,6 +1,10 @@
- {% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
+{% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
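+{# Cap the per-process thread count; hosts with many vCPUs would otherwise
+   spawn an excessive number of WSGI threads #}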
+{% if work_threads > 10 %}
+{% set work_threads = 10 %}
+{% endif %}
+
<VirtualHost {{ internal_ip }}:5000>
- WSGIDaemonProcess keystone-public processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIDaemonProcess keystone-public processes=4 threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /usr/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
@@ -23,7 +27,7 @@
</VirtualHost>
<VirtualHost {{ internal_ip }}:35357>
- WSGIDaemonProcess keystone-admin processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIDaemonProcess keystone-admin processes=4 threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
diff --git a/deploy/adapters/ansible/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
index b8d8e7c2..89bfbe0a 100644
--- a/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
@@ -11,8 +11,11 @@
cron_path: "/var/spool/cron/crontabs"
packages:
- - keystone
+ - apache2
+ - libapache2-mod-wsgi
+ - python-keystone
- python-openstackclient
+ - keystone
services:
- apache2
diff --git a/deploy/adapters/ansible/roles/keystone/vars/main.yml b/deploy/adapters/ansible/roles/keystone/vars/main.yml
index 655cd98d..ecaf7b51 100644
--- a/deploy/adapters/ansible/roles/keystone/vars/main.yml
+++ b/deploy/adapters/ansible/roles/keystone/vars/main.yml
@@ -9,6 +9,7 @@
---
packages_noarch:
- python-keystoneclient
+ - python3-keystoneclient
services_noarch: []
os_services:
@@ -16,9 +17,9 @@ os_services:
type: identity
region: RegionOne
description: "OpenStack Identity"
- publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
+ publicurl: "http://{{ public_vip.ip }}:5000/v3"
+ internalurl: "http://{{ internal_vip.ip }}:5000/v3"
+ adminurl: "http://{{ internal_vip.ip }}:35357/v3"
- name: glance
type: image
@@ -32,9 +33,9 @@ os_services:
type: compute
region: RegionOne
description: "OpenStack Compute"
- publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8774/v2.1/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8774/v2.1/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8774/v2.1/%\\(tenant_id\\)s"
- name: neutron
type: network
@@ -52,29 +53,37 @@ os_services:
internalurl: "http://{{ internal_vip.ip }}:8777"
adminurl: "http://{{ internal_vip.ip }}:8777"
+ - name: aodh
+ type: alarming
+ region: RegionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8042"
+ internalurl: "http://{{ internal_vip.ip }}:8042"
+ adminurl: "http://{{ internal_vip.ip }}:8042"
+
- name: cinder
type: volume
region: RegionOne
description: "OpenStack Block Storage"
- publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8776/v1/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8776/v1/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8776/v1/%\\(tenant_id\\)s"
- name: cinderv2
type: volumev2
region: RegionOne
description: "OpenStack Block Storage v2"
- publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8776/v2/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8776/v2/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8776/v2/%\\(tenant_id\\)s"
- name: heat
type: orchestration
region: RegionOne
description: "OpenStack Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8004/v1/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8004/v1/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8004/v1/%\\(tenant_id\\)s"
- name: heat-cfn
type: cloudformation
@@ -84,6 +93,22 @@ os_services:
internalurl: "http://{{ internal_vip.ip }}:8000/v1"
adminurl: "http://{{ internal_vip.ip }}:8000/v1"
+ - name: congress
+ type: policy
+ region: RegionOne
+ description: "OpenStack Policy Service"
+ publicurl: "http://{{ public_vip.ip }}:1789"
+ internalurl: "http://{{ internal_vip.ip }}:1789"
+ adminurl: "http://{{ internal_vip.ip }}:1789"
+
+# - name: swift
+# type: object-store
+# region: RegionOne
+# description: "OpenStack Object Storage"
+# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%\\(tenant_id\\)s"
+# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%\\(tenant_id\\)s"
+# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%\\(tenant_id\\)s"
+
os_users:
- user: admin
password: "{{ ADMIN_PASS }}"
@@ -134,6 +159,13 @@ os_users:
tenant: service
tenant_description: "Service Tenant"
+ - user: aodh
+ password: "{{ AODH_PASS }}"
+ email: aodh@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
- user: heat
password: "{{ HEAT_PASS }}"
email: heat@admin.com
@@ -141,9 +173,23 @@ os_users:
tenant: service
tenant_description: "Service Tenant"
+ - user: congress
+ password: "{{ CONGRESS_PASS }}"
+ email: congress@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
- user: demo
password: "{{ DEMO_PASS }}"
email: heat@demo.com
role: heat_stack_user
tenant: demo
tenant_description: "Demo Tenant"
+
+# - user: swift
+#   password: "{{ SWIFT_PASS }}"
+# email: swift@admin.com
+# role: admin
+# tenant: service
+# tenant_description: "Service Tenant"
diff --git a/deploy/adapters/ansible/roles/moon/files/controllers.py b/deploy/adapters/ansible/roles/moon/files/controllers.py
new file mode 100644
index 00000000..fd107a5e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/files/controllers.py
@@ -0,0 +1,1062 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the
+# 'Apache-2.0' license which can be found in the file 'LICENSE' in this
+# package distribution or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone import config
+from keystone import exception
+from keystone.models import token_model
+from keystone.contrib.moon.exception import *  # noqa: F403
+from oslo_log import log
+from uuid import uuid4
+import requests
+
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('configuration_api')
+class Configuration(controller.V3Controller):
+ collection_name = 'configurations'
+ member_name = 'configuration'
+
+ def __init__(self):
+ super(Configuration, self).__init__()
+
+ def _get_user_id_from_token(self, token_id):
+ response = self.token_provider_api.validate_token(token_id)
+ token_ref = token_model.KeystoneToken(
+ token_id=token_id, token_data=response)
+ return token_ref.get('user')
+
+ @controller.protected()
+ def get_policy_templates(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ return self.configuration_api.get_policy_templates_dict(user_id)
+
+ @controller.protected()
+ def get_aggregation_algorithms(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ return self.configuration_api.get_aggregation_algorithms_dict(user_id)
+
+ @controller.protected()
+ def get_sub_meta_rule_algorithms(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ return self.configuration_api.get_sub_meta_rule_algorithms_dict(
+ user_id)
+
+
+@dependency.requires('tenant_api', 'resource_api')
+class Tenants(controller.V3Controller):
+
+ def __init__(self):
+ super(Tenants, self).__init__()
+
+ def _get_user_id_from_token(self, token_id):
+ response = self.token_provider_api.validate_token(token_id)
+ token_ref = token_model.KeystoneToken(
+ token_id=token_id, token_data=response)
+ return token_ref.get('user')
+
+ @controller.protected()
+ def get_tenants(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ return self.tenant_api.get_tenants_dict(user_id)
+
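+    # Return the keystone project matching the given id or name; create it
+    # when no match exists.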
+ def __get_keystone_tenant_dict(
+ self, tenant_id="", tenant_name="", tenant_description="", domain="default"): # noqa
+ tenants = self.resource_api.list_projects()
+ for tenant in tenants:
+ if tenant_id and tenant_id == tenant['id']:
+ return tenant
+ if tenant_name and tenant_name == tenant['name']:
+ return tenant
+ if not tenant_id:
+ tenant_id = uuid4().hex
+ if not tenant_name:
+ tenant_name = tenant_id
+ tenant = {
+ "id": tenant_id,
+ "name": tenant_name,
+ "description": tenant_description,
+ "enabled": True,
+ "domain_id": domain
+ }
+ keystone_tenant = self.resource_api.create_project(
+ tenant["id"], tenant)
+ return keystone_tenant
+
+ @controller.protected()
+ def add_tenant(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ k_tenant_dict = self.__get_keystone_tenant_dict(
+ tenant_name=kw.get('tenant_name'),
+ tenant_description=kw.get(
+ 'tenant_description', kw.get('tenant_name')),
+            domain=kw.get('tenant_domain', "default"),
+        )
+ tenant_dict = dict()
+ tenant_dict['id'] = k_tenant_dict['id']
+ tenant_dict['name'] = kw.get('tenant_name', None)
+ tenant_dict['description'] = kw.get('tenant_description', None)
+ tenant_dict['intra_authz_extension_id'] = kw.get(
+ 'tenant_intra_authz_extension_id', None)
+ tenant_dict['intra_admin_extension_id'] = kw.get(
+ 'tenant_intra_admin_extension_id', None)
+ return self.tenant_api.add_tenant_dict(
+ user_id, tenant_dict['id'], tenant_dict)
+
+ @controller.protected()
+ def get_tenant(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ tenant_id = kw.get('tenant_id', None)
+ return self.tenant_api.get_tenant_dict(user_id, tenant_id)
+
+ @controller.protected()
+ def del_tenant(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ tenant_id = kw.get('tenant_id', None)
+ return self.tenant_api.del_tenant(user_id, tenant_id)
+
+ @controller.protected()
+ def set_tenant(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ # Next line will raise an error if tenant doesn't exist
+ k_tenant_dict = self.resource_api.get_project(
+ kw.get('tenant_id', None))
+ tenant_id = kw.get('tenant_id', None)
+ tenant_dict = dict()
+ tenant_dict['name'] = k_tenant_dict.get('name', None)
+ if 'tenant_description' in kw:
+ tenant_dict['description'] = kw.get('tenant_description', None)
+ if 'tenant_intra_authz_extension_id' in kw:
+ tenant_dict['intra_authz_extension_id'] = kw.get(
+ 'tenant_intra_authz_extension_id', None)
+ if 'tenant_intra_admin_extension_id' in kw:
+ tenant_dict['intra_admin_extension_id'] = kw.get(
+ 'tenant_intra_admin_extension_id', None)
+ self.tenant_api.set_tenant_dict(user_id, tenant_id, tenant_dict)
+
+
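+# Passed to @controller.protected(): validate the request token and raise
+# Unauthorized when validation fails.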
+def callback(self, context, prep_info, *args, **kwargs):
+ token_ref = ""
+ if context.get('token_id') is not None:
+ token_ref = token_model.KeystoneToken(
+ token_id=context['token_id'],
+ token_data=self.token_provider_api.validate_token(
+ context['token_id']))
+ if not token_ref:
+ raise exception.Unauthorized
+
+
+@dependency.requires('authz_api')
+class Authz_v3(controller.V3Controller):
+
+ def __init__(self):
+ super(Authz_v3, self).__init__()
+
+ @controller.protected(callback)
+ def get_authz(self, context, tenant_id, subject_k_id,
+ object_name, action_name):
+ try:
+ return self.authz_api.authz(
+ tenant_id, subject_k_id, object_name, action_name)
+ except Exception as e:
+ return {'authz': False, 'comment': unicode(e)}
+
+
+@dependency.requires('admin_api', 'root_api')
+class IntraExtensions(controller.V3Controller):
+ collection_name = 'intra_extensions'
+ member_name = 'intra_extension'
+
+ def __init__(self):
+ super(IntraExtensions, self).__init__()
+
+ def _get_user_id_from_token(self, token_id):
+ response = self.token_provider_api.validate_token(token_id)
+ token_ref = token_model.KeystoneToken(
+ token_id=token_id, token_data=response)
+ return token_ref.get('user')['id']
+
+ # IntraExtension functions
+ @controller.protected()
+ def get_intra_extensions(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ return self.admin_api.get_intra_extensions_dict(user_id)
+
+ @controller.protected()
+ def add_intra_extension(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_dict = dict()
+ intra_extension_dict['name'] = kw.get('intra_extension_name', None)
+ intra_extension_dict['model'] = kw.get('intra_extension_model', None)
+ intra_extension_dict['genre'] = kw.get('intra_extension_genre', None)
+ intra_extension_dict['description'] = kw.get(
+ 'intra_extension_description', None)
+ intra_extension_dict['subject_categories'] = kw.get(
+ 'intra_extension_subject_categories', dict())
+ intra_extension_dict['object_categories'] = kw.get(
+ 'intra_extension_object_categories', dict())
+ intra_extension_dict['action_categories'] = kw.get(
+ 'intra_extension_action_categories', dict())
+ intra_extension_dict['subjects'] = kw.get(
+ 'intra_extension_subjects', dict())
+ intra_extension_dict['objects'] = kw.get(
+ 'intra_extension_objects', dict())
+ intra_extension_dict['actions'] = kw.get(
+ 'intra_extension_actions', dict())
+ intra_extension_dict['subject_scopes'] = kw.get(
+ 'intra_extension_subject_scopes', dict())
+ intra_extension_dict['object_scopes'] = kw.get(
+ 'intra_extension_object_scopes', dict())
+ intra_extension_dict['action_scopes'] = kw.get(
+ 'intra_extension_action_scopes', dict())
+ intra_extension_dict['subject_assignments'] = kw.get(
+ 'intra_extension_subject_assignments', dict())
+ intra_extension_dict['object_assignments'] = kw.get(
+ 'intra_extension_object_assignments', dict())
+ intra_extension_dict['action_assignments'] = kw.get(
+ 'intra_extension_action_assignments', dict())
+ intra_extension_dict['aggregation_algorithm'] = kw.get(
+ 'intra_extension_aggregation_algorithm', dict())
+ intra_extension_dict['sub_meta_rules'] = kw.get(
+ 'intra_extension_sub_meta_rules', dict())
+ intra_extension_dict['rules'] = kw.get('intra_extension_rules', dict())
+ ref = self.admin_api.load_intra_extension_dict(
+ user_id, intra_extension_dict=intra_extension_dict)
+ return self.admin_api.populate_default_data(ref)
+
+ @controller.protected()
+ def get_intra_extension(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ return self.admin_api.get_intra_extension_dict(
+ user_id, intra_extension_id)
+
+ @controller.protected()
+ def del_intra_extension(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ self.admin_api.del_intra_extension(user_id, intra_extension_id)
+
+ @controller.protected()
+ def set_intra_extension(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ intra_extension_dict = dict()
+ intra_extension_dict['name'] = kw.get('intra_extension_name', None)
+ intra_extension_dict['model'] = kw.get('intra_extension_model', None)
+ intra_extension_dict['genre'] = kw.get('intra_extension_genre', None)
+ intra_extension_dict['description'] = kw.get(
+ 'intra_extension_description', None)
+ return self.admin_api.set_intra_extension_dict(
+ user_id, intra_extension_id, intra_extension_dict)
+
+ @controller.protected()
+ def load_root_intra_extension(self, context, **kw):
+ self.root_api.load_root_intra_extension_dict()
+
+ # Metadata functions
+ @controller.protected()
+ def get_subject_categories(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ return self.admin_api.get_subject_categories_dict(
+ user_id, intra_extension_id)
+
+ @controller.protected()
+ def add_subject_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_category_dict = dict()
+ subject_category_dict['name'] = kw.get('subject_category_name', None)
+ subject_category_dict['description'] = kw.get(
+ 'subject_category_description', None)
+ return self.admin_api.add_subject_category_dict(
+ user_id, intra_extension_id, subject_category_dict)
+
+ @controller.protected()
+ def get_subject_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ return self.admin_api.get_subject_category_dict(
+ user_id, intra_extension_id, subject_category_id)
+
+ @controller.protected()
+ def del_subject_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ self.admin_api.del_subject_category(
+ user_id, intra_extension_id, subject_category_id)
+
+ @controller.protected()
+ def set_subject_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ subject_category_dict = dict()
+ subject_category_dict['name'] = kw.get('subject_category_name', None)
+ subject_category_dict['description'] = kw.get(
+ 'subject_category_description', None)
+ return self.admin_api.set_subject_category_dict(
+ user_id, intra_extension_id, subject_category_id, subject_category_dict) # noqa
+
+ @controller.protected()
+ def get_object_categories(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ return self.admin_api.get_object_categories_dict(
+ user_id, intra_extension_id)
+
+ @controller.protected()
+ def add_object_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_category_dict = dict()
+ object_category_dict['name'] = kw.get('object_category_name', None)
+ object_category_dict['description'] = kw.get(
+ 'object_category_description', None)
+ return self.admin_api.add_object_category_dict(
+ user_id, intra_extension_id, object_category_dict)
+
+ @controller.protected()
+ def get_object_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_category_id = kw.get('object_category_id', None)
+        return self.admin_api.get_object_category_dict(
+ user_id, intra_extension_id, object_category_id)
+
+ @controller.protected()
+ def del_object_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ self.admin_api.del_object_category(
+ user_id, intra_extension_id, object_category_id)
+
+ @controller.protected()
+ def set_object_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ object_category_dict = dict()
+ object_category_dict['name'] = kw.get('object_category_name', None)
+ object_category_dict['description'] = kw.get(
+ 'object_category_description', None)
+ return self.admin_api.set_object_category_dict(
+ user_id, intra_extension_id, object_category_id, object_category_dict) # noqa
+
+ @controller.protected()
+ def get_action_categories(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ return self.admin_api.get_action_categories_dict(
+ user_id, intra_extension_id)
+
+ @controller.protected()
+ def add_action_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_category_dict = dict()
+ action_category_dict['name'] = kw.get('action_category_name', None)
+ action_category_dict['description'] = kw.get(
+ 'action_category_description', None)
+ return self.admin_api.add_action_category_dict(
+ user_id, intra_extension_id, action_category_dict)
+
+ @controller.protected()
+ def get_action_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_category_id = kw.get('action_category_id', None)
+        return self.admin_api.get_action_category_dict(
+ user_id, intra_extension_id, action_category_id)
+
+ @controller.protected()
+ def del_action_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ self.admin_api.del_action_category(
+ user_id, intra_extension_id, action_category_id)
+
+ @controller.protected()
+ def set_action_category(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ action_category_dict = dict()
+ action_category_dict['name'] = kw.get('action_category_name', None)
+ action_category_dict['description'] = kw.get(
+ 'action_category_description', None)
+ return self.admin_api.set_action_category_dict(
+ user_id, intra_extension_id, action_category_id, action_category_dict) # noqa
+
+ # Perimeter functions
+ @controller.protected()
+ def get_subjects(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ return self.admin_api.get_subjects_dict(user_id, intra_extension_id)
+
+ @controller.protected()
+ def add_subject(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_dict = dict()
+ subject_dict['name'] = kw.get('subject_name', None)
+ subject_dict['description'] = kw.get('subject_description', None)
+ subject_dict['password'] = kw.get('subject_password', None)
+ subject_dict['email'] = kw.get('subject_email', None)
+ return self.admin_api.add_subject_dict(
+ user_id, intra_extension_id, subject_dict)
+
+ @controller.protected()
+ def get_subject(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_id = kw.get('subject_id', None)
+ return self.admin_api.get_subject_dict(
+ user_id, intra_extension_id, subject_id)
+
+ @controller.protected()
+ def del_subject(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_id = kw.get('subject_id', None)
+ self.admin_api.del_subject(user_id, intra_extension_id, subject_id)
+
+ @controller.protected()
+ def set_subject(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_id = kw.get('subject_id', None)
+ subject_dict = dict()
+ subject_dict['name'] = kw.get('subject_name', None)
+ subject_dict['description'] = kw.get('subject_description', None)
+ return self.admin_api.set_subject_dict(
+ user_id, intra_extension_id, subject_id, subject_dict)
+
+ @controller.protected()
+ def get_objects(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ return self.admin_api.get_objects_dict(user_id, intra_extension_id)
+
+ @controller.protected()
+ def add_object(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_dict = dict()
+ object_dict['name'] = kw.get('object_name', None)
+ object_dict['description'] = kw.get('object_description', None)
+ return self.admin_api.add_object_dict(
+ user_id, intra_extension_id, object_dict)
+
+ @controller.protected()
+ def get_object(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_id = kw.get('object_id', None)
+ return self.admin_api.get_object_dict(
+ user_id, intra_extension_id, object_id)
+
+ @controller.protected()
+ def del_object(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_id = kw.get('object_id', None)
+ self.admin_api.del_object(user_id, intra_extension_id, object_id)
+
+ @controller.protected()
+ def set_object(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_id = kw.get('object_id', None)
+ object_dict = dict()
+ object_dict['name'] = kw.get('object_name', None)
+ object_dict['description'] = kw.get('object_description', None)
+ return self.admin_api.set_object_dict(
+ user_id, intra_extension_id, object_id, object_dict)
+
+ @controller.protected()
+ def get_actions(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ return self.admin_api.get_actions_dict(user_id, intra_extension_id)
+
+ @controller.protected()
+ def add_action(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_dict = dict()
+ action_dict['name'] = kw.get('action_name', None)
+ action_dict['description'] = kw.get('action_description', None)
+ return self.admin_api.add_action_dict(
+ user_id, intra_extension_id, action_dict)
+
+ @controller.protected()
+ def get_action(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_id = kw.get('action_id', None)
+ return self.admin_api.get_action_dict(
+ user_id, intra_extension_id, action_id)
+
+ @controller.protected()
+ def del_action(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_id = kw.get('action_id', None)
+ self.admin_api.del_action(user_id, intra_extension_id, action_id)
+
+ @controller.protected()
+ def set_action(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_id = kw.get('action_id', None)
+ action_dict = dict()
+ action_dict['name'] = kw.get('action_name', None)
+ action_dict['description'] = kw.get('action_description', None)
+ return self.admin_api.set_action_dict(
+ user_id, intra_extension_id, action_id, action_dict)
+
+ # Scope functions
+ @controller.protected()
+ def get_subject_scopes(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ return self.admin_api.get_subject_scopes_dict(
+ user_id, intra_extension_id, subject_category_id)
+
+ @controller.protected()
+ def add_subject_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ subject_scope_dict = dict()
+ subject_scope_dict['name'] = kw.get('subject_scope_name', None)
+ subject_scope_dict['description'] = kw.get(
+ 'subject_scope_description', None)
+ return self.admin_api.add_subject_scope_dict(
+ user_id, intra_extension_id, subject_category_id, subject_scope_dict) # noqa
+
+ @controller.protected()
+ def get_subject_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ subject_scope_id = kw.get('subject_scope_id', None)
+ return self.admin_api.get_subject_scope_dict(
+ user_id, intra_extension_id, subject_category_id, subject_scope_id)
+
+ @controller.protected()
+ def del_subject_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ subject_scope_id = kw.get('subject_scope_id', None)
+ self.admin_api.del_subject_scope(
+ user_id,
+ intra_extension_id,
+ subject_category_id,
+ subject_scope_id)
+
+ @controller.protected()
+ def set_subject_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ subject_scope_id = kw.get('subject_scope_id', None)
+ subject_scope_dict = dict()
+ subject_scope_dict['name'] = kw.get('subject_scope_name', None)
+ subject_scope_dict['description'] = kw.get(
+ 'subject_scope_description', None)
+ return self.admin_api.set_subject_scope_dict(
+ user_id, intra_extension_id, subject_category_id, subject_scope_id, subject_scope_dict) # noqa
+
+ @controller.protected()
+ def get_object_scopes(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ return self.admin_api.get_object_scopes_dict(
+ user_id, intra_extension_id, object_category_id)
+
+ @controller.protected()
+ def add_object_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ object_scope_dict = dict()
+ object_scope_dict['name'] = kw.get('object_scope_name', None)
+ object_scope_dict['description'] = kw.get(
+ 'object_scope_description', None)
+ return self.admin_api.add_object_scope_dict(
+ user_id, intra_extension_id, object_category_id, object_scope_dict)
+
+ @controller.protected()
+ def get_object_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ object_scope_id = kw.get('object_scope_id', None)
+ return self.admin_api.get_object_scope_dict(
+ user_id, intra_extension_id, object_category_id, object_scope_id)
+
+ @controller.protected()
+ def del_object_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ object_scope_id = kw.get('object_scope_id', None)
+ self.admin_api.del_object_scope(
+ user_id,
+ intra_extension_id,
+ object_category_id,
+ object_scope_id)
+
+ @controller.protected()
+ def set_object_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ object_scope_id = kw.get('object_scope_id', None)
+ object_scope_dict = dict()
+ object_scope_dict['name'] = kw.get('object_scope_name', None)
+ object_scope_dict['description'] = kw.get(
+ 'object_scope_description', None)
+ return self.admin_api.set_object_scope_dict(
+ user_id, intra_extension_id, object_category_id, object_scope_id, object_scope_dict) # noqa
+
+ @controller.protected()
+ def get_action_scopes(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ return self.admin_api.get_action_scopes_dict(
+ user_id, intra_extension_id, action_category_id)
+
+ @controller.protected()
+ def add_action_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ action_scope_dict = dict()
+ action_scope_dict['name'] = kw.get('action_scope_name', None)
+ action_scope_dict['description'] = kw.get(
+ 'action_scope_description', None)
+ return self.admin_api.add_action_scope_dict(
+ user_id, intra_extension_id, action_category_id, action_scope_dict)
+
+ @controller.protected()
+ def get_action_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ action_scope_id = kw.get('action_scope_id', None)
+ return self.admin_api.get_action_scope_dict(
+ user_id, intra_extension_id, action_category_id, action_scope_id)
+
+ @controller.protected()
+ def del_action_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ action_scope_id = kw.get('action_scope_id', None)
+ self.admin_api.del_action_scope(
+ user_id,
+ intra_extension_id,
+ action_category_id,
+ action_scope_id)
+
+ @controller.protected()
+ def set_action_scope(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ action_scope_id = kw.get('action_scope_id', None)
+ action_scope_dict = dict()
+ action_scope_dict['name'] = kw.get('action_scope_name', None)
+ action_scope_dict['description'] = kw.get(
+ 'action_scope_description', None)
+ return self.admin_api.set_action_scope_dict(
+ user_id, intra_extension_id, action_category_id, action_scope_id, action_scope_dict) # noqa
+
+ # Assignment functions
+
+ @controller.protected()
+ def add_subject_assignment(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_id = kw.get('subject_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ subject_scope_id = kw.get('subject_scope_id', None)
+ return self.admin_api.add_subject_assignment_list(
+ user_id, intra_extension_id, subject_id, subject_category_id, subject_scope_id) # noqa
+
+ @controller.protected()
+ def get_subject_assignment(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_id = kw.get('subject_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ return self.admin_api.get_subject_assignment_list(
+ user_id, intra_extension_id, subject_id, subject_category_id)
+
+ @controller.protected()
+ def del_subject_assignment(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ subject_id = kw.get('subject_id', None)
+ subject_category_id = kw.get('subject_category_id', None)
+ subject_scope_id = kw.get('subject_scope_id', None)
+ self.admin_api.del_subject_assignment(
+ user_id,
+ intra_extension_id,
+ subject_id,
+ subject_category_id,
+ subject_scope_id)
+
+ @controller.protected()
+ def add_object_assignment(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_id = kw.get('object_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ object_scope_id = kw.get('object_scope_id', None)
+ return self.admin_api.add_object_assignment_list(
+ user_id, intra_extension_id, object_id, object_category_id, object_scope_id) # noqa
+
+ @controller.protected()
+ def get_object_assignment(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_id = kw.get('object_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ return self.admin_api.get_object_assignment_list(
+ user_id, intra_extension_id, object_id, object_category_id)
+
+ @controller.protected()
+ def del_object_assignment(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ object_id = kw.get('object_id', None)
+ object_category_id = kw.get('object_category_id', None)
+ object_scope_id = kw.get('object_scope_id', None)
+ self.admin_api.del_object_assignment(
+ user_id,
+ intra_extension_id,
+ object_id,
+ object_category_id,
+ object_scope_id)
+
+ @controller.protected()
+ def add_action_assignment(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_id = kw.get('action_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ action_scope_id = kw.get('action_scope_id', None)
+ return self.admin_api.add_action_assignment_list(
+ user_id, intra_extension_id, action_id, action_category_id, action_scope_id) # noqa
+
+ @controller.protected()
+ def get_action_assignment(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_id = kw.get('action_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ return self.admin_api.get_action_assignment_list(
+ user_id, intra_extension_id, action_id, action_category_id)
+
+ @controller.protected()
+ def del_action_assignment(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ action_id = kw.get('action_id', None)
+ action_category_id = kw.get('action_category_id', None)
+ action_scope_id = kw.get('action_scope_id', None)
+ self.admin_api.del_action_assignment(
+ user_id,
+ intra_extension_id,
+ action_id,
+ action_category_id,
+ action_scope_id)
+
+ # Metarule functions
+
+ @controller.protected()
+ def get_aggregation_algorithm(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ return self.admin_api.get_aggregation_algorithm_id(
+ user_id, intra_extension_id)
+
+ @controller.protected()
+ def set_aggregation_algorithm(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ aggregation_algorithm_id = kw.get('aggregation_algorithm_id', None)
+ return self.admin_api.set_aggregation_algorithm_id(
+ user_id, intra_extension_id, aggregation_algorithm_id)
+
+ @controller.protected()
+ def get_sub_meta_rules(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ return self.admin_api.get_sub_meta_rules_dict(
+ user_id, intra_extension_id)
+
+ @controller.protected()
+ def add_sub_meta_rule(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ sub_meta_rule_dict = dict()
+ sub_meta_rule_dict['name'] = kw.get('sub_meta_rule_name', None)
+ sub_meta_rule_dict['algorithm'] = kw.get(
+ 'sub_meta_rule_algorithm', None)
+ sub_meta_rule_dict['subject_categories'] = kw.get(
+ 'sub_meta_rule_subject_categories', None)
+ sub_meta_rule_dict['object_categories'] = kw.get(
+ 'sub_meta_rule_object_categories', None)
+ sub_meta_rule_dict['action_categories'] = kw.get(
+ 'sub_meta_rule_action_categories', None)
+ return self.admin_api.add_sub_meta_rule_dict(
+ user_id, intra_extension_id, sub_meta_rule_dict)
+
+ @controller.protected()
+ def get_sub_meta_rule(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
+ return self.admin_api.get_sub_meta_rule_dict(
+ user_id, intra_extension_id, sub_meta_rule_id)
+
+ @controller.protected()
+ def del_sub_meta_rule(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
+ self.admin_api.del_sub_meta_rule(
+ user_id, intra_extension_id, sub_meta_rule_id)
+
+ @controller.protected()
+ def set_sub_meta_rule(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
+ sub_meta_rule_dict = dict()
+ sub_meta_rule_dict['name'] = kw.get('sub_meta_rule_name', None)
+ sub_meta_rule_dict['algorithm'] = kw.get(
+ 'sub_meta_rule_algorithm', None)
+ sub_meta_rule_dict['subject_categories'] = kw.get(
+ 'sub_meta_rule_subject_categories', None)
+ sub_meta_rule_dict['object_categories'] = kw.get(
+ 'sub_meta_rule_object_categories', None)
+ sub_meta_rule_dict['action_categories'] = kw.get(
+ 'sub_meta_rule_action_categories', None)
+ return self.admin_api.set_sub_meta_rule_dict(
+ user_id, intra_extension_id, sub_meta_rule_id, sub_meta_rule_dict)
+
+ # Rules functions
+ @controller.protected()
+ def get_rules(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
+ return self.admin_api.get_rules_dict(
+ user_id, intra_extension_id, sub_meta_rule_id)
+
+ @controller.protected()
+ def add_rule(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
+ subject_category_list = kw.get('subject_categories', [])
+ object_category_list = kw.get('object_categories', [])
+ action_category_list = kw.get('action_categories', [])
+ enabled_bool = kw.get('enabled', True)
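+        # The rule is passed to add_rule_dict as a flat list: subject
+        # categories, then action categories, then object categories,
+        # terminated by the enabled flag.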
+ rule_list = subject_category_list + action_category_list + \
+ object_category_list + [enabled_bool, ]
+ return self.admin_api.add_rule_dict(
+ user_id, intra_extension_id, sub_meta_rule_id, rule_list)
+
+ @controller.protected()
+ def get_rule(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
+ rule_id = kw.get('rule_id', None)
+ return self.admin_api.get_rule_dict(
+ user_id, intra_extension_id, sub_meta_rule_id, rule_id)
+
+ @controller.protected()
+ def del_rule(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
+ rule_id = kw.get('rule_id', None)
+ self.admin_api.del_rule(
+ user_id,
+ intra_extension_id,
+ sub_meta_rule_id,
+ rule_id)
+
+ @controller.protected()
+ def set_rule(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ intra_extension_id = kw.get('intra_extension_id', None)
+ sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
+ rule_id = kw.get('rule_id', None)
+ subject_category_list = kw.get('subject_categories', [])
+ object_category_list = kw.get('object_categories', [])
+ action_category_list = kw.get('action_categories', [])
+ rule_list = subject_category_list + action_category_list + object_category_list # noqa
+ return self.admin_api.set_rule_dict(
+ user_id, intra_extension_id, sub_meta_rule_id, rule_id, rule_list)
+
+
+@dependency.requires('authz_api', 'token_provider_api') # noqa: 405
+class InterExtensions(controller.V3Controller):
+
+ def __init__(self):
+ super(InterExtensions, self).__init__()
+
+ def _get_user_from_token(self, token_id):
+ response = self.token_provider_api.validate_token(token_id)
+ token_ref = token_model.KeystoneToken(
+ token_id=token_id, token_data=response)
+ return token_ref['user']
+
+ # @controller.protected()
+ # def get_inter_extensions(self, context, **kw):
+ # user = self._get_user_from_token(context.get('token_id'))
+ # return {
+ # 'inter_extensions':
+ # self.interextension_api.get_inter_extensions()
+ # }
+
+ # @controller.protected()
+ # def get_inter_extension(self, context, **kw):
+ # user = self._get_user_from_token(context.get('token_id'))
+ # return {
+ # 'inter_extensions':
+ # self.interextension_api.get_inter_extension(uuid=kw['inter_extension_id'])
+ # }
+
+ # @controller.protected()
+ # def create_inter_extension(self, context, **kw):
+ # user = self._get_user_from_token(context.get('token_id'))
+ # return self.interextension_api.create_inter_extension(kw)
+
+ # @controller.protected()
+ # def delete_inter_extension(self, context, **kw):
+ # user = self._get_user_from_token(context.get('token_id'))
+ # if 'inter_extension_id' not in kw:
+ # raise exception.Error
+ # return
+ # self.interextension_api.delete_inter_extension(kw['inter_extension_id'])
+
+
+@dependency.requires('moonlog_api', 'authz_api', 'token_provider_api') # noqa: 405
+class Logs(controller.V3Controller):
+
+ def __init__(self):
+ super(Logs, self).__init__()
+
+ def _get_user_id_from_token(self, token_id):
+ response = self.token_provider_api.validate_token(token_id)
+ token_ref = token_model.KeystoneToken(
+ token_id=token_id, token_data=response)
+ return token_ref['user']
+
+ @controller.protected()
+ def get_logs(self, context, **kw):
+ user_id = self._get_user_id_from_token(context.get('token_id'))
+ options = kw.get('options', '')
+ return self.moonlog_api.get_logs(user_id, options)
+
+
+@dependency.requires('identity_api', 'token_provider_api', 'resource_api') # noqa: 405
+class MoonAuth(controller.V3Controller):
+
+ def __init__(self):
+ super(MoonAuth, self).__init__()
+
+ def _get_project(self, uuid="", name=""):
+ projects = self.resource_api.list_projects()
+ for project in projects:
+ if uuid and uuid == project['id']:
+ return project
+ elif name and name == project['name']:
+ return project
+
+ def get_token(self, context, **kw):
+ data_auth = {
+ "auth": {
+ "identity": {
+ "methods": [
+ "password"
+ ],
+ "password": {
+ "user": {
+ "domain": {
+ "id": "Default"
+ },
+ "name": kw['username'],
+ "password": kw['password']
+ }
+ }
+ }
+ }
+ }
+
+ message = {}
+ if "project" in kw:
+ project = self._get_project(name=kw['project'])
+ if project:
+ data_auth["auth"]["scope"] = dict()
+ data_auth["auth"]["scope"]['project'] = dict()
+ data_auth["auth"]["scope"]['project']['id'] = project['id']
+ else:
+ message = {
+ "error": {
+ "message": "Unable to find project {}".format(kw['project']), # noqa
+ "code": 200,
+ "title": "UnScopedToken"
+ }}
+
+        # NOTE: the auth endpoint is hardcoded here; it should come from the
+        # deployment configuration instead.
+        # req = requests.post("http://localhost:5000/v3/auth/tokens",
+        #                     json=data_auth,
+        #                     headers={"Content-Type": "application/json"}
+        #                     )
+        req = requests.post("http://172.16.1.222:5000/v3/auth/tokens",
+                            json=data_auth,
+                            headers={"Content-Type": "application/json"}
+                            )
+ if req.status_code not in (200, 201):
+ LOG.error(req.text)
+ else:
+ _token = req.headers['X-Subject-Token']
+ _data = req.json()
+ _result = {
+ "token": _token,
+ 'message': message
+ }
+ try:
+                _result["roles"] = [
+                    role['name'] for role in _data["token"]["roles"]]
+ except KeyError:
+ pass
+ return _result
+ return {"token": None, 'message': req.json()}
diff --git a/deploy/adapters/ansible/roles/moon/files/deb.conf b/deploy/adapters/ansible/roles/moon/files/deb.conf
new file mode 100644
index 00000000..6e1159a1
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/files/deb.conf
@@ -0,0 +1,11 @@
+keystone/admin-password: password
+keystone/auth-token: password
+keystone/admin-password-confirm: password
+keystone/admin-email: root@localhost
+keystone/admin-role-name: admin
+keystone/admin-user: admin
+keystone/create-admin-tenant: false
+keystone/region-name: Orange
+keystone/admin-tenant-name: admin
+keystone/register-endpoint: false
+keystone/configure_db: false
diff --git a/deploy/adapters/ansible/roles/moon/files/get_deb_depends.py b/deploy/adapters/ansible/roles/moon/files/get_deb_depends.py
new file mode 100644
index 00000000..d510bcf4
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/files/get_deb_depends.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+
+import sys
+import subprocess
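+
+# Usage: get_deb_depends.py pkg1.deb [pkg2.deb ...]
+# Prints the space-separated union of every package's "Depends:" entries so
+# the output can be fed straight to `apt-get install`.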
+
+pkgs = []
+
+for arg in sys.argv[1:]:
+    proc = subprocess.Popen(["dpkg-deb", "--info", arg],
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    # communicate() drains both pipes together, avoiding the deadlock that
+    # separate stdout/stderr reads can cause on large output
+    out, err = proc.communicate()
+ if err:
+ print("An error occurred with {} ({})".format(arg, err))
+ continue
+    for line in out.splitlines():
+        line = line.decode('utf-8')
+        if " Depends:" in line:
+            line = line.replace(" Depends:", "")
+            for _dep in line.split(','):
+                pkgs.append(_dep.split()[0])
+
+print(" ".join(pkgs))
diff --git a/deploy/adapters/ansible/roles/moon/handlers/main.yml b/deploy/adapters/ansible/roles/moon/handlers/main.yml
new file mode 100755
index 00000000..608a8a09
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/handlers/main.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart keystone services
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: services | union(services_noarch)
diff --git a/deploy/adapters/ansible/roles/moon/tasks/main.yml b/deploy/adapters/ansible/roles/moon/tasks/main.yml
new file mode 100644
index 00000000..a3511de7
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/tasks/main.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include: moon.yml
+ when: moon == "Enable"
diff --git a/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml b/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml
new file mode 100644
index 00000000..e4142b5f
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml
@@ -0,0 +1,20 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: update api-paste.ini
+ template: src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes
+
+- name: restart nova-compute
+ service: name={{ item }} state=restarted enabled=yes
+ with_items:
+ - nova-compute
+
+#- name: restart swift task
+# shell: swift-init all start
+# ignore_errors: True
diff --git a/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml b/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml
new file mode 100644
index 00000000..95dd2e89
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml
@@ -0,0 +1,238 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# install all packages
+- name: install prerequisite packages
+  shell: apt-get install -y python-pip unzip
+
+# download master.zip
+- name: get compass http server address
+ shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
+ register: http_server
+
+- name: download keystone-moon packages
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/master.zip" dest=/tmp/master.zip mode=0444
+
+- name: extract keystone-moon packages
+ unarchive: src=/tmp/master.zip dest=/tmp copy=no
+
+# install all dependencies
+- name: copy dependency-listing script
+  copy: src=get_deb_depends.py dest=/tmp/get_deb_depends.py
+
+- name: install keystone-moon dependencies
+ shell: "apt-get install `python /tmp/get_deb_depends.py /tmp/moon-bin-master/*.deb`"
+ when: ansible_os_family == "Debian"
+
+- name: delete default wsgi-keystone configuration files
+ shell: >
+ rm -f {{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf;
+ rm -f {{ apache_config_dir }}/sites-available/wsgi-keystone.conf;
+
+# install keystone moon
+- name: copy debconf preseed file
+  copy: src=deb.conf dest=/tmp/deb.conf
+
+- name: install keystone moon
+ shell: >
+ export DEBIAN_FRONTEND="noninteractive";
+ sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
+
+#- name: install keystone moon
+# shell: >
+# export DEBIAN_FRONTEND="noninteractive";
+# sudo -E debconf-set-selections python-keystone < /tmp/deb.conf;
+# sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
+
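+# keystone will be served through apache2 mod_wsgi (wsgi-keystone.conf below),
+# so the packaged eventlet service scripts are moved aside to keep the
+# keystone service from starting on its own.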
+- name: stop keystone and move its init scripts aside
+ shell: >
+ service keystone stop;
+ mv /etc/init.d/keystone /home/;
+ mv /etc/init/keystone.conf /home/;
+ mv /lib/systemd/system/keystone.service /home/;
+
+# config keystone and apache2
+- name: delete sqlite database
+ file:
+ path: /var/lib/keystone/keystone.db
+ state: absent
+
+#- name: update keystone conf
+# template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
+
+
+#- name: assure listen port exist
+# lineinfile:
+# dest: '{{ apache_config_dir }}/ports.conf'
+# regexp: '{{ item.regexp }}'
+# line: '{{ item.line}}'
+# with_items:
+# - regexp: "^Listen {{ internal_ip }}:5000"
+# line: "Listen {{ internal_ip }}:5000"
+# - regexp: "^Listen {{ internal_ip }}:35357"
+# line: "Listen {{ internal_ip }}:35357"
+
+- name: update apache2 configs
+ template:
+ src: wsgi-keystone.conf.j2
+ dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
+ when: ansible_os_family == 'Debian'
+
+- name: enable keystone server
+ file:
+ src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
+ dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
+ state: "link"
+ when: ansible_os_family == 'Debian'
+
+#- name: keystone source files
+# template: src={{ item }} dest=/opt/{{ item }}
+# with_items:
+# - admin-openrc.sh
+# - demo-openrc.sh
+
+# keystone paste ini
+- name: back up keystone-paste.ini
+ shell: sudo cp /etc/keystone/keystone-paste.ini /etc/keystone/keystone-paste.ini.bak;
+
+- name: insert moon pipeline and service into keystone-paste.ini
+ shell: sudo sed "3i[pipeline:moon_pipeline]\npipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service\n\n[app:moon_service]\nuse = egg:keystone#moon_service\n" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini;
+
+- name: install the updated keystone-paste.ini
+ shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini;
+
+- name: map /moon to the moon pipeline in keystone-paste.ini
+ shell: sudo sed "s/use = egg:Paste#urlmap/use = egg:Paste#urlmap\n\/moon = moon_pipeline/" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini;
+
+- name: install the updated keystone-paste.ini again
+ shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini;
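+# The five steps above produce the same [pipeline:moon_pipeline] and
+# [app:moon_service] stanzas and /moon urlmap entries as the reference
+# templates/keystone-paste.ini.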
+
+# moon log
+- name: set up moon log directory and files
+  shell: >
+    sudo mkdir -p /var/log/moon/;
+ sudo chown keystone /var/log/moon/;
+ sudo addgroup moonlog;
+ sudo chgrp moonlog /var/log/moon/;
+ sudo touch /var/log/moon/keystonemiddleware.log;
+ sudo touch /var/log/moon/system.log;
+ sudo chgrp moonlog /var/log/moon/keystonemiddleware.log;
+ sudo chgrp moonlog /var/log/moon/system.log;
+ sudo chmod g+rw /var/log/moon;
+ sudo chmod g+rw /var/log/moon/keystonemiddleware.log;
+ sudo chmod g+rw /var/log/moon/system.log;
+ sudo adduser keystone moonlog;
+ # sudo adduser swift moonlog;
+ sudo adduser nova moonlog;
+
+
+# keystone db sync
+- name: keystone db sync
+ shell: >
+ sudo /usr/bin/keystone-manage db_sync;
+ sudo /usr/bin/keystone-manage db_sync --extension moon;
+ when: inventory_hostname == haproxy_hosts.keys()[0]
+
+
+#############################################
+- name: wait for keystone ready
+ wait_for: port=35357 delay=3 timeout=10 host={{ internal_ip }}
+
+#- name: cron job to purge expired tokens hourly
+# cron:
+# name: 'purge expired tokens'
+# special_time: hourly
+# job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1'
+
+#############################################
+# moon workaround
+- name: copy patched moon controllers.py
+  copy: src=controllers.py dest=/usr/lib/python2.7/dist-packages/keystone/contrib/moon/controllers.py
+
+# apache2 restart
+- name: restart apache2
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: services | union(services_noarch)
+
+# install moonclient
+- name: install moon client
+ shell: sudo pip install /tmp/moon-bin-master/python-moonclient-0.1.tar.gz
+
+###################################################
+
+
+#- name: add tenants
+# keystone_user:
+# token: "{{ ADMIN_TOKEN }}"
+# endpoint: "http://{{ internal_ip }}:35357/v2.0"
+# tenant: "{{ item.tenant }}"
+# tenant_description: "{{ item.tenant_description }}"
+# with_items: "{{ os_users }}"
+# when: inventory_hostname == groups['controller'][0]
+#
+#- name: add users
+# keystone_user:
+# token: "{{ ADMIN_TOKEN }}"
+# endpoint: "http://{{ internal_ip }}:35357/v2.0"
+# user: "{{ item.user }}"
+# tenant: "{{ item.tenant }}"
+# password: "{{ item.password }}"
+# email: "{{ item.email }}"
+# with_items: "{{ os_users }}"
+# when: inventory_hostname == groups['controller'][0]
+#
+#- name: grant roles
+# keystone_user:
+# token: "{{ ADMIN_TOKEN }}"
+# endpoint: "http://{{ internal_ip }}:35357/v2.0"
+# user: "{{ item.user }}"
+# role: "{{ item.role }}"
+# tenant: "{{ item.tenant }}"
+# with_items: "{{ os_users }}"
+# when: inventory_hostname == groups['controller'][0]
+#
+#- name: add endpoints
+# keystone_service:
+# token: "{{ ADMIN_TOKEN }}"
+# endpoint: "http://{{ internal_ip }}:35357/v2.0"
+# name: "{{ item.name }}"
+# type: "{{ item.type }}"
+# region: "{{ item.region}}"
+# description: "{{ item.description }}"
+# publicurl: "{{ item.publicurl }}"
+# internalurl: "{{ item.internalurl }}"
+# adminurl: "{{ item.adminurl }}"
+# with_items: "{{ os_services }}"
+# when: inventory_hostname == groups['controller'][0]
+
+
+###################################################
+
+- name: update api-paste.ini
+ template: src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes
+
+#- name: update proxy-server conf
+# template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
+
+# restart nova
+- name: restart nova
+ service: name={{ item }} state=restarted enabled=yes
+ with_items:
+ - nova-api
+ - nova-cert
+ - nova-conductor
+ - nova-consoleauth
+ - nova-scheduler
+
+# restart swift
+#- name: restart swift
+# service: name={{ item }} state=restarted enabled=yes
+# with_items:
+# - swift-proxy
+# - memcached
diff --git a/deploy/adapters/ansible/roles/moon/tasks/moon.yml b/deploy/adapters/ansible/roles/moon/tasks/moon.yml
new file mode 100644
index 00000000..40e1c98c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/tasks/moon.yml
@@ -0,0 +1,16 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- include: moon-controller.yml
+ when: inventory_hostname in groups['controller']
+
+- include: moon-compute.yml
+ when: inventory_hostname in groups['compute']
diff --git a/deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh b/deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh
new file mode 100644
index 00000000..6ba620ff
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Verify the Identity Service installation
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
+export OS_USERNAME=admin
+export OS_VOLUME_API_VERSION=2
+
diff --git a/deploy/adapters/ansible/roles/moon/templates/api-paste.ini b/deploy/adapters/ansible/roles/moon/templates/api-paste.ini
new file mode 100644
index 00000000..f99689b7
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/templates/api-paste.ini
@@ -0,0 +1,106 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = cors metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+# starting in Liberty the v21 implementation replaces the v2
+# implementation and it is suggested that you use it as the default. If
+# this causes issues with your clients you can roll back to the
+# *frozen* v2 api by commenting out the above stanza and using the
+# following instead::
+# /v2: openstack_compute_api_legacy_v2
+# if rolling back to v2 fixes your issue please file a critical bug
+# at - https://bugs.launchpad.net/nova/+bugs
+#
+# v21 is an exact feature match for v2, except it has more stringent
+# input validation on the wsgi surface (prevents fuzzing early on the
+# API). It also provides new features via API microversions which are
+# opt-in for clients. Unaware clients will receive the same frozen
+# v2 API feature set, but with some relaxed validation
+/v2: openstack_compute_api_v21_legacy_v2_compatible
+/v2.1: openstack_compute_api_v21
+
+# NOTE: this is deprecated in favor of openstack_compute_api_v21_legacy_v2_compatible
+[composite:openstack_compute_api_legacy_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_ratelimit osapi_compute_app_legacy_v2
+keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext moon legacy_ratelimit osapi_compute_app_legacy_v2
+keystone_nolimit = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_legacy_v2
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
+keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
+
+[composite:openstack_compute_api_v21_legacy_v2_compatible]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
+keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth2]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:legacy_ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory
+
+[filter:legacy_v2_compatible]
+paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory
+
+[app:osapi_compute_app_legacy_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = nova
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
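+# NOTE: this filter is only referenced by the "keystone" pipeline of the
+# deprecated legacy v2 composite above; the v21 pipelines do not include it.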
+[filter:moon]
+paste.filter_factory = keystonemiddleware.moon_agent:filter_factory
+authz_login=admin
+authz_password=password
+logfile=/var/log/moon/keystonemiddleware.log
diff --git a/deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh b/deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh
new file mode 100644
index 00000000..5807e868
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh
@@ -0,0 +1,13 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+export OS_USERNAME=demo
+export OS_PASSWORD={{ DEMO_PASS }}
+export OS_TENANT_NAME=demo
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
+
diff --git a/deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini b/deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini
new file mode 100644
index 00000000..cd9ebede
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini
@@ -0,0 +1,96 @@
+# Keystone PasteDeploy configuration file.
+
+[pipeline:moon_pipeline]
+pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service
+
+[app:moon_service]
+use = egg:keystone#moon_service
+
+[filter:debug]
+use = egg:oslo.middleware#debug
+
+[filter:request_id]
+use = egg:oslo.middleware#request_id
+
+[filter:build_auth_context]
+use = egg:keystone#build_auth_context
+
+[filter:token_auth]
+use = egg:keystone#token_auth
+
+[filter:admin_token_auth]
+# This is deprecated in the M release and will be removed in the O release.
+# Use `keystone-manage bootstrap` and remove this from the pipelines below.
+use = egg:keystone#admin_token_auth
+
+[filter:json_body]
+use = egg:keystone#json_body
+
+[filter:cors]
+use = egg:oslo.middleware#cors
+oslo_config_project = keystone
+
+[filter:ec2_extension]
+use = egg:keystone#ec2_extension
+
+[filter:ec2_extension_v3]
+use = egg:keystone#ec2_extension_v3
+
+[filter:s3_extension]
+use = egg:keystone#s3_extension
+
+[filter:url_normalize]
+use = egg:keystone#url_normalize
+
+[filter:sizelimit]
+use = egg:oslo.middleware#sizelimit
+
+[app:public_service]
+use = egg:keystone#public_service
+
+[app:service_v3]
+use = egg:keystone#service_v3
+
+[app:admin_service]
+use = egg:keystone#admin_service
+
+[pipeline:public_api]
+# The last item in this pipeline must be public_service or an equivalent
+# application. It cannot be a filter.
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension public_service
+
+[pipeline:admin_api]
+# The last item in this pipeline must be admin_service or an equivalent
+# application. It cannot be a filter.
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension s3_extension admin_service
+
+[pipeline:api_v3]
+# The last item in this pipeline must be service_v3 or an equivalent
+# application. It cannot be a filter.
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3
+
+[app:public_version_service]
+use = egg:keystone#public_version_service
+
+[app:admin_version_service]
+use = egg:keystone#admin_version_service
+
+[pipeline:public_version_api]
+pipeline = cors sizelimit url_normalize public_version_service
+
+[pipeline:admin_version_api]
+pipeline = cors sizelimit url_normalize admin_version_service
+
+[composite:main]
+use = egg:Paste#urlmap
+/moon = moon_pipeline
+/v2.0 = public_api
+/v3 = api_v3
+/ = public_version_api
+
+[composite:admin]
+use = egg:Paste#urlmap
+/moon = moon_pipeline
+/v2.0 = admin_api
+/v3 = api_v3
+/ = admin_version_api
diff --git a/deploy/adapters/ansible/roles/moon/templates/keystone.conf b/deploy/adapters/ansible/roles/moon/templates/keystone.conf
new file mode 100644
index 00000000..649fc32c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/templates/keystone.conf
@@ -0,0 +1,59 @@
+{% set memcached_servers = [] %}
+{% set rabbitmq_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
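+{# for two controllers this renders e.g.
+   memcached_servers = "10.1.0.50:11211,10.1.0.51:11211" and
+   rabbitmq_servers  = "10.1.0.50:5672,10.1.0.51:5672" #}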
+[DEFAULT]
+admin_token={{ ADMIN_TOKEN }}
+debug={{ DEBUG }}
+log_dir = /var/log/keystone
+
+[cache]
+backend=keystone.cache.memcache_pool
+memcache_servers={{ memcached_servers }}
+enabled=true
+
+[revoke]
+driver=sql
+expiration_buffer=3600
+caching=true
+
+[database]
+connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8
+idle_timeout=30
+min_pool_size=5
+max_pool_size=120
+pool_timeout=30
+
+
+[identity]
+default_domain_id=default
+driver=sql
+
+[assignment]
+driver=sql
+
+[resource]
+driver=sql
+caching=true
+cache_time=3600
+
+[token]
+enforce_token_bind=permissive
+expiration=43200
+provider=uuid
+driver=sql
+caching=true
+cache_time=3600
+
+[eventlet_server]
+public_bind_host= {{ identity_host }}
+admin_bind_host= {{ identity_host }}
+
+[oslo_messaging_rabbit]
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_hosts = {{ rabbitmq_servers }}
diff --git a/deploy/adapters/ansible/roles/moon/templates/proxy-server.conf b/deploy/adapters/ansible/roles/moon/templates/proxy-server.conf
new file mode 100644
index 00000000..9bea7a8e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/templates/proxy-server.conf
@@ -0,0 +1,775 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+[DEFAULT]
+bind_ip = {{ internal_ip }}
+bind_port = 8080
+# bind_timeout = 30
+# backlog = 4096
+swift_dir = /etc/swift
+user = swift
+
+# Enables exposing configuration settings via HTTP GET /info.
+# expose_info = true
+
+# Key to use for admin calls that are HMAC signed. Default is empty,
+# which will disable admin calls to /info.
+# admin_key = secret_admin_key
+#
+# Allows the ability to withhold sections from showing up in the public calls
+# to /info. You can withhold subsections by separating the dict level with a
+# ".". The following would cause the sections 'container_quotas' and 'tempurl'
+# to not be listed, and the key max_failed_deletes would be removed from
+# bulk_delete. Default value is 'swift.valid_api_versions' which allows all
+# registered features to be listed via HTTP GET /info except
+# swift.valid_api_versions information
+# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl
+
+# Use an integer to override the number of pre-forked processes that will
+# accept connections. Should default to the number of effective cpu
+# cores in the system. It's worth noting that individual workers will
+# use many eventlet co-routines to service multiple concurrent requests.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# Set the following two lines to enable SSL. This is for testing only.
+# cert_file = /etc/swift/proxy.crt
+# key_file = /etc/swift/proxy.key
+#
+# expiring_objects_container_divisor = 86400
+# expiring_objects_account_name = expiring_objects
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_headers = false
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# This optional suffix (default is empty) is appended to the swift transaction
+# id, allowing one to easily figure out which cluster an X-Trans-Id belongs to.
+# This is very useful when one is managing more than one swift cluster.
+# trans_id_suffix =
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
+# cors_allow_origin =
+# strict_cors_mode = True
+#
+# client_timeout = 60
+# eventlet_debug = false
+
+[pipeline:main]
+# This sample pipeline uses tempauth and is used for SAIO dev work and
+# testing. See below for a pipeline using keystone.
+#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
+pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging moon proxy-server
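+# moon is placed after authtoken/keystoneauth and just before the proxy app,
+# so requests only reach swift once the moon agent has authorized them.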
+
+# The following pipeline shows keystone integration. Comment out the one
+# above and uncomment this one. Additional steps for integrating keystone are
+# covered further below in the filter sections for authtoken and keystoneauth.
+#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
+
+[app:proxy-server]
+use = egg:swift#proxy
+account_autocreate = True
+# You can override the default log routing for this app here:
+# set log_name = proxy-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_address = /dev/log
+#
+# log_handoffs = true
+# recheck_account_existence = 60
+# recheck_container_existence = 60
+# object_chunk_size = 65536
+# client_chunk_size = 65536
+#
+# How long the proxy server will wait on responses from the a/c/o servers.
+# node_timeout = 10
+#
+# How long the proxy server will wait for an initial response and to read a
+# chunk of data from the object servers while serving GET / HEAD requests.
+# Timeouts from these requests can be recovered from so setting this to
+# something lower than node_timeout would provide quicker error recovery
+# while allowing for a longer timeout for non-recoverable requests (PUTs).
+# Defaults to node_timeout, should be overridden if node_timeout is set to a
+# high number to prevent client timeouts from firing before the proxy server
+# has a chance to retry.
+# recoverable_node_timeout = node_timeout
+#
+# conn_timeout = 0.5
+#
+# How long to wait for requests to finish after a quorum has been established.
+# post_quorum_timeout = 0.5
+#
+# How long without an error before a node's error count is reset. This will
+# also be how long before a node is reenabled after suppression is triggered.
+# error_suppression_interval = 60
+#
+# How many errors can accumulate before a node is temporarily ignored.
+# error_suppression_limit = 10
+#
+# If set to 'true' any authorized user may create and delete accounts; if
+# 'false' no one, even authorized, can.
+# allow_account_management = false
+#
+# Set object_post_as_copy = false to turn on fast posts where only the metadata
+# changes are stored anew and the original data file is kept in place. This
+# makes for quicker posts.
+# object_post_as_copy = true
+#
+# If set to 'true' authorized accounts that do not yet exist within the Swift
+# cluster will be automatically created.
+# account_autocreate = false
+#
+# If set to a positive value, trying to create a container when the account
+# already has at least this maximum containers will result in a 403 Forbidden.
+# Note: This is a soft limit, meaning a user might exceed the cap for
+# recheck_account_existence before the 403s kick in.
+# max_containers_per_account = 0
+#
+# This is a comma separated list of account hashes that ignore the
+# max_containers_per_account cap.
+# max_containers_whitelist =
+#
+# Comma separated list of Host headers to which the proxy will deny requests.
+# deny_host_headers =
+#
+# Prefix used when automatically creating accounts.
+# auto_create_account_prefix = .
+#
+# Depth of the proxy put queue.
+# put_queue_depth = 10
+#
+# Storage nodes can be chosen at random (shuffle), by using timing
+# measurements (timing), or by using an explicit match (affinity).
+# Using timing measurements may allow for lower overall latency, while
+# using affinity allows for finer control. In both the timing and
+# affinity cases, equally-sorting nodes are still randomly chosen to
+# spread load.
+# The valid values for sorting_method are "affinity", "shuffle", or "timing".
+# sorting_method = shuffle
+#
+# If the "timing" sorting_method is used, the timings will only be valid for
+# the number of seconds configured by timing_expiry.
+# timing_expiry = 300
+#
+# By default on a GET/HEAD swift will connect to a storage node one at a time
+# in a single thread. There is smarts in the order they are hit however. If you
+# turn on concurrent_gets below, then replica count threads will be used.
+# With addition of the concurrency_timeout option this will allow swift to send
+# out GET/HEAD requests to the storage nodes concurrently and answer with the
+# first to respond. With an EC policy the parameter only affects HEAD requests.
+# concurrent_gets = off
+#
+# This parameter controls how long to wait before firing off the next
+# concurrent_get thread. A value of 0 would be fully concurrent, any other
+# number will stagger the firing of the threads. This number should be
+# between 0 and node_timeout. The default is what ever you set for the
+# conn_timeout parameter.
+# concurrency_timeout = 0.5
+#
+# Set to the number of nodes to contact for a normal request. You can use
+# '* replicas' at the end to have it use the number given times the number of
+# replicas for the ring being used for the request.
+# request_node_count = 2 * replicas
+#
+# Which backend servers to prefer on reads. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. The value after the equals is
+# the priority; lower numbers are higher priority.
+#
+# Example: first read from region 1 zone 1, then region 1 zone 2, then
+# anything in region 2, then everything else:
+# read_affinity = r1z1=100, r1z2=200, r2=300
+# Default is empty, meaning no preference.
+# read_affinity =
+#
+# Which backend servers to prefer on writes. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. If this is set, then when
+# handling an object PUT request, some number (see setting
+# write_affinity_node_count) of local backend servers will be tried
+# before any nonlocal ones.
+#
+# Example: try to write to regions 1 and 2 before writing to any other
+# nodes:
+# write_affinity = r1, r2
+# Default is empty, meaning no preference.
+# write_affinity =
+#
+# The number of local (as governed by the write_affinity setting)
+# nodes to attempt to contact first, before any non-local ones. You
+# can use '* replicas' at the end to have it use the number given
+# times the number of replicas for the ring being used for the
+# request.
+# write_affinity_node_count = 2 * replicas
+#
+# These are the headers whose values will only be shown to swift_owners. The
+# exact definition of a swift_owner is up to the auth system in use, but
+# usually indicates administrative responsibilities.
+# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control
+
+[filter:tempauth]
+use = egg:swift#tempauth
+# You can override the default log routing for this filter here:
+# set log_name = tempauth
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# The reseller prefix will verify a token begins with this prefix before even
+# attempting to validate it. Also, with authorization, only Swift storage
+# accounts with this prefix will be authorized by this middleware. Useful if
+# multiple auth systems are in use for one Swift cluster.
+# The reseller_prefix may contain a comma separated list of items. The first
+# item is used for the token as mentioned above. If second and subsequent
+# items exist, the middleware will handle authorization for an account with
+# that prefix. For example, for prefixes "AUTH, SERVICE", a path of
+# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty
+# (blank) reseller prefix is required, it must be first in the list. Two
+# single quote characters indicates an empty (blank) reseller prefix.
+# reseller_prefix = AUTH
+
+#
+# The require_group parameter names a group that must be presented by
+# either X-Auth-Token or X-Service-Token. Usually this parameter is
+# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah).
+# By default, no group is needed. Do not use .admin.
+# require_group =
+
+# The auth prefix will cause requests beginning with this prefix to be routed
+# to the auth subsystem, for granting tokens, etc.
+# auth_prefix = /auth/
+# token_life = 86400
+#
+# This allows middleware higher in the WSGI pipeline to override auth
+# processing, useful for middleware such as tempurl and formpost. If you know
+# you're not going to use such middleware and you want a bit of extra security,
+# you can set this to false.
+# allow_overrides = true
+#
+# This specifies what scheme to return with storage urls:
+# http, https, or default (chooses based on what the server is running as)
+# This can be useful with an SSL load balancer in front of a non-SSL server.
+# storage_url_scheme = default
+#
+# Lastly, you need to list all the accounts/users you want here. The format is:
+# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
+# or if you want underscores in <account> or <user>, you can base64 encode them
+# (with no equal signs) and use this format:
+# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
+# There are special groups of:
+# .reseller_admin = can do anything to any account for this auth
+# .admin = can do anything within the account
+# If neither of these groups are specified, the user can only access containers
+# that have been explicitly allowed for them by a .admin or .reseller_admin.
+# The trailing optional storage_url allows you to specify an alternate url to
+# hand back to the user upon authentication. If not specified, this defaults to
+# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
+# to what the requester would need to use to reach this host.
+# Here are example entries, required for running the tests:
+user_admin_admin = admin .admin .reseller_admin
+user_test_tester = testing .admin
+user_test2_tester2 = testing2 .admin
+user_test_tester3 = testing3
+user_test5_tester5 = testing5 service
+
+# To enable Keystone authentication you need to have the auth token
+# middleware first to be configured. Here is an example below, please
+# refer to the keystone's documentation for details about the
+# different settings.
+#
+# You'll also need to have the keystoneauth middleware enabled and have it in
+# your main pipeline, as show in the sample pipeline at the top of this file.
+#
+# Following parameters are known to work with keystonemiddleware v2.3.0
+# (above v2.0.0), but checking the latest information in the wiki page[1]
+# is recommended.
+# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration
+#
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+identity_uri = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+#auth_plugin = password
+auth_type = password
+project_domain_id = default
+user_domain_id = default
+project_name = service
+username = swift
+password = {{ CINDER_PASS }}
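+# NOTE: the swift service user above authenticates with CINDER_PASS; verify
+# that this matches the password assigned to the swift user in Keystone.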
+delay_auth_decision = True
+admin_user=admin
+admin_password={{ ADMIN_PASS }}
+admin_token={{ ADMIN_TOKEN }}
+#
+# delay_auth_decision defaults to False, but leaving it as false will
+# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from
+# working. This value must be explicitly set to True.
+# delay_auth_decision = False
+#
+# cache = swift.cache
+# include_service_catalog = False
+#
+[filter:keystoneauth]
+use = egg:swift#keystoneauth
+operator_roles = admin,user
+# The reseller_prefix option lists account namespaces that this middleware is
+# responsible for. The prefix is placed before the Keystone project id.
+# For example, for project 12345678, and prefix AUTH, the account is
+# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...).
+# Several prefixes are allowed by specifying a comma-separated list
+# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a
+# single blank/empty prefix. If an empty prefix is required in a list of
+# prefixes, a value of '' (two single quote characters) indicates a
+# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_')
+# character is appended to the value unless already present.
+# reseller_prefix = AUTH
+#
+# The user must have at least one role named by operator_roles on a
+# project in order to create, delete and modify containers and objects
+# and to set and read privileged headers such as ACLs.
+# If there are several reseller prefix items, you can prefix the
+# parameter so it applies only to those accounts (for example
+# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project>
+# path). If you omit the prefix, the option applies to all reseller
+# prefix items. For the blank/empty prefix, prefix with '' (do not put
+# underscore after the two single quote characters).
+# operator_roles = admin, swiftoperator
+#
+# The reseller admin role has the ability to create and delete accounts
+# reseller_admin_role = ResellerAdmin
+#
+# This allows middleware higher in the WSGI pipeline to override auth
+# processing, useful for middleware such as tempurl and formpost. If you know
+# you're not going to use such middleware and you want a bit of extra security,
+# you can set this to false.
+# allow_overrides = true
+#
+# If the service_roles parameter is present, an X-Service-Token must be
+# present in the request that when validated, grants at least one role listed
+# in the parameter. The X-Service-Token may be scoped to any project.
+# If there are several reseller prefix items, you can prefix the
+# parameter so it applies only to those accounts (for example
+# the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project>
+# path). If you omit the prefix, the option applies to all reseller
+# prefix items. For the blank/empty prefix, prefix with '' (do not put
+# underscore after the two single quote characters).
+# By default, no service_roles are required.
+# service_roles =
+#
+# For backwards compatibility, keystoneauth will match names in cross-tenant
+# access control lists (ACLs) when both the requesting user and the tenant
+# are in the default domain i.e the domain to which existing tenants are
+# migrated. The default_domain_id value configured here should be the same as
+# the value used during migration of tenants to keystone domains.
+# default_domain_id = default
+#
+# For a new installation, or an installation in which keystone projects may
+# move between domains, you should disable backwards compatible name matching
+# in ACLs by setting allow_names_in_acls to false:
+# allow_names_in_acls = true
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
+# This facility may be used to temporarily remove a Swift node from a load
+# balancer pool during maintenance or upgrade (remove the file to allow the
+# node back into the load balancer pool).
+# disable_path =
+
+[filter:cache]
+use = egg:swift#memcache
+memcache_servers = {{ memcached_servers }}
+# You can override the default log routing for this filter here:
+# set log_name = cache
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# If not set here, the value for memcache_servers will be read from
+# memcache.conf (see memcache.conf-sample) or lacking that file, it will
+# default to the value below. You can specify multiple servers separated with
+# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
+# follow rfc3986 section-3.2.2, i.e. [::1]:11211)
+# memcache_servers = 127.0.0.1:11211
+#
+# Sets how memcache values are serialized and deserialized:
+# 0 = older, insecure pickle serialization
+# 1 = json serialization but pickles can still be read (still insecure)
+# 2 = json serialization only (secure and the default)
+# If not set here, the value for memcache_serialization_support will be read
+# from /etc/swift/memcache.conf (see memcache.conf-sample).
+# To avoid an instant full cache flush, existing installations should
+# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
+# set to 2 and reload.
+# In the future, the ability to use pickle serialization will be removed.
+# memcache_serialization_support = 2
+#
+# Sets the maximum number of connections to each memcached server per worker
+# memcache_max_connections = 2
+#
+# More options documented in memcache.conf-sample
+
+[filter:ratelimit]
+use = egg:swift#ratelimit
+# You can override the default log routing for this filter here:
+# set log_name = ratelimit
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# clock_accuracy should represent how accurate the proxy servers' system clocks
+# are with each other. 1000 means that all the proxies' clocks are accurate to
+# each other within 1 millisecond. No ratelimit should be higher than the
+# clock accuracy.
+# clock_accuracy = 1000
+#
+# max_sleep_time_seconds = 60
+#
+# log_sleep_time_seconds of 0 means disabled
+# log_sleep_time_seconds = 0
+#
+# allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
+# rate_buffer_seconds = 5
+#
+# account_ratelimit of 0 means disabled
+# account_ratelimit = 0
+
+# DEPRECATED- these will continue to work but will be replaced
+# by the X-Account-Sysmeta-Global-Write-Ratelimit flag.
+# Please see ratelimiting docs for details.
+# these are comma separated lists of account names
+# account_whitelist = a,b
+# account_blacklist = c,d
+
+# with container_limit_x = r
+# for containers of size x limit write requests per second to r. The container
+# rate will be linearly interpolated from the values given. With the values
+# below, a container of size 5 will get a rate of 75.
+# container_ratelimit_0 = 100
+# container_ratelimit_10 = 50
+# container_ratelimit_50 = 20
+
+# Similarly to the above container-level write limits, the following will limit
+# container GET (listing) requests.
+# container_listing_ratelimit_0 = 100
+# container_listing_ratelimit_10 = 50
+# container_listing_ratelimit_50 = 20
+
+[filter:domain_remap]
+use = egg:swift#domain_remap
+# You can override the default log routing for this filter here:
+# set log_name = domain_remap
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# storage_domain = example.com
+# path_root = v1
+
+# Browsers can convert a host header to lowercase, so check that reseller
+# prefix on the account is the correct case. This is done by comparing the
+# items in the reseller_prefixes config option to the found prefix. If they
+# match except for case, the item from reseller_prefixes will be used
+# instead of the found reseller prefix. When none match, the default reseller
+# prefix is used. When no default reseller prefix is configured, any request
+# with an account prefix not in that list will be ignored by this middleware.
+# reseller_prefixes = AUTH
+# default_reseller_prefix =
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+# You can override the default log routing for this filter here:
+# set log_name = catch_errors
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+[filter:cname_lookup]
+# Note: this middleware requires python-dnspython
+use = egg:swift#cname_lookup
+# You can override the default log routing for this filter here:
+# set log_name = cname_lookup
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# Specify the storage_domain that match your cloud, multiple domains
+# can be specified separated by a comma
+# storage_domain = example.com
+#
+# lookup_depth = 1
+
+# Note: Put staticweb just after your auth filter(s) in the pipeline
+[filter:staticweb]
+use = egg:swift#staticweb
+# You can override the default log routing for this filter here:
+# set log_name = staticweb
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
+[filter:tempurl]
+use = egg:swift#tempurl
+# The methods allowed with Temp URLs.
+# methods = GET HEAD PUT POST DELETE
+#
+# The headers to remove from incoming requests. Simply a whitespace delimited
+# list of header names and names can optionally end with '*' to indicate a
+# prefix match. incoming_allow_headers is a list of exceptions to these
+# removals.
+# incoming_remove_headers = x-timestamp
+#
+# The headers allowed as exceptions to incoming_remove_headers. Simply a
+# whitespace delimited list of header names and names can optionally end with
+# '*' to indicate a prefix match.
+# incoming_allow_headers =
+#
+# The headers to remove from outgoing responses. Simply a whitespace delimited
+# list of header names and names can optionally end with '*' to indicate a
+# prefix match. outgoing_allow_headers is a list of exceptions to these
+# removals.
+# outgoing_remove_headers = x-object-meta-*
+#
+# The headers allowed as exceptions to outgoing_remove_headers. Simply a
+# whitespace delimited list of header names and names can optionally end with
+# '*' to indicate a prefix match.
+# outgoing_allow_headers = x-object-meta-public-*
+
+# Note: Put formpost just before your auth filter(s) in the pipeline
+[filter:formpost]
+use = egg:swift#formpost
+
+# Note: Just needs to be placed before the proxy-server in the pipeline.
+[filter:name_check]
+use = egg:swift#name_check
+# forbidden_chars = '"`<>
+# maximum_length = 255
+# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
+
+[filter:list-endpoints]
+use = egg:swift#list_endpoints
+# list_endpoints_path = /endpoints/
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+# If not set, logging directives from [DEFAULT] without "access_" will be used
+# access_log_name = swift
+# access_log_facility = LOG_LOCAL0
+# access_log_level = INFO
+# access_log_address = /dev/log
+#
+# If set, access_log_udp_host will override access_log_address
+# access_log_udp_host =
+# access_log_udp_port = 514
+#
+# You can use log_statsd_* from [DEFAULT] or override them here:
+# access_log_statsd_host =
+# access_log_statsd_port = 8125
+# access_log_statsd_default_sample_rate = 1.0
+# access_log_statsd_sample_rate_factor = 1.0
+# access_log_statsd_metric_prefix =
+# access_log_headers = false
+#
+# If access_log_headers is True and access_log_headers_only is set, only
+# these headers are logged. Multiple headers can be defined as a
+# comma-separated list, like this: access_log_headers_only = Host, X-Object-Meta-Mtime
+# access_log_headers_only =
+#
+# By default, the X-Auth-Token is logged. To obscure the value,
+# set reveal_sensitive_prefix to the number of characters to log.
+# For example, if set to 12, only the first 12 characters of the
+# token appear in the log. An unauthorized access of the log file
+# won't allow unauthorized usage of the token. However, the first
+# 12 or so characters are unique enough that you can trace/debug
+# token usage. Set to 0 to suppress the token completely (replaced
+# by '...' in the log).
+# Note: reveal_sensitive_prefix will not affect the value
+# logged with access_log_headers=True.
+# reveal_sensitive_prefix = 16
+#
+# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
+# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
+# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
+#
+# Note: The double proxy-logging in the pipeline is not a mistake. The
+# left-most proxy-logging is there to log requests that were handled in
+# middleware and never made it through to the right-most middleware (and
+# proxy server). Double logging is prevented for normal requests. See
+# proxy-logging docs.
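+#
+# An abridged illustration of such a pipeline (not the full recommended one):
+#   pipeline = catch_errors proxy-logging cache tempauth proxy-logging proxy-server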
+
+# Note: Put before both ratelimit and auth in the pipeline.
+[filter:bulk]
+use = egg:swift#bulk
+# max_containers_per_extraction = 10000
+# max_failed_extractions = 1000
+# max_deletes_per_request = 10000
+# max_failed_deletes = 1000
+
+# In order to keep a connection active during a potentially long bulk request,
+# Swift may return whitespace prepended to the actual response body. This
+# whitespace will be yielded no more than every yield_frequency seconds.
+# yield_frequency = 10
+
+# Note: The following parameter is used during a bulk delete of objects and
+# their container. Such a delete can fail because it is very likely that not
+# all replicated copies of an object have been removed by the time the
+# middleware got a successful response. The number of retries is
+# configurable, and the wait between each retry is 1.5**retry seconds.
+
+# delete_container_retry_count = 0
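+
+# A usage sketch (hypothetical token and paths): delete several objects in one
+# request by POSTing a newline-separated list of object paths with the
+# bulk-delete query parameter:
+#   curl -X POST -H 'X-Auth-Token: <token>' -H 'Content-Type: text/plain' \
+#     -T objects_to_delete.txt 'http://127.0.0.1:8080/v1/AUTH_test?bulk-delete'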
+
+# Note: Put after auth and staticweb in the pipeline.
+[filter:slo]
+use = egg:swift#slo
+# max_manifest_segments = 1000
+# max_manifest_size = 2097152
+#
+# Rate limiting applies only to segments smaller than this size (bytes).
+# rate_limit_under_size = 1048576
+#
+# Start rate-limiting SLO segment serving after the Nth small segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 1
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
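+#
+# A usage sketch (hypothetical names): the python-swiftclient CLI can create
+# an SLO by splitting a large file into 1 GiB segments at upload time:
+#   swift upload mycontainer bigfile --segment-size 1073741824 --use-slo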
+
+# Note: Put after auth and staticweb in the pipeline.
+# If you don't put it in the pipeline, it will be inserted for you.
+[filter:dlo]
+use = egg:swift#dlo
+# Start rate-limiting DLO segment serving after the Nth segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 1
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
+
+# Note: Put after auth in the pipeline.
+[filter:container-quotas]
+use = egg:swift#container_quotas
+
+# Note: Put after auth in the pipeline.
+[filter:account-quotas]
+use = egg:swift#account_quotas
+
+[filter:gatekeeper]
+use = egg:swift#gatekeeper
+# Set this to false if you want to allow clients to set arbitrary X-Timestamps
+# on uploaded objects. This may be used to preserve timestamps when migrating
+# from a previous storage system, but risks allowing users to upload
+# difficult-to-delete data.
+# shunt_inbound_x_timestamp = true
+#
+# You can override the default log routing for this filter here:
+# set log_name = gatekeeper
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+[filter:container_sync]
+use = egg:swift#container_sync
+# Set this to false if you want to disallow any full url values to be set for
+# any new X-Container-Sync-To headers. This will keep any new full urls from
+# coming in, but won't change any existing values already in the cluster.
+# Updating those will have to be done manually, as the true realm endpoint
+# cannot always be guessed.
+# allow_full_urls = true
+# Set this to specify this cluster's //realm/cluster as "current" in /info
+# current = //REALM/CLUSTER
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after catch_errors, gatekeeper and healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers; the profiler should inherit
+# from the Python standard profiler. Currently supported values include
+# 'cProfile' and 'eventlet.green.profile'.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/proxy.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# The profile data will be dumped to local disk, following the naming rule
+# above, at this interval.
+# dump_interval = 5.0
+#
+# Be careful: this option makes the profiler dump data into timestamped
+# files, which means many files will pile up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shuts down.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
+
+# Note: Put after slo, dlo in the pipeline.
+# If you don't put it in the pipeline, it will be inserted automatically.
+[filter:versioned_writes]
+use = egg:swift#versioned_writes
+# Enables using versioned writes middleware and exposing configuration
+# settings via HTTP GET /info.
+# WARNING: Setting this option bypasses the "allow_versions" option
+# in the container configuration file, which will eventually be
+# deprecated. See documentation for more details.
+# allow_versioned_writes = false
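+#
+# A usage sketch (hypothetical container names), keeping prior copies of
+# overwritten objects in a separate container once this is enabled:
+#   swift post mycontainer -H 'X-Versions-Location: myversions'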
+
+
+[filter:moon]
+paste.filter_factory = keystonemiddleware.moon_agent:filter_factory
+authz_login = admin
+authz_password = {{ ADMIN_PASS }}
+auth_host = {{ internal_vip.ip }}
+logfile = /var/log/moon/keystonemiddleware.log
diff --git a/deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2
new file mode 100644
index 00000000..64d864af
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2
@@ -0,0 +1,48 @@
+ {% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
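+# e.g. a (hypothetical) 8-vCPU node renders processes=4 threads=4, since
+# (8 + 1) // 2 floors to 4.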
+<VirtualHost {{ internal_ip }}:5000>
+ WSGIDaemonProcess keystone-public processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIProcessGroup keystone-public
+ WSGIScriptAlias / /usr/bin/keystone-wsgi-public
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/{{ http_service_name }}/keystone.log
+ CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
+
+<VirtualHost {{ internal_ip }}:35357>
+ WSGIDaemonProcess keystone-admin processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
+ WSGIProcessGroup keystone-admin
+ WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/{{ http_service_name }}/keystone.log
+ CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
diff --git a/deploy/adapters/ansible/roles/moon/vars/Debian.yml b/deploy/adapters/ansible/roles/moon/vars/Debian.yml
new file mode 100644
index 00000000..0da81179
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/vars/Debian.yml
@@ -0,0 +1,124 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+packages:
+ - adduser
+ - dbconfig-common
+ - init-system-helpers
+ - python-keystone
+ - q-text-as-data
+ - sqlite3
+ - ssl-cert
+ - debconf
+ - lsb-base
+ - python:any
+ - libjs-sphinxdoc
+ - python-pip
+ - unzip
+ - apache2
+ - libapache2-mod-wsgi
+
+dependency_packages:
+ - python-cryptography
+ - python-dateutil
+ - python-dogpile.cache
+ - python-eventlet
+ - python-greenlet
+ - python-jsonschema
+ - python-keystoneclient
+ - python-keystonemiddleware
+ - python-ldap
+ - python-ldappool
+ - python-lxml
+ - python-memcache
+ - python-migrate
+ - python-msgpack
+ - python-mysqldb
+ - python-oauthlib
+ - python-openstackclient
+ - python-oslo.cache
+ - python-oslo.concurrency
+ - python-oslo.config
+ - python-oslo.context
+ - python-oslo.db
+ - python-oslo.i18n
+ - python-oslo.log
+ - python-oslo.messaging
+ - python-oslo.middleware
+ - python-oslo.policy
+ - python-oslo.serialization
+ - python-oslo.service
+ - python-oslo.utils
+ - python-pam
+ - python-passlib
+ - python-paste
+ - python-pastedeploy
+ - python-pbr
+ - python-pycadf
+ - python-pymysql
+ - python-pysaml2
+ - python-pysqlite2
+ - python-routes
+ - python-six
+ - python-sqlalchemy
+ - python-stevedore
+ - python-webob
+ - unzip
+ - python3-keystoneauth1
+ - python3-keystoneclient
+ - python3-oslo.config
+ - python3-oslo.context
+ - python3-oslo.i18n
+ - python3-oslo.serialization
+ - python3-oslo.utils
+ - apache2
+ - libapache2-mod-wsgi
+ - python3-cryptography
+ - python3-dateutil
+ - python3-dogpile.cache
+ - python3-eventlet
+ - python3-greenlet
+ - python3-jsonschema
+ - python3-keystonemiddleware
+ - python3-lxml
+ - python3-memcache
+ - python3-migrate
+ - python3-msgpack
+ - python3-mysqldb
+ - python3-oauthlib
+ - python3-openstackclient
+ - python3-oslo.cache
+ - python3-oslo.concurrency
+ - python3-oslo.db
+ - python3-oslo.log
+ - python3-oslo.messaging
+ - python3-oslo.middleware
+ - python3-oslo.policy
+ - python3-oslo.service
+ - python3-pam
+ - python3-passlib
+ - python3-paste
+ - python3-pastedeploy
+ - python3-pbr
+ - python3-pycadf
+ - python3-pymysql
+ - python3-pysaml2
+ - python3-routes
+ - python3-six
+ - python3-sqlalchemy
+ - python3-stevedore
+ - python3-webob
+
+services:
+ - apache2
+
+
+apache_config_dir: /etc/apache2
+http_service_name: apache2
diff --git a/deploy/adapters/ansible/roles/moon/vars/main.yml b/deploy/adapters/ansible/roles/moon/vars/main.yml
new file mode 100644
index 00000000..cff8c7c2
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/vars/main.yml
@@ -0,0 +1,172 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages_noarch: []
+
+services_noarch: []
+
+os_services:
+ - name: keystone
+ type: identity
+ region: RegionOne
+ description: "OpenStack Identity"
+ publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
+ internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
+ adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
+
+ - name: glance
+ type: image
+ region: RegionOne
+ description: "OpenStack Image Service"
+ publicurl: "http://{{ public_vip.ip }}:9292"
+ internalurl: "http://{{ internal_vip.ip }}:9292"
+ adminurl: "http://{{ internal_vip.ip }}:9292"
+
+ - name: nova
+ type: compute
+ region: RegionOne
+ description: "OpenStack Compute"
+ publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+
+ - name: neutron
+ type: network
+ region: RegionOne
+ description: "OpenStack Networking"
+ publicurl: "http://{{ public_vip.ip }}:9696"
+ internalurl: "http://{{ internal_vip.ip }}:9696"
+ adminurl: "http://{{ internal_vip.ip }}:9696"
+
+ - name: ceilometer
+ type: metering
+ region: RegionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8777"
+ internalurl: "http://{{ internal_vip.ip }}:8777"
+ adminurl: "http://{{ internal_vip.ip }}:8777"
+
+ - name: aodh
+ type: alarming
+ region: RegionOne
+ description: "OpenStack Telemetry"
+ publicurl: "http://{{ public_vip.ip }}:8042"
+ internalurl: "http://{{ internal_vip.ip }}:8042"
+ adminurl: "http://{{ internal_vip.ip }}:8042"
+
+# - name: cinder
+# type: volume
+# region: RegionOne
+# description: "OpenStack Block Storage"
+# publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
+# internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+# adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+#
+# - name: cinderv2
+# type: volumev2
+# region: RegionOne
+# description: "OpenStack Block Storage v2"
+# publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
+# internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+# adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+
+ - name: heat
+ type: orchestration
+ region: RegionOne
+ description: "OpenStack Orchestration"
+ publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
+ internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+ adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+
+ - name: heat-cfn
+ type: cloudformation
+ region: RegionOne
+ description: "OpenStack CloudFormation Orchestration"
+ publicurl: "http://{{ public_vip.ip }}:8000/v1"
+ internalurl: "http://{{ internal_vip.ip }}:8000/v1"
+ adminurl: "http://{{ internal_vip.ip }}:8000/v1"
+
+# - name: swift
+# type: object-store
+# region: RegionOne
+# description: "OpenStack Object Storage"
+# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+
+os_users:
+ - user: admin
+ password: "{{ ADMIN_PASS }}"
+ email: admin@admin.com
+ role: admin
+ tenant: admin
+ tenant_description: "Admin Tenant"
+
+ - user: glance
+ password: "{{ GLANCE_PASS }}"
+ email: glance@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: nova
+ password: "{{ NOVA_PASS }}"
+ email: nova@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: keystone
+ password: "{{ KEYSTONE_PASS }}"
+ email: keystone@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: neutron
+ password: "{{ NEUTRON_PASS }}"
+ email: neutron@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: ceilometer
+ password: "{{ CEILOMETER_PASS }}"
+ email: ceilometer@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: cinder
+ password: "{{ CINDER_PASS }}"
+ email: cinder@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: heat
+ password: "{{ HEAT_PASS }}"
+ email: heat@admin.com
+ role: admin
+ tenant: service
+ tenant_description: "Service Tenant"
+
+ - user: demo
+ password: ""
+ email: heat@demo.com
+ role: heat_stack_user
+ tenant: demo
+ tenant_description: "Demo Tenant"
+
+# - user: swift
+# password: "{{ CINDER_PASS }}"
+# email: swift@admin.com
+# role: admin
+# tenant: service
+# tenant_description: "Service Tenant"
diff --git a/deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml b/deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml
index d5444946..ca4e8088 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/handlers/main.yml
@@ -10,3 +10,6 @@
- name: restart neutron compute service
service: name={{ item }} state=restarted enabled=yes
with_items: services | union(services_noarch)
+
+- name: restart nova-compute services
+ service: name=nova-compute state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
index 3e4b24bc..375e325d 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
@@ -44,6 +44,15 @@
systemctl daemon-reload
when: ansible_os_family == 'RedHat'
+- name: fix openstack neutron plugin config file ubuntu
+ shell: |
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
+ when: ansible_os_family == "Debian"
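+# e.g. a (hypothetical) "--config-file .../plugins/ml2/openvswitch_agent.ini"
+# argument in the upstart/init scripts becomes "--config-file .../plugin.ini",
+# matching the /etc/neutron/plugin.ini symlink created below.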
+
- name: generate neutron compute service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
@@ -57,7 +63,7 @@
file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
- name: config neutron
- template: src=templates/neutron.conf
+ template: src=neutron.conf
dest=/etc/neutron/neutron.conf backup=yes
notify:
- restart neutron compute service
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf b/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf
new file mode 100644
index 00000000..a676e951
--- /dev/null
+++ b/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf
@@ -0,0 +1,103 @@
+[DEFAULT]
+verbose = {{ VERBOSE }}
+debug = {{ VERBOSE }}
+state_path = /var/lib/neutron
+lock_path = $state_path/lock
+notify_nova_on_port_status_changes = True
+notify_nova_on_port_data_changes = True
+log_dir = /var/log/neutron
+bind_host = {{ network_server_host }}
+bind_port = 9696
+core_plugin = ml2
+service_plugins = router
+api_paste_config = api-paste.ini
+auth_strategy = keystone
+dhcp_lease_duration = 86400
+allow_overlapping_ips = True
+rpc_backend = rabbit
+rpc_thread_pool_size = 240
+rpc_conn_pool_size = 100
+rpc_response_timeout = 300
+rpc_cast_timeout = 300
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+default_notification_level = INFO
+notification_topics = notifications
+agent_down_time = 75
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+api_workers = 8
+rpc_workers = 8
+nova_url = http://{{ internal_vip.ip }}:8774/v3
+nova_region_name = RegionOne
+nova_admin_username = nova
+nova_admin_password = {{ NOVA_PASS }}
+nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
+send_events_interval = 2
+
+[quotas]
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+quota_items = network,subnet,port
+default_quota = -1
+quota_network = 100
+quota_subnet = 100
+quota_port = 8000
+quota_security_group = 1000
+quota_security_group_rule = 1000
+
+[agent]
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+report_interval = 30
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = neutron
+password = {{ NEUTRON_PASS }}
+
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+slave_connection =
+max_retries = 10
+retry_interval = 10
+min_pool_size = 1
+max_pool_size = 100
+idle_timeout = 30
+use_db_reconnect = True
+max_overflow = 100
+connection_debug = 0
+connection_trace = False
+pool_timeout = 10
+
+[service_providers]
+service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
+
+{% if enable_fwaas %}
+[fwaas]
+driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
+enabled = True
+{% endif %}
+
+[nova]
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = nova
+password = {{ NOVA_PASS }}
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
diff --git a/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml b/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
index 8319e42c..83d7f323 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
@@ -11,9 +11,9 @@
packages:
- neutron-common
- neutron-plugin-ml2
- - openvswitch-datapath-dkms
+ - openvswitch-switch-dpdk
- openvswitch-switch
- neutron-plugin-openvswitch-agent
services:
- - neutron-plugin-openvswitch-agent
+ - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
index be64c41c..917a8356 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
@@ -31,7 +31,9 @@
with_items: services | union(services_noarch)
- name: get tenant id to fill neutron.conf
- shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}'
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack project show service | grep id | sed -n "2,1p" | awk '{print $4}'
register: NOVA_ADMIN_TENANT_ID
- name: update neutron conf
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
index 99b21135..31f7f17c 100644
--- a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
@@ -58,6 +58,12 @@
systemctl daemon-reload
when: ansible_os_family == 'RedHat'
+- name: fix openstack neutron plugin config file ubuntu
+ shell: |
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf
+ sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
+ when: ansible_os_family == "Debian"
+
- name: config l3 agent
template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
backup=yes
diff --git a/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
index 86d1af67..1a78ca8c 100644
--- a/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
@@ -9,7 +9,7 @@
---
packages:
- neutron-plugin-ml2
- - openvswitch-datapath-dkms
+ - openvswitch-switch-dpdk
- openvswitch-switch
- neutron-l3-agent
- neutron-dhcp-agent
@@ -17,7 +17,7 @@ packages:
services:
- openvswitch-switch
- - neutron-plugin-openvswitch-agent
+ - neutron-openvswitch-agent
openvswitch_agent: neutron-plugin-openvswitch-agent
diff --git a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
index 5b80f400..16315b36 100644
--- a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
@@ -20,22 +20,27 @@
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items: packages | union(packages_noarch)
+- name: start virtlogd
+ service: name=virtlogd state=started enabled=yes
+ when: ansible_os_family == "Debian"
+
- name: enable auto start
file:
path=/usr/sbin/policy-rc.d
state=absent
when: ansible_os_family == "Debian"
-- name: update nova-compute conf
- template: src={{ item }} dest=/etc/nova/{{ item }}
- with_items:
- - nova.conf
- notify:
- - restart nova-compute services
+- name: count cpus that support hardware virtualization
+ shell: egrep -c '(vmx|svm)' /proc/cpuinfo || true
+ register: kvm_cpu_num
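+# egrep -c counts the /proc/cpuinfo lines carrying the vmx (Intel) or svm
+# (AMD) flag; nova-compute.conf falls back to virt_type=qemu when the count
+# is 0 (no hardware virtualization) and uses kvm otherwise.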
- name: update nova-compute conf
template: src={{ item }} dest=/etc/nova/{{ item }}
with_items:
+ - nova.conf
- nova-compute.conf
notify:
- restart nova-compute services
@@ -43,8 +45,13 @@
- name: generate neutron control service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
-
+#'
- name: remove nova sqlite db
shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
- meta: flush_handlers
+
+- name: restart nova-compute and libvirt-bin
+ shell: >
+ service nova-compute restart;
+ service libvirt-bin restart;
diff --git a/deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf b/deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf
index 1ac775b1..305d408b 100644
--- a/deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf
+++ b/deploy/adapters/ansible/roles/nova-compute/templates/nova-compute.conf
@@ -2,7 +2,7 @@
compute_driver=libvirt.LibvirtDriver
force_raw_images = true
[libvirt]
-{% if deploy_type == 'virtual' %}
+{% if kvm_cpu_num.stdout_lines[0]|int == 0 %}
virt_type=qemu
{% else %}
virt_type=kvm
diff --git a/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf b/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf
index e1e915c8..8d7e9a5f 100644
--- a/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf
+++ b/deploy/adapters/ansible/roles/nova-compute/templates/nova.conf
@@ -1,77 +1,104 @@
[DEFAULT]
-block_device_allocate_retries=5
-block_device_allocate_retries_interval=300
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
+auth_strategy = keystone
+my_ip = {{ internal_ip }}
+use_neutron = True
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
+log-dir=/var/log/nova
state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
force_dhcp_release=True
+verbose={{ VERBOSE }}
+ec2_private_dns_show_ip=True
+enabled_apis=osapi_compute,metadata
+default_floating_pool={{ public_net_info.network }}
+metadata_listen={{ internal_ip }}
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
-enabled_apis=osapi_compute,metadata
-
-default_floating_pool={{ public_net_info.network }}
-auth_strategy = keystone
-
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
-
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
+[api_database]
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
+idle_timeout = 30
+pool_timeout = 10
+use_db_reconnect = True
+
[database]
-# The SQLAlchemy connection string used to connect to the database
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
idle_timeout = 30
-use_db_reconnect = True
pool_timeout = 10
+use_db_reconnect = True
+
+[glance]
+api_servers = http://{{ internal_vip.ip }}:9292
+host = {{ internal_vip.ip }}
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = nova
+password = {{ NOVA_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
-[glance]
-host = {{ internal_vip.ip }}
+[libvirt]
+use_virtio_for_bridges=True
[neutron]
url = http://{{ internal_vip.ip }}:9696
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+region_name = RegionOne
+project_name = service
+username = neutron
+password = {{ NEUTRON_PASS }}
+service_metadata_proxy = True
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
auth_strategy = keystone
admin_tenant_name = service
admin_username = neutron
admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
+
+[oslo_concurrency]
+lock_path=/var/lib/nova/tmp
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[vnc]
+enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+[wsgi]
+api_paste_config=/etc/nova/api-paste.ini
diff --git a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
index bf1b0f6b..f332c97a 100644
--- a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
+++ b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
@@ -7,6 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+- name: nova api db sync
+ shell: su -s /bin/sh -c "nova-manage api_db sync" nova
+ ignore_errors: True
+ notify:
+ - restart nova service
+
- name: nova db sync
nova_manage: action=dbsync
notify:
diff --git a/deploy/adapters/ansible/roles/odl_cluster/files/opendaylight.service b/deploy/adapters/ansible/roles/odl_cluster/files/opendaylight.service
new file mode 100644
index 00000000..6c9e4c44
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/files/opendaylight.service
@@ -0,0 +1,21 @@
+[Unit]
+Description=OpenDaylight
+After=
+
+
+[Service]
+User=root
+Group=root
+Type=simple
+EnvironmentFile=-/opt/moon-environment
+WorkingDirectory=/opt/opendaylight-0.3.0
+PermissionsStartOnly=true
+ExecStartPre=
+ExecStart=/usr/lib/jvm/java-8-oracle/bin/java -Djava.security.properties=/opt/opendaylight-0.3.0/etc/odl.java.security -server -Xms128M -Xmx2048m -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass -XX:MaxPermSize=512m -Dcom.sun.management.jmxremote -Djava.endorsed.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/endorsed:/usr/lib/jvm/java-8-oracle/lib/endorsed:/opt/opendaylight-0.3.0/lib/endorsed -Djava.ext.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/ext:/usr/lib/jvm/java-8-oracle/lib/ext:/opt/opendaylight-0.3.0/lib/ext -Dkaraf.instances=/opt/opendaylight-0.3.0/instances -Dkaraf.home=/opt/opendaylight-0.3.0 -Dkaraf.base=/opt/opendaylight-0.3.0 -Dkaraf.data=/opt/opendaylight-0.3.0/data -Dkaraf.etc=/opt/opendaylight-0.3.0/etc -Djava.io.tmpdir=/opt/opendaylight-0.3.0/data/tmp -Djava.util.logging.config.file=/opt/opendaylight-0.3.0/etc/java.util.logging.properties -Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true -classpath /opt/opendaylight-0.3.0/lib/karaf-jaas-boot.jar:/opt/opendaylight-0.3.0/lib/karaf-jmx-boot.jar:/opt/opendaylight-0.3.0/lib/karaf-org.osgi.core.jar:/opt/opendaylight-0.3.0/lib/karaf.branding-1.2.2-Beryllium-SR2.jar:/opt/opendaylight-0.3.0/lib/karaf.jar org.apache.karaf.main.Main
+Restart=on-failure
+LimitNOFILE=65535
+TimeoutStopSec=15
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml
index da7356dc..efd359db 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_00_download_packages.yml
@@ -20,10 +20,15 @@
- name: download oracle-jdk8 script file
get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
-# "
+#"
- name: download odl package
get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/odl/{{ odl_pkg_url }}" dest=/opt/{{ odl_pkg_name }}
+# "
+- name: download odl pip package
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/pip/{{ networking_odl_pkg_name }}" dest=/opt/{{ networking_odl_pkg_name }}
+
+#"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
index 507d12ce..8d71606f 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_03_copy_odl_configuration_files.yml
@@ -8,7 +8,7 @@
##############################################################################
---
- name: opendaylight system file
- template:
+ copy:
src: "{{ service_file.src }}"
dest: "{{ service_file.dst }}"
mode: 0755
@@ -44,3 +44,10 @@
template:
src: tomcat-server.xml
dest: "{{ odl_home }}/configuration/tomcat-server.xml"
+
+- name: create jetty config
+ template:
+ src: jetty.xml
+ dest: "{{ odl_home }}/etc/jetty.xml"
+
+
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
index 85bb534a..869d264a 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
@@ -8,8 +8,22 @@
##############################################################################
---
-- name: install odl pip packages
- pip: name={{ item }} state=present
- with_items: odl_pip
-
+- name: patch odl pip package
+ shell: |
+ cd /opt
+ tar xf /opt/{{ networking_odl_pkg_name }}
+ rm -rf /opt/{{ networking_odl_pkg_name }}
+ sed -i 's/^neutron-lib.*/neutron-lib/' networking-odl-2.0.0/requirements.txt
+ tar zcf /opt/{{ networking_odl_pkg_name }} networking-odl-2.0.0
+ rm -rf networking-odl-2.0.0
+ cd -
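+# e.g. a (hypothetical) pinned line such as "neutron-lib>=0.4.0" in
+# requirements.txt becomes just "neutron-lib", so pip will accept whatever
+# neutron-lib version is already installed on the node.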
+- name: odl pip package install
+ shell: |
+ cd /opt
+ pip install {{ networking_odl_pkg_name }}
+ rm -rf {{ networking_odl_pkg_name }}
+ cd -
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
index 8dfaf4df..f44b373b 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_06_stop_openstack_services.yml
@@ -20,23 +20,3 @@
- name: turn off keepalived on control node
service: name=keepalived state=stopped
when: ansible_os_family == "Debian"
-
-################ l3 agent remove ###################
-
-- name: turn off neutron-l3-agent on control node
- service: name=neutron-l3-agent state=stopped
- when: odl_l3_agent == "Enable"
-
-- name: remove neutron-l3-agent daemon
- shell: >
- sed -i 'neutron-l3-agent/d' /opt/service ;
- mv /etc/init.d/neutron-l3-agent /home/ ;
- mv /etc/init/neutron-l3-agent.conf /home/ ;
- when: odl_l3_agent == "Enable" and ansible_os_family == "Debian"
-
-- name: remove neutron-l3-agent daemon
- shell: >
- sed -i 'neutron-l3-agent/d' /opt/service ;
- mv /lib/systemd/system/neutron-l3-agent.service /home/ ;
- when: odl_l3_agent == "Enable" and ansible_os_family == "RedHat"
-
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml
index 7ca38f17..d78a76e0 100755..100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_odl_controller.yml
@@ -29,6 +29,14 @@
- name: stop openstack services
include: 01_06_stop_openstack_services.yml
+- name: set opendaylight cluster
+ include: 05_set_opendaylight_cluster.yml
+ when: groups['odl']|length > 1
+
+- name: install moon
+ include: moon-odl.yml
+ when: moon == "Enable"
+
- name: start and check odl
include: 01_07_start_check_odl.yml
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
index c312490b..04f0ec61 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
@@ -8,6 +8,16 @@
##############################################################################
---
+- name: restart keepalived to recover external IP before check br-int
+ shell: service keepalived restart
+ when: inventory_hostname in groups['odl']
+ ignore_errors: True
+
+- name: restart opendaylight (in newton, opendaylight does not listen on port 6640 until restarted)
+ shell: service opendaylight restart; sleep 60
+ when: inventory_hostname in groups['odl']
+ ignore_errors: True
+
- name: set opendaylight as the manager
command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
@@ -18,4 +28,3 @@
shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '};
#'
-
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
new file mode 100644
index 00000000..7eddf7fa
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/05_set_opendaylight_cluster.yml
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: clear odl cluster ip list
+ shell: rm -f /opt/cluster; touch /opt/cluster;
+
+- name: append each controller's mgmt ip to the cluster list
+ shell: echo "{{ ip_settings[item.1]['mgmt']['ip'] }} \c" >> /opt/cluster; >> /opt/cluster;
+ with_indexed_items: groups['odl']
+
+- name: read back the cluster ip list
+ shell: cat /opt/cluster
+ register: cluster
+
+#- debug: msg="{{ cluster.stdout_lines[0] }}"
+
+- name: get this controller's index from its hostname
+ shell: uname -n | cut -b 5,5
+ register: number
+
+#- debug: msg="{{ number.stdout_lines[0] }}"
+
+- debug: msg="{{ odl_home }}/bin/configure_cluster.sh {{ number.stdout_lines[0] }} {{ cluster.stdout_lines[0] }}"
+
+- name: configure odl controller in cluster
+ shell: "{{ odl_home }}/bin/configure_cluster.sh {{ number.stdout_lines[0] }} {{ cluster.stdout_lines[0] }}"
+
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
index 50f40db7..32952c51 100755..100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
@@ -22,7 +22,3 @@
- name: Provision ODL on Compute nodes
include: 02_odl_compute.yml
when: groups['odl']|length !=0 and inventory_hostname not in groups['odl']
-
-- name: Config nova
- include: 04_odl_l3_nova.yml
- when: groups['odl']|length !=0 and odl_l3_agent == "Enable"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml
new file mode 100644
index 00000000..b89b2823
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/moon-odl.yml
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: delete data journal snapshots
+ shell: rm -rf {{ odl_home }}/{{ item }}
+ with_items:
+ - journal
+ - data
+ - snapshots
+
+- name: remove aaa feature
+ shell: rm -rf {{ odl_home }}/system/org/opendaylight/aaa/
+
+- name: download apache maven package file
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/apache-maven-3.3.9-bin.tar.gz" dest=/opt/apache-maven-3.3.9-bin.tar.gz
+
+- name: create maven folder
+ shell: mkdir -p /opt/apache-maven-3.3.9/
+
+- name: extract maven
+ command: su -s /bin/sh -c "tar zxf /opt/apache-maven-3.3.9-bin.tar.gz -C /opt/apache-maven-3.3.9/ --strip-components 1 --no-overwrite-dir -k --skip-old-files" root
+
+- name: install maven
+ shell: ln -s /opt/apache-maven-3.3.9/bin/mvn /usr/local/bin/mvn;
+
+- name: create m2 directory
+ file: path=/root/.m2/ state=directory mode=0755
+
+- name: copy settings.xml
+ template: src=settings.xml dest=/root/.m2/settings.xml
+
+#- name: upload swift lib
+# unarchive: src=odl-aaa-moon.tar.gz dest=/home/
+
+- name: download odl-aaa-moon package
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/{{ odl_aaa_moon }}" dest=/home/
+
+- name: unarchive odl-aaa-moon package
+ command: su -s /bin/sh -c "tar xvf /home/{{ odl_aaa_moon }} -C /home/"
+
+- name: install aaa
+ shell: >
+ export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/usr/lib/jvm/java-8-oracle/bin:/opt/apache-maven-3.3.9/bin";
+ export JAVA_HOME="/usr/lib/jvm/java-8-oracle";
+ export _JAVA_OPTIONS="-Djava.net.preferIPv4Stack=true";
+ export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=512m";
+ cd /home/odl-aaa-moon/aaa/;
+ mvn clean install -DskipTests;
+
+- name: remove shiro ini
+ shell: rm -f {{ odl_home }}/etc/shiro.ini
+
+- name: set moon env
+ template: src=moon-environment dest=/opt/moon-environment
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml b/deploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml
index 3ee37509..50ac7c35 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/jetty.xml
@@ -34,7 +34,7 @@ DTD Configure//EN" "http://jetty.mortbay.org/configure.dtd">
<Arg>
<New class="org.eclipse.jetty.server.nio.SelectChannelConnector">
<Set name="host">
- <Property name="jetty.host" default="{{ internal_ip }}"/>
+ <Property name="jetty.host"/>
</Set>
<Set name="port">
<Property name="jetty.port" default="8181" />
@@ -48,24 +48,6 @@ DTD Configure//EN" "http://jetty.mortbay.org/configure.dtd">
</New>
</Arg>
</Call>
- <Call name="addConnector">
- <Arg>
- <New class="org.eclipse.jetty.server.nio.SelectChannelConnector">
- <Set name="host">
- <Property name="jetty.host" default="{{ internal_ip }}"/>
- </Set>
- <Set name="port">
- <Property name="jetty.port" default="8080" />
- </Set>
- <Set name="maxIdleTime">300000</Set>
- <Set name="Acceptors">2</Set>
- <Set name="statsOn">false</Set>
- <Set name="confidentialPort">8443</Set>
- <Set name="lowResourcesConnections">20000</Set>
- <Set name="lowResourcesMaxIdleTime">5000</Set>
- </New>
- </Arg>
- </Call>
<!-- =========================================================== -->
<!-- Configure Authentication Realms -->
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh
index 0d42e48b..5e3627bf 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh
@@ -10,5 +10,5 @@ cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2_odl]
password = admin
username = admin
-url = http://{{ internal_vip.ip }}:8080/controller/nb/v2/neutron
+url = http://{{ internal_vip.ip }}:8181/controller/nb/v2/neutron
EOT
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/moon-environment b/deploy/adapters/ansible/roles/odl_cluster/templates/moon-environment
new file mode 100644
index 00000000..9a13da8e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/moon-environment
@@ -0,0 +1,3 @@
+MOON_SERVER_ADDR={{ internal_vip.ip }}
+MOON_SERVER_PORT=5000
+no_proxy="localhost,127.0.0.1"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/settings.xml b/deploy/adapters/ansible/roles/odl_cluster/templates/settings.xml
new file mode 100644
index 00000000..5ba3b50c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/settings.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=2 tabstop=2: -->
+<!--
+ Copyright (c) 2014, 2015 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+ <localRepository>{{ odl_home }}/system/ </localRepository>
+ <profiles>
+ <profile>
+ <id>opendaylight-release</id>
+ <repositories>
+ <repository>
+ <id>opendaylight-mirror</id>
+ <name>opendaylight-mirror</name>
+ <url>https://nexus.opendaylight.org/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>opendaylight-mirror</id>
+ <name>opendaylight-mirror</name>
+ <url>https://nexus.opendaylight.org/content/repositories/public/</url>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+
+ <profile>
+ <id>opendaylight-snapshots</id>
+ <repositories>
+ <repository>
+ <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
+ <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ </profiles>
+
+ <activeProfiles>
+ <activeProfile>opendaylight-release</activeProfile>
+ <activeProfile>opendaylight-snapshots</activeProfile>
+ </activeProfiles>
+</settings>
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
index b44107e2..640a264a 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
@@ -12,8 +12,10 @@ common_packages:
- crudini
service_ovs_name: openvswitch-switch
-service_ovs_agent_name: neutron-plugin-openvswitch-agent
+service_ovs_agent_name: neutron-openvswitch-agent
service_file:
- src: opendaylight.conf
- dst: /etc/init/opendaylight.conf
+ src: opendaylight.service
+ dst: /lib/systemd/system/opendaylight.service
+
+networking_odl_pkg_name: networking-odl-2.0.0.tar.gz
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
index 5b2676a8..e5f52b42 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
@@ -9,7 +9,7 @@
---
odl_username: admin
odl_password: admin
-odl_api_port: 8080
+odl_api_port: 8181
#odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.3.0-Lithium/distribution-karaf-0.3.0-Lithium.tar.gz
odl_pkg_url: karaf.tar.gz
@@ -18,7 +18,8 @@ odl_home: "/opt/opendaylight-0.3.0/"
odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'odl-restconf','odl-l2switch-switch','odl-openflowplugin-all','odl-mdsal-apidocs','odl-dlux-all','odl-adsal-northbound','odl-nsf-all','odl-ovsdb-openstack','odl-ovsdb-northbound','odl-dlux-core']
odl_extra_features: ['odl-restconf-all','odl-mdsal-clustering','odl-openflowplugin-flow-services','http','jolokia-osgi']
odl_features: "{{ odl_base_features + odl_extra_features }}"
-odl_api_port: 8080
+
+odl_aaa_moon: odl-aaa-moon.tar.gz
jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
jdk8_script_name: install_jdk8.tar
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
index 64fff472..c8ce1155 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
@@ -46,8 +46,6 @@
include: onos_controller.yml
when: inventory_hostname in groups['onos']
-- name: Install ONOS Cluster on Compute
+- name: Config ONOS Cluster
include: openvswitch.yml
when: groups['onos']|length !=0
-# when: groups['onos']|length !=0 and inventory_hostname not in groups['onos']
-
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
index 6d62a2e9..d51151a9 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
@@ -7,25 +7,41 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- name: upload onos driver package
- unarchive: src=networking-onos.tar dest=/opt/
+
+- name: get image http server
+ shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
+ register: http_server
+
+- name: download onos driver packages
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/
+
+- name: download onos sfc driver package
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}" dest=/opt/
+
+- name: unarchive onos driver package
+ command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/"
+
+- name: unarchive onos sfc driver package
+ command: su -s /bin/sh -c "tar xvf /opt/networking-sfc.tar -C /opt/"
- name: install onos driver
command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh"
+- name: install onos sfc driver
+ command: su -s /bin/sh -c "/opt/networking-sfc/install_driver.sh"
+
- name: install onos required packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items: packages
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
- name: download oracle-jdk8 package file
get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
-
-- name: upload install_jdk8 scripts
- unarchive: src=install_jdk8.tar dest=/opt/
+
+- name: download oracle-jdk8 script file
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/
+
+- name: unarchive install_jdk8 scripts
+ command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/"
- name: install install_jdk8 package
command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh"
@@ -58,7 +74,7 @@
ignore_errors: True
- name: download jar repository
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/repository.tar" dest=~/.m2/
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/
- name: extract jar repository
command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
@@ -75,47 +91,19 @@
sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-# notify:
-# - restart onos service
-
+
- name: configure onos boot feature
shell: >
sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
-#- name: create cluster json
-# template:
-# src: cluster.json
-# dest: "{{ onos_home }}/config/cluster.json"
-# notify:
-# - restart onos service
-
-#- name: create tablets json
-# template:
-# src: tablets.json
-# dest: "{{ onos_home }}/config/tablets.json"
-# notify:
-# - restart onos service
-
- name: wait for config time
shell: "sleep 10"
- name: start onos service
service: name=onos state=started enabled=yes
-- name: wait for restart time
- shell: "sleep 60"
-
-- name: start onos service
- service: name=onos state=restarted enabled=yes
-
- name: wait for onos start time
- shell: "sleep 60"
-
-- name: start onos service
- service: name=onos state=restarted enabled=yes
-
-- name: wait for onos start time
- shell: "sleep 100"
+ shell: "sleep 200"
- name: add onos auto start
shell: >
@@ -124,12 +112,9 @@
##########################################################################################################
################################ ONOS connect with OpenStack ################################
##########################################################################################################
-#- name: Run OpenVSwitch Script
-# include: openvswitch.yml
-
- name: Configure Neutron1
shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins onos_router;
+ crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_sfc.services.sfc.plugin.SfcPlugin,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,onos_router;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
@@ -143,13 +128,13 @@
- name: Configure Neutron2
command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-
- name: Configure Neutron3
shell: >
mysql -e "drop database if exists neutron_ml2;";
mysql -e "create database neutron_ml2 character set utf8;";
mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
+ su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
- name: Restart neutron-server
service: name=neutron-server state=restarted
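The crudini calls above pass each value as a single shell argument, which is why the plugin list must stay free of unquoted spaces. For comparison, a minimal sketch of the same two assignments with Ansible's stock ini_file module, which is idempotent; paths and values are taken from the tasks above, and the sketch is illustrative rather than part of this role:

    ---
    # Sketch: crudini-equivalent settings via ini_file (idempotent).
    - name: set neutron service plugins
      ini_file:
        dest: /etc/neutron/neutron.conf
        section: DEFAULT
        option: service_plugins
        value: networking_sfc.services.sfc.plugin.SfcPlugin,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,onos_router

    - name: set the ml2 mechanism driver to onos_ml2
      ini_file:
        dest: /etc/neutron/plugins/ml2/ml2_conf.ini
        section: ml2
        option: mechanism_drivers
        value: onos_ml2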
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
index 47f0f6e8..aac787ea 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
@@ -7,38 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-#- name: remove neutron-plugin-openvswitch-agent auto start
-# shell: >
-# update-rc.d neutron-plugin-openvswitch-agent remove;
-# sed -i /neutron-plugin-openvswitch-agent/d /opt/service
-#- name: shut down and disable Neutron's agent services
-# service: name=neutron-plugin-openvswitch-agent state=stopped
-
-#- name: Stop the Open vSwitch service and clear existing OVSDB
-# shell: >
-# ovs-vsctl del-br br-int ;
-# ovs-vsctl del-br br-tun ;
-# ovs-vsctl del-manager ;
-
-#- name: get image http server
-# shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
-# register: http_server
-#
-#- name: download ovs
-# get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/openvswitch.tar" dest=/opt/openvswitch.tar
-#
-#- name: extract ovs
-# command: su -s /bin/sh -c "tar xvf /opt/openvswitch.tar -C /opt/"
-#
-#- name: update ovs
-# shell: >
-# cd /opt/openvswitch;
-# dpkg -i openvswitch-common_2.3.0-1_amd64.deb;
-# dpkg -i openvswitch-switch_2.3.0-1_amd64.deb;
-
-#- name: start up onos-external nic
-# command: su -s /bin/sh -c "ifconfig eth2 0 up"
- name: set veth port
shell: >
ip link add onos_port1 type veth peer name onos_port2;
@@ -46,26 +15,28 @@
ifconfig onos_port2 up;
ignore_errors: True
-- name: set veth to ovs
- shell: >
- export externamMac=`ifconfig eth1 | grep -Eo '\<[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'`;
- ifconfig onos_port2 hw ether $externamMac;
- ovs-vsctl add-port br-prv onos_port1;
- ignore_errors: True
+- name: add openflow-base feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
+ when: inventory_hostname in groups['onos']
-#- name: wait for onos start time
-# shell: "sleep 200"
+- name: add openflow feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
+ when: inventory_hostname in groups['onos']
- name: add ovsdatabase feature
command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
- when: inventory_hostname == groups['onos'][0]
+ when: inventory_hostname in groups['onos']
-- name: add openflow-base feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
+- name: add ovsdb-base feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'";
when: inventory_hostname in groups['onos']
-- name: add openflow feature
- command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
+- name: add onos driver ovsdb feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'";
+ when: inventory_hostname in groups['onos']
+
+- name: add ovsdb provider host feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'";
when: inventory_hostname in groups['onos']
- name: add vtn feature
@@ -79,23 +50,6 @@
- name: Set ONOS as the manager
command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
-- name: create public network
- shell: >
- export OS_PASSWORD=console;
- export OS_TENANT_NAME=admin;
- export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0;
- export OS_USERNAME=ADMIN;
- neutron net-create ext-net --shared --router:external=True;
- neutron subnet-create ext-net {{ public_net_info.floating_ip_cidr }} --name ext-subnet --allocation-pool start={{ public_net_info.floating_ip_start }},end={{ public_net_info.floating_ip_end }};
- when: inventory_hostname == groups['controller'][0]
-
-- name: set gateway mac address
- shell: >
- ping -c 1 {{ ansible_default_ipv4.gateway }};
- gatewayMac=`arp -a {{ ansible_default_ipv4.gateway }} | awk '{print $4}'`;
- /opt/onos/bin/onos "externalgateway-update -m $gatewayMac";
- when: inventory_hostname in groups['onos']
-
- name: delete default gateway
shell: >
route delete default;
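The feature:install tasks above differ only in the feature name; a hedged sketch of the same sequence folded into one looped task (the feature names are those added in this diff, the loop itself is illustrative):

    ---
    # Sketch: install the ONOS features listed in this diff in one loop.
    - name: install onos features
      command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install {{ item }}'"
      with_items:
        - onos-openflow-base
        - onos-openflow
        - onos-ovsdatabase
        - onos-ovsdb-base
        - onos-drivers-ovsdb
        - onos-ovsdb-provider-host
      when: inventory_hostname in groups['onos']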
diff --git a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
index 1cbc070d..f11f1102 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
@@ -6,9 +6,14 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-onos_pkg_url: http://downloads.onosproject.org/release/onos-1.3.0.tar.gz
-onos_pkg_name: onos-1.3.0.tar.gz
+onos_pkg_name: onos-1.6.0.tar.gz
onos_home: /opt/onos/
-karaf_dist: apache-karaf-3.0.3
+karaf_dist: apache-karaf-3.0.5
jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
-onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base,onos-openflow,onos-ovsdatabase, onos-app-vtn-onosfw
+jdk8_script_name: install_jdk8.tar
+onos_driver: networking-onos.tar
+onos_sfc_driver: networking-sfc.tar
+repository: repository.tar
+onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base,onos-openflow,onos-ovsdatabase,onos-ovsdb-base,onos-drivers-ovsdb,onos-ovsdb-provider-host,onos-app-vtn-onosfw
+
+
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml
index 0714d2e9..836cb78b 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/uninstall-openvswitch.yml
@@ -4,17 +4,17 @@
- name: remove ovs and ovs-plugin daemon
shell: >
- sed -i '/neutron-plugin-openvswitch-agent/d' /opt/service ;
+ sed -i '/neutron-openvswitch-agent/d' /opt/service ;
sed -i '/openvswitch-switch/d' /opt/service ;
- name: stop ovs and ovs-plugin
- shell: service openvswitch-switch stop; service neutron-plugin-openvswitch-agent stop;
+ shell: service openvswitch-switch stop; service neutron-openvswitch-agent stop;
- name: remove ovs and ovs-plugin files
shell: >
- update-rc.d -f neutron-plugin-openvswitch-agent remove;
- mv /etc/init.d/neutron-plugin-openvswitch-agent /home/neutron-plugin-openvswitch-agent;
- mv /etc/init/neutron-plugin-openvswitch-agent.conf /home/neutron-plugin-openvswitch-agent.conf;
+ update-rc.d -f neutron-openvswitch-agent remove;
+ mv /etc/init.d/neutron-openvswitch-agent /home/neutron-openvswitch-agent;
+ mv /etc/init/neutron-openvswitch-agent.conf /home/neutron-openvswitch-agent.conf;
update-rc.d -f openvswitch-switch remove ;
mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ;
mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ;
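For the stop step, a minimal sketch using the service module with the renamed unit, assuming the same Debian init layout as above (the init-script moves still need shell):

    ---
    # Sketch: stop and disable the renamed agent without shelling out.
    - name: stop and disable neutron-openvswitch-agent
      service: name=neutron-openvswitch-agent state=stopped enabled=no

    - name: stop and disable openvswitch-switch
      service: name=openvswitch-switch state=stopped enabled=no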
diff --git a/deploy/adapters/ansible/roles/secgroup/templates/neutron.j2 b/deploy/adapters/ansible/roles/secgroup/templates/neutron.j2
index 9f3652c4..e7107660 100644
--- a/deploy/adapters/ansible/roles/secgroup/templates/neutron.j2
+++ b/deploy/adapters/ansible/roles/secgroup/templates/neutron.j2
@@ -1,6 +1,6 @@
[securitygroup]
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-enable_security_group = False
+firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+enable_security_group = True
[agent]
prevent_arp_spoofing = False
diff --git a/deploy/adapters/ansible/roles/secgroup/templates/nova.j2 b/deploy/adapters/ansible/roles/secgroup/templates/nova.j2
index 91fa6cd2..7dbc216a 100644
--- a/deploy/adapters/ansible/roles/secgroup/templates/nova.j2
+++ b/deploy/adapters/ansible/roles/secgroup/templates/nova.j2
@@ -1,3 +1,3 @@
[DEFAULT]
firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = nova
+security_group_api = neutron
diff --git a/deploy/adapters/ansible/roles/secgroup/vars/Debian.yml b/deploy/adapters/ansible/roles/secgroup/vars/Debian.yml
index a6669088..221a3d92 100644
--- a/deploy/adapters/ansible/roles/secgroup/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/secgroup/vars/Debian.yml
@@ -25,11 +25,11 @@ controller_services:
- nova-novncproxy
- nova-scheduler
- neutron-server
- - neutron-plugin-openvswitch-agent
+ - neutron-openvswitch-agent
- neutron-l3-agent
- neutron-dhcp-agent
- neutron-metadata-agent
compute_services:
- nova-compute
- - neutron-plugin-openvswitch-agent
+ - neutron-openvswitch-agent
diff --git a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init
index c27a8bf8..41ccb988 100755
--- a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init
+++ b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init
@@ -1,13 +1,17 @@
-#!/bin/bash
-## BEGIN INIT INFO
-# Provides: anamon.init
-# Default-Start: 3 5
-# Default-Stop: 0 1 2 4 6
-# Required-Start: $network
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: anamon.init
+# Required-Start: $network
+# Required-Stop:
+# Should-Start:
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
# Short-Description: Starts the cobbler anamon boot notification program
-# Description: anamon runs the first time a machine is booted after
-# installation.
-## END INIT INFO
+# Description: anamon runs the first time a machine is booted after installation.
+### END INIT INFO
+
+
#
# anamon.init: Starts the cobbler post-install boot notification program
diff --git a/deploy/adapters/ansible/roles/storage/files/storage b/deploy/adapters/ansible/roles/storage/files/storage
index 775e8fd7..3acc6115 100755
--- a/deploy/adapters/ansible/roles/storage/files/storage
+++ b/deploy/adapters/ansible/roles/storage/files/storage
@@ -1,2 +1,10 @@
#! /bin/bash
+### BEGIN INIT INFO
+# Provides: Storage
+# Required-Start: $remote_fs $network
+# Required-Stop: $remote_fs $network
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Description: Storage
+### END INIT INFO
loop_dev=`sh /opt/setup_storage/losetup.sh`
diff --git a/deploy/adapters/ansible/roles/swift/tasks/main.yml b/deploy/adapters/ansible/roles/swift/tasks/main.yml
new file mode 100644
index 00000000..0f083146
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/tasks/main.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include: swift.yml
+ when: moon == "Enable"
diff --git a/deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml b/deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml
new file mode 100644
index 00000000..be00484b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift-compute1.yml
@@ -0,0 +1,80 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install swift-compute packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: compute_packages | union(compute_packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: format devices
+ shell: >
+ dd if=/dev/zero of=/var/swift1 bs=1G count=10;
+ dd if=/dev/zero of=/var/swift2 bs=1G count=10;
+ mkfs.xfs /var/swift1;
+ mkfs.xfs /var/swift2;
+
+- name: create mount point directory
+ shell: >
+ mkdir -p /srv/node/swift1;
+ mkdir -p /srv/node/swift2;
+
+- name: edit /etc/fstab
+ shell: >
+ echo "/var/swift1 /srv/node/swift1/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
+ echo "/var/swift2 /srv/node/swift2/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
+ mount /srv/node/swift1;
+ mount /srv/node/swift2;
+
+- name: edit /etc/default/rsync
+ shell: sed -i 's/RSYNC_ENABLE=false/RSYNC_ENABLE=true/g' /etc/default/rsync
+
+- name: restart rsync service
+ service: name=rsync state=restarted enabled=yes
+
+- name: copy scripts
+ template: src={{ item }} dest=/etc/swift/ backup=yes
+ with_items:
+ - account-server.conf
+ - container-server.conf
+ - object-server.conf
+
+- name: set swift directory ownership and permissions
+ shell: >
+ chown -R swift:swift /srv/node;
+ mkdir -p /var/cache/swift;
+ chown -R root:swift /var/cache/swift;
+ chmod -R 775 /var/cache/swift;
+
+#- name: copy swift lib
+# copy: src=swift-lib.tar.gz dest=/tmp/swift-lib.tar.gz
+#
+#- name: upload swift lib
+# unarchive: src=swift-lib.tar.gz dest=/tmp/
+#
+#- name: copy swift lib
+# shell: command: su -s /bin/sh -c "cp /tmp/swift-lib/* /usr/lib/"
+#
+#- name: untar swift lib
+# shell: >
+# tar zxf /tmp/swift-lib.tar.gz;
+# cp /tmp/swift-lib/* /usr/lib/;
+
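The dd/mkfs/mount chain above wipes and reformats the backing files on every run; a hedged sketch of the same layout made re-runnable with a creates= guard plus the stock filesystem and mount modules, which skip devices that are already formatted or mounted (same /var/swiftN files and mount options as above):

    ---
    # Sketch: idempotent version of the format/mount steps above.
    - name: create 10 GB swift backing files
      command: dd if=/dev/zero of=/var/{{ item }} bs=1G count=10 creates=/var/{{ item }}
      with_items:
        - swift1
        - swift2

    - name: format the backing files as xfs
      filesystem: fstype=xfs dev=/var/{{ item }}
      with_items:
        - swift1
        - swift2

    - name: mount the devices and record them in /etc/fstab
      mount: name=/srv/node/{{ item }} src=/var/{{ item }} fstype=xfs opts=noatime,nodiratime,nobarrier,logbufs=8 state=mounted
      with_items:
        - swift1
        - swift2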
diff --git a/deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml b/deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml
new file mode 100644
index 00000000..36d05040
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift-controller1.yml
@@ -0,0 +1,34 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install swift-controller packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: controller_packages | union(controller_packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: make swift directory
+ file: path=/etc/swift state=directory mode=0755
+
+- name: update proxy-server conf
+ template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
+
+
diff --git a/deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml b/deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml
new file mode 100644
index 00000000..92d4ab22
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift-controller2.yml
@@ -0,0 +1,93 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: create account.builder file
+ shell: >
+ cd /etc/swift ;
+ swift-ring-builder account.builder create 10 3 1;
+
+- name: add each storage node to the account ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift1 --weight 100 ;
+ swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift2 --weight 100 ;
+ with_indexed_items: groups['compute']
+
+- name: verify the account ring contents
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder account.builder;
+
+- name: rebalance the account ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder account.builder rebalance;
+
+
+#####################
+- name: create container builder file
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder container.builder create 10 3 1;
+
+- name: add each storage node to the container ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift1 --weight 100;
+ swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift2 --weight 100;
+ with_indexed_items: groups['compute']
+
+- name: verify the container ring contents
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder container.builder;
+
+- name: rebalance the container ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder container.builder rebalance;
+
+#############################
+
+- name: create object builder file
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder object.builder create 10 3 1;
+
+- name: add each storage node to the object ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift1 --weight 100;
+ swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift2 --weight 100;
+ with_indexed_items: groups['compute']
+
+- name: verify the object ring contents
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder object.builder;
+
+- name: rebalance the object ring
+ shell: >
+ cd /etc/swift;
+ swift-ring-builder object.builder rebalance;
+
+##########################
+
+- name: distribute ring configuration files to the other controllers
+ shell: >
+ cd /etc/swift;
+ scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
+ with_indexed_items: groups['controller']
+
+- name: distribute ring configuration files to all compute nodes
+ shell: >
+ cd /etc/swift;
+ scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/;
+ with_indexed_items: groups['compute']
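For context, in "swift-ring-builder <name>.builder create 10 3 1" the arguments are the partition power (2^10 = 1024 partitions), the replica count (3), and min_part_hours (1 hour before a moved partition may be moved again). Since the per-ring steps above are identical apart from the ring name and port, the rebalance step, for example, could be folded into one looped task; a sketch, illustrative only:

    ---
    # Sketch: rebalance all three rings in one task.
    - name: rebalance the rings
      shell: cd /etc/swift; swift-ring-builder {{ item }}.builder rebalance;
      with_items:
        - account
        - container
        - object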
diff --git a/deploy/adapters/ansible/roles/swift/tasks/swift.yml b/deploy/adapters/ansible/roles/swift/tasks/swift.yml
new file mode 100644
index 00000000..4e2651a7
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/tasks/swift.yml
@@ -0,0 +1,79 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- include: swift-controller1.yml
+ when: inventory_hostname in groups['controller']
+
+- include: swift-compute1.yml
+ when: inventory_hostname in groups['compute']
+
+- include: swift-controller2.yml
+ when: inventory_hostname == haproxy_hosts.keys()[0]
+
+- name: copy swift.conf
+ template: src=swift.conf dest=/etc/swift/swift.conf backup=yes
+
+- name: chown /etc/swift
+ shell: chown -R root:swift /etc/swift
+
+- name: restart memcached and swift-proxy on controller
+ service: name={{ item }} state=restarted enabled=yes
+ with_items:
+ - memcached
+ - swift-proxy
+ when: inventory_hostname in groups['controller']
+
+- name: start swift services on compute
+ shell: swift-init all start
+ when: inventory_hostname in groups['compute']
+ ignore_errors: True
+
+- name: restart rsync on compute
+ service: name={{ item }} state=restarted enabled=yes
+ with_items:
+ - rsync
+ when: inventory_hostname in groups['compute']
+
+- name: upload swift lib
+ unarchive: src=swift-lib.tar.gz dest=/tmp/
+
+- name: copy swift lib
+ command: su -s /bin/sh -c "cp /tmp/swift-lib/* /usr/lib/"
+
+- name: wait 30 seconds
+ shell: sleep 30
+
+- name: record swift services in /opt/swift-service
+ shell: echo {{ item }} >> /opt/swift-service
+ with_items:
+ - swift-account
+ - swift-account-replicator
+ - swift-container-replicator
+ - swift-object
+ - swift-object-updater
+ - swift-account-auditor
+ - swift-container
+ - swift-container-sync
+ - swift-object-auditor
+ - swift-account-reaper
+ - swift-container-auditor
+ - swift-container-updater
+ - swift-object-replicator
+ when: inventory_hostname in groups['compute']
+ ignore_errors: True
+
+- name: restart swift task
+ shell: >
+ for i in `cat /opt/swift-service`; do service $i start; done;
+ sleep 10;
+ for i in `cat /opt/swift-service`; do service $i restart; done;
+ when: inventory_hostname in groups['compute']
+ ignore_errors: True
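The restart step above replays the list recorded in /opt/swift-service through a shell for-loop; a hedged sketch of the same restart using the service module directly (list abridged here, the full set matches the task that writes the file):

    ---
    # Sketch: restart recorded storage services via the service module.
    - name: restart swift storage services
      service: name={{ item }} state=restarted
      with_items:
        - swift-account
        - swift-container
        - swift-object
      when: inventory_hostname in groups['compute']
      ignore_errors: True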
diff --git a/deploy/adapters/ansible/roles/swift/templates/account-server.conf b/deploy/adapters/ansible/roles/swift/templates/account-server.conf
new file mode 100644
index 00000000..ea84799f
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/templates/account-server.conf
@@ -0,0 +1,200 @@
+[DEFAULT]
+bind_ip = {{ internal_ip }}
+bind_port = 6002
+# bind_timeout = 30
+# backlog = 4096
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+mount_check = true
+# disable_fallocate = false
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# If you don't mind the extra disk space usage in overhead, you can turn this
+# on to preallocate disk space with SQLite databases to decrease fragmentation.
+# db_preallocation = off
+#
+# eventlet_debug = false
+#
+# You can set fallocate_reserve to the number of bytes you'd like fallocate to
+# reserve, whether there is space for the given file size or not.
+# fallocate_reserve = 0
+
+[pipeline:main]
+pipeline = healthcheck recon account-server
+
+[app:account-server]
+use = egg:swift#account
+# You can override the default log routing for this app here:
+# set log_name = account-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# auto_create_account_prefix = .
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server". Default is empty.
+# replication_server = false
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+
+[account-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Maximum number of database rows that will be sync'd in a single HTTP
+# replication request. Databases with less than or equal to this number of
+# differing rows will always be sync'd using an HTTP replication request rather
+# than using rsync.
+# per_diff = 1000
+#
+# Maximum number of HTTP replication requests attempted on each replication
+# pass for any one container. This caps how long the replicator will spend
+# trying to sync a given database per pass so the other databases don't get
+# starved.
+# max_diffs = 100
+#
+# Number of replication workers to spawn.
+# concurrency = 8
+#
+# Time in seconds to wait between replication passes
+# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# rsync_compress = no
+#
+# Format of the rsync module where the replicator will send data. See
+# etc/rsyncd.conf-sample for some usage examples.
+# rsync_module = {replication_ip}::account
+#
+# recon_cache_path = /var/cache/swift
+
+[account-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Will audit each account at most once per interval
+# interval = 1800
+#
+# accounts_per_second = 200
+# recon_cache_path = /var/cache/swift
+
+[account-reaper]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = account-reaper
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# concurrency = 25
+# interval = 3600
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# Normally, the reaper begins deleting account information for deleted accounts
+# immediately; you can set this to delay its work however. The value is in
+# seconds; 2592000 = 30 days for example.
+# delay_reaping = 0
+#
+# If the account fails to be reaped due to a persistent error, the
+# account reaper will log a message such as:
+# Account <name> has not been reaped since <date>
+# You can search logs for this message if space is not being reclaimed
+# after you delete account(s).
+# Default is 2592000 seconds (30 days). This is in addition to any time
+# requested by delay_reaping.
+# reap_warn_after = 2592000
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers which should inherit from python
+# standard profiler. Currently the supported value can be 'cProfile',
+# 'eventlet.green.profile' etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/account.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# the profile data will be dumped to local disk based on above naming rule
+# in this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable profiler to dump data into the file with
+# time stamp which means there will be lots of files piled up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shutdown.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
diff --git a/deploy/adapters/ansible/roles/swift/templates/container-server.conf b/deploy/adapters/ansible/roles/swift/templates/container-server.conf
new file mode 100644
index 00000000..88cd2ebb
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/templates/container-server.conf
@@ -0,0 +1,229 @@
+[DEFAULT]
+bind_ip = {{ internal_ip }}
+bind_port = 6001
+# bind_timeout = 30
+# backlog = 4096
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+mount_check = true
+# disable_fallocate = false
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# This is a comma separated list of hosts allowed in the X-Container-Sync-To
+# field for containers. This is the old-style of using container sync. It is
+# strongly recommended to use the new style of a separate
+# container-sync-realms.conf -- see container-sync-realms.conf-sample
+# allowed_sync_hosts = 127.0.0.1
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# If you don't mind the extra disk space usage in overhead, you can turn this
+# on to preallocate disk space with SQLite databases to decrease fragmentation.
+# db_preallocation = off
+#
+# eventlet_debug = false
+#
+# You can set fallocate_reserve to the number of bytes you'd like fallocate to
+# reserve, whether there is space for the given file size or not.
+# fallocate_reserve = 0
+
+[pipeline:main]
+pipeline = healthcheck recon container-server
+
+[app:container-server]
+use = egg:swift#container
+# You can override the default log routing for this app here:
+# set log_name = container-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# node_timeout = 3
+# conn_timeout = 0.5
+# allow_versions = false
+# auto_create_account_prefix = .
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+
+[container-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Maximum number of database rows that will be sync'd in a single HTTP
+# replication request. Databases with less than or equal to this number of
+# differing rows will always be sync'd using an HTTP replication request rather
+# than using rsync.
+# per_diff = 1000
+#
+# Maximum number of HTTP replication requests attempted on each replication
+# pass for any one container. This caps how long the replicator will spend
+# trying to sync a given database per pass so the other databases don't get
+# starved.
+# max_diffs = 100
+#
+# Number of replication workers to spawn.
+# concurrency = 8
+#
+# Time in seconds to wait between replication passes
+# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
+# node_timeout = 10
+# conn_timeout = 0.5
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# rsync_compress = no
+#
+# Format of the rsync module where the replicator will send data. See
+# etc/rsyncd.conf-sample for some usage examples.
+# rsync_module = {replication_ip}::container
+#
+# recon_cache_path = /var/cache/swift
+
+[container-updater]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-updater
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# interval = 300
+# concurrency = 4
+# node_timeout = 3
+# conn_timeout = 0.5
+#
+# slowdown will sleep that amount between containers
+# slowdown = 0.01
+#
+# Seconds to suppress updating an account that has generated an error
+# account_suppression_time = 60
+#
+# recon_cache_path = /var/cache/swift
+
+[container-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Will audit each container at most once per interval
+# interval = 1800
+#
+# containers_per_second = 200
+# recon_cache_path = /var/cache/swift
+
+[container-sync]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = container-sync
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
+# You can also set this to a comma separated list of HTTP Proxies and they will
+# be randomly used (simple load balancing).
+# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888
+#
+# Will sync each container at most once per interval
+# interval = 300
+#
+# Maximum amount of time to spend syncing each container per pass
+# container_time = 60
+#
+# Maximum amount of time in seconds for the connection attempt
+# conn_timeout = 5
+# Server errors from requests will be retried by default
+# request_tries = 3
+#
+# Internal client config file path
+# internal_client_conf_path = /etc/swift/internal-client.conf
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers which should inherit from python
+# standard profiler. Currently the supported value can be 'cProfile',
+# 'eventlet.green.profile' etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/container.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# the profile data will be dumped to local disk based on above naming rule
+# in this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable profiler to dump data into the file with
+# time stamp which means there will be lots of files piled up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shutdown.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
diff --git a/deploy/adapters/ansible/roles/swift/templates/object-server.conf b/deploy/adapters/ansible/roles/swift/templates/object-server.conf
new file mode 100644
index 00000000..effd4f22
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/templates/object-server.conf
@@ -0,0 +1,347 @@
+[DEFAULT]
+bind_ip = {{ internal_ip }}
+bind_port = 6000
+# bind_timeout = 30
+# backlog = 4096
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+mount_check = true
+# disable_fallocate = false
+# expiring_objects_container_divisor = 86400
+# expiring_objects_account_name = expiring_objects
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections. NOTE: if servers_per_port is set, this setting is
+# ignored.
+# workers = auto
+#
+# Make object-server run this many worker processes per unique port of
+# "local" ring devices across all storage policies. This can help provide
+# the isolation of threads_per_disk without the severe overhead. The default
+# value of 0 disables this feature.
+# servers_per_port = 0
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# eventlet_debug = false
+#
+# You can set fallocate_reserve to the number of bytes you'd like fallocate to
+# reserve, whether there is space for the given file size or not.
+# fallocate_reserve = 0
+#
+# Time to wait while attempting to connect to another backend node.
+# conn_timeout = 0.5
+# Time to wait while sending each chunk of data to another backend node.
+# node_timeout = 3
+# Time to wait while sending a container update on object update.
+# container_update_timeout = 1.0
+# Time to wait while receiving each chunk of data from a client or another
+# backend node.
+# client_timeout = 60
+#
+# network_chunk_size = 65536
+# disk_chunk_size = 65536
+
+[pipeline:main]
+pipeline = healthcheck recon object-server
+
+[app:object-server]
+use = egg:swift#object
+# You can override the default log routing for this app here:
+# set log_name = object-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# max_upload_time = 86400
+#
+# slow is the total amount of seconds an object PUT/DELETE request takes at
+# least. If it is faster, the object server will sleep this amount of time minus
+# the already passed transaction time. This is only useful for simulating slow
+# devices on storage nodes during testing and development.
+# slow = 0
+#
+# Objects smaller than this are not evicted from the buffercache once read
+# keep_cache_size = 5242880
+#
+# If true, objects for authenticated GET requests may be kept in buffer cache
+# if small enough
+# keep_cache_private = false
+#
+# on PUTs, sync data every n MB
+# mb_per_sync = 512
+#
+# Comma separated list of headers that can be set in metadata on an object.
+# This list is in addition to X-Object-Meta-* headers and cannot include
+# Content-Type, etag, Content-Length, or deleted
+# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
+#
+# auto_create_account_prefix = .
+#
+# A value of 0 means "don't use thread pools". A reasonable starting point is
+# 4.
+# threads_per_disk = 0
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+#
+# Set to restrict the number of concurrent incoming SSYNC requests
+# Set to 0 for unlimited
+# Note that SSYNC requests are only used by the object reconstructor or the
+# object replicator when configured to use ssync.
+# replication_concurrency = 4
+#
+# Restricts incoming SSYNC requests to one per device,
+# replication_concurrency above allowing. This can help control I/O to each
+# device, but you may wish to set this to False to allow multiple SSYNC
+# requests (up to the above replication_concurrency setting) per device.
+# replication_one_per_device = True
+#
+# Number of seconds to wait for an existing replication device lock before
+# giving up.
+# replication_lock_timeout = 15
+#
+# These next two settings control when the SSYNC subrequest handler will
+# abort an incoming SSYNC attempt. An abort will occur if there are at
+# least threshold number of failures and the value of failures / successes
+# exceeds the ratio. The defaults of 100 and 1.0 means that at least 100
+# failures have to occur and there have to be more failures than successes for
+# an abort to occur.
+# replication_failure_threshold = 100
+# replication_failure_ratio = 1.0
+#
+# Use splice() for zero-copy object GETs. This requires Linux kernel
+# version 3.0 or greater. If you set "splice = yes" but the kernel
+# does not support it, error messages will appear in the object server
+# logs at startup, but your object servers should continue to function.
+#
+# splice = no
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = /var/cache/swift
+recon_lock_path = /var/lock
+
+[object-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# daemonize = on
+#
+# Time in seconds to wait between replication passes
+# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
+# concurrency = 1
+# stats_interval = 300
+#
+# default is rsync, alternative is ssync
+# sync_method = rsync
+#
+# max duration of a partition rsync
+# rsync_timeout = 900
+#
+# bandwidth limit for rsync in kB/s. 0 means unlimited
+# rsync_bwlimit = 0
+#
+# passed to rsync for io op timeout
+# rsync_io_timeout = 30
+#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might
+# slow down the syncing process.
+# rsync_compress = no
+#
+# Format of the rsync module where the replicator will send data. See
+# etc/rsyncd.conf-sample for some usage examples.
+# rsync_module = {replication_ip}::object
+#
+# node_timeout = <whatever's in the DEFAULT section or 10>
+# max duration of an http request; this is for REPLICATE finalization calls and
+# so should be longer than node_timeout
+# http_timeout = 60
+#
+# attempts to kill all workers if nothing replicates for lockup_timeout seconds
+# lockup_timeout = 1800
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# ring_check_interval = 15
+# recon_cache_path = /var/cache/swift
+#
+# limits how long rsync error log lines are
+# 0 means to log the entire line
+# rsync_error_log_line_length = 0
+#
+# handoffs_first and handoff_delete are options for a special case
+# such as disk full in the cluster. These two options SHOULD NOT BE
+# CHANGED, except for such extreme situations (e.g. disks filled up
+# or are about to fill up. Anyway, DO NOT let your drives fill up)
+# handoffs_first is the flag to replicate handoffs prior to canonical
+# partitions. It allows to force syncing and deleting handoffs quickly.
+# If set to a True value(e.g. "True" or "1"), partitions
+# that are not supposed to be on the node will be replicated first.
+# handoffs_first = False
+#
+# handoff_delete is the number of replicas which are ensured in swift.
+# If a number less than the number of replicas is set, object-replicator
+# could delete local handoffs even if all replicas are not ensured in the
+# cluster. Object-replicator would remove local handoff partition directories
+# after syncing partition when the number of successful responses is greater
+# than or equal to this number. By default(auto), handoff partitions will be
+# removed when it has successfully replicated to all the canonical nodes.
+# handoff_delete = auto
+
+[object-reconstructor]
+# You can override the default log routing for this app here (don't use set!):
+# Unless otherwise noted, each setting below has the same meaning as described
+# in the [object-replicator] section, however these settings apply to the EC
+# reconstructor
+#
+# log_name = object-reconstructor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# daemonize = on
+#
+# Time in seconds to wait between reconstruction passes
+# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
+# concurrency = 1
+# stats_interval = 300
+# node_timeout = 10
+# http_timeout = 60
+# lockup_timeout = 1800
+# reclaim_age = 604800
+# ring_check_interval = 15
+# recon_cache_path = /var/cache/swift
+# handoffs_first = False
+
+[object-updater]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-updater
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# interval = 300
+# concurrency = 1
+# node_timeout = <whatever's in the DEFAULT section or 10>
+# slowdown will sleep that amount between objects
+# slowdown = 0.01
+#
+# recon_cache_path = /var/cache/swift
+
+[object-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Time in seconds to wait between auditor passes
+# interval = 30
+#
+# You can set the disk chunk size that the auditor uses making it larger if
+# you like for more efficient local auditing of larger objects
+# disk_chunk_size = 65536
+# files_per_second = 20
+# concurrency = 1
+# bytes_per_second = 10000000
+# log_time = 3600
+# zero_byte_files_per_second = 50
+# recon_cache_path = /var/cache/swift
+
+# Takes a comma separated list of ints. If set, the object auditor will
+# increment a counter for every object whose size is <= to the given break
+# points and report the result after a full scan.
+# object_size_stats =
+
+# The auditor will cleanup old rsync tempfiles after they are "old
+# enough" to delete. You can configure the time elapsed in seconds
+# before rsync tempfiles will be unlinked, or the default value of
+# "auto" try to use object-replicator's rsync_timeout + 900 and fallback
+# to 86400 (1 day).
+# rsync_tempfile_timeout = auto
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers which should inherit from python
+# standard profiler. Currently the supported value can be 'cProfile',
+# 'eventlet.green.profile' etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/object.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# the profile data will be dumped to local disk based on above naming rule
+# in this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable profiler to dump data into the file with
+# time stamp which means there will be lots of files piled up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shutdown.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
diff --git a/deploy/adapters/ansible/roles/swift/templates/proxy-server.conf b/deploy/adapters/ansible/roles/swift/templates/proxy-server.conf
new file mode 100644
index 00000000..b76796cf
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/templates/proxy-server.conf
@@ -0,0 +1,764 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+[DEFAULT]
+bind_ip = {{ internal_ip }}
+bind_port = 8080
+# bind_timeout = 30
+# backlog = 4096
+swift_dir = /etc/swift
+user = swift
+
+# Enables exposing configuration settings via HTTP GET /info.
+# expose_info = true
+
+# Key to use for admin calls that are HMAC signed. Default is empty,
+# which will disable admin calls to /info.
+# admin_key = secret_admin_key
+#
+# Allows the ability to withhold sections from showing up in the public calls
+# to /info. You can withhold subsections by separating the dict level with a
+# ".". The following would cause the sections 'container_quotas' and 'tempurl'
+# to not be listed, and the key max_failed_deletes would be removed from
+# bulk_delete. Default value is 'swift.valid_api_versions' which allows all
+# registered features to be listed via HTTP GET /info except
+# swift.valid_api_versions information
+# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl
+
+# Use an integer to override the number of pre-forked processes that will
+# accept connections. Should default to the number of effective cpu
+# cores in the system. It's worth noting that individual workers will
+# use many eventlet co-routines to service multiple concurrent requests.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# Set the following two lines to enable SSL. This is for testing only.
+# cert_file = /etc/swift/proxy.crt
+# key_file = /etc/swift/proxy.key
+#
+# expiring_objects_container_divisor = 86400
+# expiring_objects_account_name = expiring_objects
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_headers = false
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# This optional suffix (default is empty) is appended to the swift transaction
+# id and allows one to easily figure out which cluster an X-Trans-Id belongs to.
+# This is very useful when one is managing more than one swift cluster.
+# trans_id_suffix =
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+#
+# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
+# cors_allow_origin =
+# strict_cors_mode = True
+#
+# client_timeout = 60
+# eventlet_debug = false
+
+[pipeline:main]
+# This sample pipeline uses tempauth and is used for SAIO dev work and
+# testing. See below for a pipeline using keystone.
+#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
+pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
+
+# The following pipeline shows keystone integration. Comment out the one
+# above and uncomment this one. Additional steps for integrating keystone are
+# covered further below in the filter sections for authtoken and keystoneauth.
+#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
+
+[app:proxy-server]
+use = egg:swift#proxy
+account_autocreate = True
+# You can override the default log routing for this app here:
+# set log_name = proxy-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_address = /dev/log
+#
+# log_handoffs = true
+# recheck_account_existence = 60
+# recheck_container_existence = 60
+# object_chunk_size = 65536
+# client_chunk_size = 65536
+#
+# How long the proxy server will wait on responses from the a/c/o servers.
+# node_timeout = 10
+#
+# How long the proxy server will wait for an initial response and to read a
+# chunk of data from the object servers while serving GET / HEAD requests.
+# Timeouts from these requests can be recovered from so setting this to
+# something lower than node_timeout would provide quicker error recovery
+# while allowing for a longer timeout for non-recoverable requests (PUTs).
+# Defaults to node_timeout; it should be overridden if node_timeout is set to a
+# high number to prevent client timeouts from firing before the proxy server
+# has a chance to retry.
+# recoverable_node_timeout = node_timeout
+#
+# conn_timeout = 0.5
+#
+# How long to wait for requests to finish after a quorum has been established.
+# post_quorum_timeout = 0.5
+#
+# How long without an error before a node's error count is reset. This will
+# also be how long before a node is reenabled after suppression is triggered.
+# error_suppression_interval = 60
+#
+# How many errors can accumulate before a node is temporarily ignored.
+# error_suppression_limit = 10
+#
+# If set to 'true' any authorized user may create and delete accounts; if
+# 'false' no one, even authorized, can.
+# allow_account_management = false
+#
+# Set object_post_as_copy = false to turn on fast posts where only the metadata
+# changes are stored anew and the original data file is kept in place. This
+# makes for quicker posts.
+# object_post_as_copy = true
+#
+# If set to 'true' authorized accounts that do not yet exist within the Swift
+# cluster will be automatically created.
+# account_autocreate = false
+#
+# If set to a positive value, trying to create a container when the account
+# already has at least this maximum containers will result in a 403 Forbidden.
+# Note: This is a soft limit, meaning a user might exceed the cap for
+# recheck_account_existence before the 403s kick in.
+# max_containers_per_account = 0
+#
+# This is a comma separated list of account hashes that ignore the
+# max_containers_per_account cap.
+# max_containers_whitelist =
+#
+# Comma separated list of Host headers to which the proxy will deny requests.
+# deny_host_headers =
+#
+# Prefix used when automatically creating accounts.
+# auto_create_account_prefix = .
+#
+# Depth of the proxy put queue.
+# put_queue_depth = 10
+#
+# Storage nodes can be chosen at random (shuffle), by using timing
+# measurements (timing), or by using an explicit match (affinity).
+# Using timing measurements may allow for lower overall latency, while
+# using affinity allows for finer control. In both the timing and
+# affinity cases, equally-sorting nodes are still randomly chosen to
+# spread load.
+# The valid values for sorting_method are "affinity", "shuffle", or "timing".
+# sorting_method = shuffle
+#
+# If the "timing" sorting_method is used, the timings will only be valid for
+# the number of seconds configured by timing_expiry.
+# timing_expiry = 300
+#
+# By default on a GET/HEAD swift will connect to a storage node one at a time
+# in a single thread; there is logic to the order in which they are tried,
+# however. If you turn on concurrent_gets below, then replica count threads
+# will be used.
+# With addition of the concurrency_timeout option this will allow swift to send
+# out GET/HEAD requests to the storage nodes concurrently and answer with the
+# first to respond. With an EC policy the parameter only affects HEAD requests.
+# concurrent_gets = off
+#
+# This parameter controls how long to wait before firing off the next
+# concurrent_get thread. A value of 0 would be fully concurrent, any other
+# number will stagger the firing of the threads. This number should be
+# between 0 and node_timeout. The default is whatever you set for the
+# conn_timeout parameter.
+# concurrency_timeout = 0.5
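+#
+# A minimal sketch of the two options together (illustrative values, not
+# recommendations): serve GET/HEADs from the first replica to respond,
+# staggering each additional request by 0.3 seconds:
+# concurrent_gets = on
+# concurrency_timeout = 0.3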
+#
+# Set to the number of nodes to contact for a normal request. You can use
+# '* replicas' at the end to have it use the number given times the number of
+# replicas for the ring being used for the request.
+# request_node_count = 2 * replicas
+#
+# Which backend servers to prefer on reads. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. The value after the equals is
+# the priority; lower numbers are higher priority.
+#
+# Example: first read from region 1 zone 1, then region 1 zone 2, then
+# anything in region 2, then everything else:
+# read_affinity = r1z1=100, r1z2=200, r2=300
+# Default is empty, meaning no preference.
+# read_affinity =
+#
+# Which backend servers to prefer on writes. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. If this is set, then when
+# handling an object PUT request, some number (see setting
+# write_affinity_node_count) of local backend servers will be tried
+# before any nonlocal ones.
+#
+# Example: try to write to regions 1 and 2 before writing to any other
+# nodes:
+# write_affinity = r1, r2
+# Default is empty, meaning no preference.
+# write_affinity =
+#
+# The number of local (as governed by the write_affinity setting)
+# nodes to attempt to contact first, before any non-local ones. You
+# can use '* replicas' at the end to have it use the number given
+# times the number of replicas for the ring being used for the
+# request.
+# write_affinity_node_count = 2 * replicas
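+#
+# Worked example (assuming a 3-replica ring and write_affinity = r1):
+# '2 * replicas' means 2 * 3 = 6 region-1 nodes are tried before any node
+# outside region 1.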
+#
+# These are the headers whose values will only be shown to swift_owners. The
+# exact definition of a swift_owner is up to the auth system in use, but
+# usually indicates administrative responsibilities.
+# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control
+
+[filter:tempauth]
+use = egg:swift#tempauth
+# You can override the default log routing for this filter here:
+# set log_name = tempauth
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# The reseller prefix will verify a token begins with this prefix before even
+# attempting to validate it. Also, with authorization, only Swift storage
+# accounts with this prefix will be authorized by this middleware. Useful if
+# multiple auth systems are in use for one Swift cluster.
+# The reseller_prefix may contain a comma separated list of items. The first
+# item is used for the token as mentioned above. If second and subsequent
+# items exist, the middleware will handle authorization for an account with
+# that prefix. For example, for prefixes "AUTH, SERVICE", a path of
+# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty
+# (blank) reseller prefix is required, it must be first in the list. Two
+# single quote characters ('') indicate an empty (blank) reseller prefix.
+# reseller_prefix = AUTH
+
+#
+# The require_group parameter names a group that must be presented by
+# either X-Auth-Token or X-Service-Token. Usually this parameter is
+# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah).
+# By default, no group is needed. Do not use .admin.
+# require_group =
+
+# The auth prefix will cause requests beginning with this prefix to be routed
+# to the auth subsystem, for granting tokens, etc.
+# auth_prefix = /auth/
+# token_life = 86400
+#
+# This allows middleware higher in the WSGI pipeline to override auth
+# processing, useful for middleware such as tempurl and formpost. If you know
+# you're not going to use such middleware and you want a bit of extra security,
+# you can set this to false.
+# allow_overrides = true
+#
+# This specifies what scheme to return with storage urls:
+# http, https, or default (chooses based on what the server is running as)
+# This can be useful with an SSL load balancer in front of a non-SSL server.
+# storage_url_scheme = default
+#
+# Lastly, you need to list all the accounts/users you want here. The format is:
+# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
+# or if you want underscores in <account> or <user>, you can base64 encode them
+# (with no equal signs) and use this format:
+# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
+# There are special groups of:
+# .reseller_admin = can do anything to any account for this auth
+# .admin = can do anything within the account
+# If neither of these groups are specified, the user can only access containers
+# that have been explicitly allowed for them by a .admin or .reseller_admin.
+# The trailing optional storage_url allows you to specify an alternate url to
+# hand back to the user upon authentication. If not specified, this defaults to
+# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
+# to what the requester would need to use to reach this host.
+# Here are example entries, required for running the tests:
+user_admin_admin = admin .admin .reseller_admin
+user_test_tester = testing .admin
+user_test2_tester2 = testing2 .admin
+user_test_tester3 = testing3
+user_test5_tester5 = testing5 service
+
+# To enable Keystone authentication you first need to configure the auth
+# token middleware. An example follows below; please refer to the keystone
+# documentation for details about the different settings.
+#
+# You'll also need to have the keystoneauth middleware enabled and have it in
+# your main pipeline, as shown in the sample pipeline at the top of this file.
+#
+# The following parameters are known to work with keystonemiddleware v2.3.0
+# (above v2.0.0), but checking the latest information in the wiki page[1]
+# is recommended.
+# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration
+#
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+identity_uri = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+#auth_plugin = password
+auth_type = password
+project_domain_id = default
+user_domain_id = default
+project_name = service
+username = swift
+# NOTE: the swift service user is assumed to authenticate with its own
+# credential (a SWIFT_PASS variable); do not reuse another service's
+# password here.
+password = {{ SWIFT_PASS }}
+delay_auth_decision = True
+#
+# delay_auth_decision defaults to False, but leaving it as false will
+# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from
+# working. This value must be explicitly set to True.
+# delay_auth_decision = False
+#
+# cache = swift.cache
+# include_service_catalog = False
+#
+[filter:keystoneauth]
+use = egg:swift#keystoneauth
+operator_roles = admin,user
+# The reseller_prefix option lists account namespaces that this middleware is
+# responsible for. The prefix is placed before the Keystone project id.
+# For example, for project 12345678, and prefix AUTH, the account is
+# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...).
+# Several prefixes are allowed by specifying a comma-separated list
+# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a
+# single blank/empty prefix. If an empty prefix is required in a list of
+# prefixes, a value of '' (two single quote characters) indicates a
+# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_')
+# character is appended to the value unless already present.
+# reseller_prefix = AUTH
+#
+# The user must have at least one role named by operator_roles on a
+# project in order to create, delete and modify containers and objects
+# and to set and read privileged headers such as ACLs.
+# If there are several reseller prefix items, you can prefix the
+# parameter so it applies only to those accounts (for example
+# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project>
+# path). If you omit the prefix, the option applies to all reseller
+# prefix items. For the blank/empty prefix, prefix with '' (do not put
+# underscore after the two single quote characters).
+# operator_roles = admin, swiftoperator
+#
+# The reseller admin role has the ability to create and delete accounts
+# reseller_admin_role = ResellerAdmin
+#
+# This allows middleware higher in the WSGI pipeline to override auth
+# processing, useful for middleware such as tempurl and formpost. If you know
+# you're not going to use such middleware and you want a bit of extra security,
+# you can set this to false.
+# allow_overrides = true
+#
+# If the service_roles parameter is present, an X-Service-Token must be
+# present in the request that when validated, grants at least one role listed
+# in the parameter. The X-Service-Token may be scoped to any project.
+# If there are several reseller prefix items, you can prefix the
+# parameter so it applies only to those accounts (for example
+# the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project>
+# path). If you omit the prefix, the option applies to all reseller
+# prefix items. For the blank/empty prefix, prefix with '' (do not put
+# underscore after the two single quote characters).
+# By default, no service_roles are required.
+# service_roles =
+#
+# For backwards compatibility, keystoneauth will match names in cross-tenant
+# access control lists (ACLs) when both the requesting user and the tenant
+# are in the default domain, i.e. the domain to which existing tenants are
+# migrated. The default_domain_id value configured here should be the same as
+# the value used during migration of tenants to keystone domains.
+# default_domain_id = default
+#
+# For a new installation, or an installation in which keystone projects may
+# move between domains, you should disable backwards compatible name matching
+# in ACLs by setting allow_names_in_acls to false:
+# allow_names_in_acls = true
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
+# This facility may be used to temporarily remove a Swift node from a load
+# balancer pool during maintenance or upgrade (remove the file to allow the
+# node back into the load balancer pool).
+# disable_path =
+
+[filter:cache]
+use = egg:swift#memcache
+memcache_servers = {{ memcached_servers }}
+# You can override the default log routing for this filter here:
+# set log_name = cache
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# If not set here, the value for memcache_servers will be read from
+# memcache.conf (see memcache.conf-sample) or lacking that file, it will
+# default to the value below. You can specify multiple servers separated with
+# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
+# follow rfc3986 section-3.2.2, i.e. [::1]:11211)
+# memcache_servers = 127.0.0.1:11211
+#
+# Sets how memcache values are serialized and deserialized:
+# 0 = older, insecure pickle serialization
+# 1 = json serialization but pickles can still be read (still insecure)
+# 2 = json serialization only (secure and the default)
+# If not set here, the value for memcache_serialization_support will be read
+# from /etc/swift/memcache.conf (see memcache.conf-sample).
+# To avoid an instant full cache flush, existing installations should
+# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
+# set to 2 and reload.
+# In the future, the ability to use pickle serialization will be removed.
+# memcache_serialization_support = 2
+#
+# Sets the maximum number of connections to each memcached server per worker
+# memcache_max_connections = 2
+#
+# More options documented in memcache.conf-sample
+
+[filter:ratelimit]
+use = egg:swift#ratelimit
+# You can override the default log routing for this filter here:
+# set log_name = ratelimit
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# clock_accuracy should represent how accurate the proxy servers' system
+# clocks are relative to each other. 1000 means that all the proxies' clocks
+# are accurate to each other within 1 millisecond. No ratelimit should be
+# higher than the clock accuracy.
+# clock_accuracy = 1000
+#
+# max_sleep_time_seconds = 60
+#
+# log_sleep_time_seconds of 0 means disabled
+# log_sleep_time_seconds = 0
+#
+# Allows slow rates (e.g. running up to 5 seconds behind) to catch up.
+# rate_buffer_seconds = 5
+#
+# account_ratelimit of 0 means disabled
+# account_ratelimit = 0
+
+# DEPRECATED - these will continue to work but will be replaced
+# by the X-Account-Sysmeta-Global-Write-Ratelimit flag.
+# Please see ratelimiting docs for details.
+# these are comma separated lists of account names
+# account_whitelist = a,b
+# account_blacklist = c,d
+
+# with container_ratelimit_x = r
+# for containers of size x, limit write requests per second to r. The
+# container rate will be linearly interpolated from the values given. With
+# the values below, a container of size 5 will get a rate of 75.
+# container_ratelimit_0 = 100
+# container_ratelimit_10 = 50
+# container_ratelimit_50 = 20
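+#
+# Worked example of the interpolation, using the sample values above: a
+# container of size 5 sits halfway between size 0 (rate 100) and size 10
+# (rate 50), so its rate is 100 + (50 - 100) * (5 - 0) / (10 - 0) = 75.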
+
+# Similarly to the above container-level write limits, the following will limit
+# container GET (listing) requests.
+# container_listing_ratelimit_0 = 100
+# container_listing_ratelimit_10 = 50
+# container_listing_ratelimit_50 = 20
+
+[filter:domain_remap]
+use = egg:swift#domain_remap
+# You can override the default log routing for this filter here:
+# set log_name = domain_remap
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# storage_domain = example.com
+# path_root = v1
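+#
+# Rough sketch of the remapping (hypothetical names, assuming the sample
+# values above): a request to host container.account.example.com for /obj
+# would be remapped to the path /v1/account/container/obj.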
+
+# Browsers can convert a host header to lowercase, so check that reseller
+# prefix on the account is the correct case. This is done by comparing the
+# items in the reseller_prefixes config option to the found prefix. If they
+# match except for case, the item from reseller_prefixes will be used
+# instead of the found reseller prefix. When none match, the default reseller
+# prefix is used. When no default reseller prefix is configured, any request
+# with an account prefix not in that list will be ignored by this middleware.
+# reseller_prefixes = AUTH
+# default_reseller_prefix =
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+# You can override the default log routing for this filter here:
+# set log_name = catch_errors
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+[filter:cname_lookup]
+# Note: this middleware requires python-dnspython
+use = egg:swift#cname_lookup
+# You can override the default log routing for this filter here:
+# set log_name = cname_lookup
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+#
+# Specify the storage_domain that matches your cloud; multiple domains
+# can be specified, separated by a comma
+# storage_domain = example.com
+#
+# lookup_depth = 1
+
+# Note: Put staticweb just after your auth filter(s) in the pipeline
+[filter:staticweb]
+use = egg:swift#staticweb
+# You can override the default log routing for this filter here:
+# set log_name = staticweb
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
+[filter:tempurl]
+use = egg:swift#tempurl
+# The methods allowed with Temp URLs.
+# methods = GET HEAD PUT POST DELETE
+#
+# The headers to remove from incoming requests. Simply a whitespace delimited
+# list of header names and names can optionally end with '*' to indicate a
+# prefix match. incoming_allow_headers is a list of exceptions to these
+# removals.
+# incoming_remove_headers = x-timestamp
+#
+# The headers allowed as exceptions to incoming_remove_headers. Simply a
+# whitespace delimited list of header names and names can optionally end with
+# '*' to indicate a prefix match.
+# incoming_allow_headers =
+#
+# The headers to remove from outgoing responses. Simply a whitespace delimited
+# list of header names and names can optionally end with '*' to indicate a
+# prefix match. outgoing_allow_headers is a list of exceptions to these
+# removals.
+# outgoing_remove_headers = x-object-meta-*
+#
+# The headers allowed as exceptions to outgoing_remove_headers. Simply a
+# whitespace delimited list of header names and names can optionally end with
+# '*' to indicate a prefix match.
+# outgoing_allow_headers = x-object-meta-public-*
+
+# Note: Put formpost just before your auth filter(s) in the pipeline
+[filter:formpost]
+use = egg:swift#formpost
+
+# Note: Just needs to be placed before the proxy-server in the pipeline.
+[filter:name_check]
+use = egg:swift#name_check
+# forbidden_chars = '"`<>
+# maximum_length = 255
+# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
+
+[filter:list-endpoints]
+use = egg:swift#list_endpoints
+# list_endpoints_path = /endpoints/
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+# If not set, logging directives from [DEFAULT] without "access_" will be used
+# access_log_name = swift
+# access_log_facility = LOG_LOCAL0
+# access_log_level = INFO
+# access_log_address = /dev/log
+#
+# If set, access_log_udp_host will override access_log_address
+# access_log_udp_host =
+# access_log_udp_port = 514
+#
+# You can use log_statsd_* from [DEFAULT] or override them here:
+# access_log_statsd_host =
+# access_log_statsd_port = 8125
+# access_log_statsd_default_sample_rate = 1.0
+# access_log_statsd_sample_rate_factor = 1.0
+# access_log_statsd_metric_prefix =
+# access_log_headers = false
+#
+# If access_log_headers is True and access_log_headers_only is set, only
+# these headers are logged. Multiple headers can be defined as a comma
+# separated list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
+# access_log_headers_only =
+#
+# By default, the X-Auth-Token is logged. To obscure the value,
+# set reveal_sensitive_prefix to the number of characters to log.
+# For example, if set to 12, only the first 12 characters of the
+# token appear in the log. An unauthorized access of the log file
+# won't allow unauthorized usage of the token. However, the first
+# 12 or so characters are unique enough that you can trace/debug
+# token usage. Set to 0 to suppress the token completely (replaced
+# by '...' in the log).
+# Note: reveal_sensitive_prefix will not affect the value
+# logged with access_log_headers=True.
+# reveal_sensitive_prefix = 16
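+#
+# Example with a hypothetical token value: with reveal_sensitive_prefix = 12,
+# a token of AUTH_tk0123456789abcdef would appear in the log as
+# AUTH_tk01234...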
+#
+# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
+# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
+# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
+#
+# Note: The double proxy-logging in the pipeline is not a mistake. The
+# left-most proxy-logging is there to log requests that were handled in
+# middleware and never made it through to the right-most middleware (and
+# proxy server). Double logging is prevented for normal requests. See
+# proxy-logging docs.
+
+# Note: Put before both ratelimit and auth in the pipeline.
+[filter:bulk]
+use = egg:swift#bulk
+# max_containers_per_extraction = 10000
+# max_failed_extractions = 1000
+# max_deletes_per_request = 10000
+# max_failed_deletes = 1000
+
+# In order to keep a connection active during a potentially long bulk request,
+# Swift may return whitespace prepended to the actual response body. This
+# whitespace will be yielded no more than every yield_frequency seconds.
+# yield_frequency = 10
+
+# Note: The following parameter is used during a bulk delete of objects and
+# their container. Such requests would frequently fail because it is very
+# likely that not all replicated objects have been deleted by the time the
+# middleware gets a successful response. The number of retries can be
+# configured here, and the number of seconds to wait between each retry will
+# be 1.5**retry.
+
+# delete_container_retry_count = 0
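+
+# Worked example of the retry back-off above: with a hypothetical
+# delete_container_retry_count = 3, the waits between attempts would grow
+# as 1.5**1 = 1.5s, 1.5**2 = 2.25s, 1.5**3 = 3.375s.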
+
+# Note: Put after auth and staticweb in the pipeline.
+[filter:slo]
+use = egg:swift#slo
+# max_manifest_segments = 1000
+# max_manifest_size = 2097152
+#
+# Rate limiting applies only to segments smaller than this size (bytes).
+# rate_limit_under_size = 1048576
+#
+# Start rate-limiting SLO segment serving after the Nth small segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 1
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
+
+# Note: Put after auth and staticweb in the pipeline.
+# If you don't put it in the pipeline, it will be inserted for you.
+[filter:dlo]
+use = egg:swift#dlo
+# Start rate-limiting DLO segment serving after the Nth segment of a
+# segmented object.
+# rate_limit_after_segment = 10
+#
+# Once segment rate-limiting kicks in for an object, limit segments served
+# to N per second. 0 means no rate-limiting.
+# rate_limit_segments_per_sec = 1
+#
+# Time limit on GET requests (seconds)
+# max_get_time = 86400
+
+# Note: Put after auth in the pipeline.
+[filter:container-quotas]
+use = egg:swift#container_quotas
+
+# Note: Put after auth in the pipeline.
+[filter:account-quotas]
+use = egg:swift#account_quotas
+
+[filter:gatekeeper]
+use = egg:swift#gatekeeper
+# Set this to false if you want to allow clients to set arbitrary X-Timestamps
+# on uploaded objects. This may be used to preserve timestamps when migrating
+# from a previous storage system, but risks allowing users to upload
+# difficult-to-delete data.
+# shunt_inbound_x_timestamp = true
+#
+# You can override the default log routing for this filter here:
+# set log_name = gatekeeper
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_headers = false
+# set log_address = /dev/log
+
+[filter:container_sync]
+use = egg:swift#container_sync
+# Set this to false if you want to disallow any full url values to be set for
+# any new X-Container-Sync-To headers. This will keep any new full urls from
+# coming in, but won't change any existing values already in the cluster.
+# Updating those will have to be done manually, as knowing what the true realm
+# endpoint should be cannot always be guessed.
+# allow_full_urls = true
+# Set this to specify this cluster's //realm/cluster as "current" in /info
+# current = //REALM/CLUSTER
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after catch_errors, gatekeeper and healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers; the profiler should inherit
+# from the python standard profiler. Currently the supported values are
+# 'cProfile', 'eventlet.green.profile', etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/proxy.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# The profile data will be dumped to local disk, following the naming rule
+# above, at this interval.
+# dump_interval = 5.0
+#
+# Be careful: this option makes the profiler dump data into timestamped
+# files, which means lots of files will pile up in the directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shuts down.
+# flush_at_shutdown = false
+#
+# unwind the iterator of applications
+# unwind = false
+
+# Note: Put after slo, dlo in the pipeline.
+# If you don't put it in the pipeline, it will be inserted automatically.
+[filter:versioned_writes]
+use = egg:swift#versioned_writes
+# Enables using versioned writes middleware and exposing configuration
+# settings via HTTP GET /info.
+# WARNING: Setting this option bypasses the "allow_versions" option
+# in the container configuration file, which will be eventually
+# deprecated. See documentation for more details.
+# allow_versioned_writes = false
diff --git a/deploy/adapters/ansible/roles/swift/templates/rsyncd.conf b/deploy/adapters/ansible/roles/swift/templates/rsyncd.conf
new file mode 100644
index 00000000..703c55eb
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/templates/rsyncd.conf
@@ -0,0 +1,23 @@
+uid = swift
+gid = swift
+log file = /var/log/rsyncd.log
+pid file = /var/run/rsyncd.pid
+address = {{ internal_ip }}
+
+[account]
+max connections = 2
+path = /srv/node/
+read only = False
+lock file = /var/lock/account.lock
+
+[container]
+max connections = 2
+path = /srv/node/
+read only = False
+lock file = /var/lock/container.lock
+
+[object]
+max connections = 2
+path = /srv/node/
+read only = False
+lock file = /var/lock/object.lock
diff --git a/deploy/adapters/ansible/roles/swift/templates/swift.conf b/deploy/adapters/ansible/roles/swift/templates/swift.conf
new file mode 100644
index 00000000..9a31501b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/templates/swift.conf
@@ -0,0 +1,183 @@
+[swift-hash]
+
+# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
+# hashing algorithm when determining data placement in the cluster.
+# These values should remain secret and MUST NOT change
+# once a cluster has been deployed.
+# Use only printable chars (python -c "import string; print(string.printable)")
+
+swift_hash_path_suffix = 7c6a7cd34d07aed5
+swift_hash_path_prefix = 0c4629166f4de441
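+
+# New random values of this form can be generated with, for example:
+# openssl rand -hex 8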
+
+# storage policies are defined here and determine various characteristics
+# about how objects are stored and treated. Policies are specified by name on
+# a per container basis. Names are case-insensitive. The policy index is
+# specified in the section header and is used internally. The policy with
+# index 0 is always used for legacy containers and can be given a name for use
+# in metadata; however, the ring file name will always be 'object.ring.gz' for
+# backwards compatibility. If no policies are defined a policy with index 0
+# will be automatically created for backwards compatibility and given the name
+# Policy-0. A default policy is used when creating new containers when no
+# policy is specified in the request. If no other policies are defined the
+# policy with index 0 will be declared the default. If multiple policies are
+# defined you must define a policy with index 0 and you must specify a
+# default. It is recommended you always define a section for
+# storage-policy:0. Aliases are not required when defining a storage policy.
+#
+# A 'policy_type' argument is also supported but is not mandatory. Default
+# policy type 'replication' is used when 'policy_type' is unspecified.
+[storage-policy:0]
+name = Policy-0
+default = yes
+#policy_type = replication
+aliases = yellow, orange
+
+# the following section would declare a policy called 'silver', the number of
+# replicas will be determined by how the ring is built. In this example the
+# 'silver' policy could have a lower or higher # of replicas than the
+# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You
+# may only specify one storage policy section as the default. If you changed
+# this section to specify 'silver' as the default, a client creating a new
+# container without a policy specified would get the 'silver' policy, because
+# this config specified it as the default. However, if a legacy container
+# (one created with a pre-policy version of swift) is accessed, it is known
+# implicitly to be assigned to the policy with index 0 as opposed to the
+# current default. Note that even without specifying any aliases, a policy
+# always has at least the default name stored in aliases because this field is
+# used to contain all human readable names for a storage policy.
+#
+#[storage-policy:1]
+#name = silver
+#policy_type = replication
+
+# The following declares a storage policy of type 'erasure_coding' which uses
+# Erasure Coding for data reliability. Please refer to Swift documentation for
+# details on how the 'erasure_coding' storage policy is implemented.
+#
+# Swift uses PyECLib, a Python Erasure coding API library, for encode/decode
+# operations. Please refer to Swift documentation for details on how to
+# install PyECLib.
+#
+# When defining an EC policy, 'policy_type' needs to be 'erasure_coding' and
+# EC configuration parameters 'ec_type', 'ec_num_data_fragments' and
+# 'ec_num_parity_fragments' must be specified. 'ec_type' is chosen from the
+# list of EC backends supported by PyECLib. The ring configured for the
+# storage policy must have its "replica" count configured to
+# 'ec_num_data_fragments' + 'ec_num_parity_fragments' - this requirement is
+# validated when services start. 'ec_object_segment_size' is the amount of
+# data that will be buffered up before feeding a segment into the
+# encoder/decoder. More information about these configuration options and
+# supported `ec_type` schemes is available in the Swift documentation. Please
+# refer to Swift documentation for details on how to configure EC policies.
+#
+# The example 'deepfreeze10-4' policy defined below is a _sample_
+# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity'
+# fragments. 'ec_type' defines the Erasure Coding scheme.
+# 'liberasurecode_rs_vand' (Reed-Solomon Vandermonde) is used as an example
+# below.
+#
+#[storage-policy:2]
+#name = deepfreeze10-4
+#aliases = df10-4
+#policy_type = erasure_coding
+#ec_type = liberasurecode_rs_vand
+#ec_num_data_fragments = 10
+#ec_num_parity_fragments = 4
+#ec_object_segment_size = 1048576
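+#
+# Worked example: for the sample 'deepfreeze10-4' policy above, the object-2
+# ring would have to be built with a "replica" count of
+# ec_num_data_fragments + ec_num_parity_fragments = 10 + 4 = 14.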
+
+
+# The swift-constraints section sets the basic constraints on data
+# saved in the swift cluster. These constraints are automatically
+# published by the proxy server in responses to /info requests.
+
+[swift-constraints]
+
+# max_file_size is the largest "normal" object that can be saved in
+# the cluster. This is also the limit on the size of each segment of
+# a "large" object when using the large object manifest support.
+# This value is set in bytes. Setting it to lower than 1MiB will cause
+# some tests to fail. It is STRONGLY recommended to leave this value at
+# the default (5 * 2**30 + 2).
+
+#max_file_size = 5368709122
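+# (5 * 2**30 + 2 = 5368709122 bytes, i.e. 5 GiB plus 2 bytes)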
+
+
+# max_meta_name_length is the max number of bytes in the utf8 encoding
+# of the name portion of a metadata header.
+
+#max_meta_name_length = 128
+
+
+# max_meta_value_length is the max number of bytes in the utf8 encoding
+# of a metadata value
+
+#max_meta_value_length = 256
+
+
+# max_meta_count is the max number of metadata keys that can be stored
+# on a single account, container, or object
+
+#max_meta_count = 90
+
+
+# max_meta_overall_size is the max number of bytes in the utf8 encoding
+# of the metadata (keys + values)
+
+#max_meta_overall_size = 4096
+
+# max_header_size is the max number of bytes in the utf8 encoding of each
+# header. 8192 is used as the default because eventlet uses 8192 as the max
+# size of a header line. This value may need to be increased when using
+# identity v3 API tokens that include more than 7 catalog entries.
+# See also include_service_catalog in proxy-server.conf-sample
+# (documented in overview_auth.rst)
+
+#max_header_size = 8192
+
+
+# By default the maximum number of allowed headers depends on the number of max
+# allowed metadata settings plus a default value of 32 for regular http
+# headers. If for some reason this is not enough (custom middleware for
+# example) it can be increased with the extra_header_count constraint.
+
+#extra_header_count = 0
+
+
+# max_object_name_length is the max number of bytes in the utf8 encoding
+# of an object name
+
+#max_object_name_length = 1024
+
+
+# container_listing_limit is the default (and max) number of items
+# returned for a container listing request
+
+#container_listing_limit = 10000
+
+
+# account_listing_limit is the default (and max) number of items returned
+# for an account listing request
+#account_listing_limit = 10000
+
+
+# max_account_name_length is the max number of bytes in the utf8 encoding
+# of an account name
+
+#max_account_name_length = 256
+
+
+# max_container_name_length is the max number of bytes in the utf8 encoding
+# of a container name
+
+#max_container_name_length = 256
+
+
+# By default all REST API calls should use "v1" or "v1.0" as the version string,
+# for example "/v1/account". This can be manually overridden to make this
+# backward-compatible, in case a different version string has been used before.
+# Use a comma-separated list in case of multiple allowed versions, for example
+# valid_api_versions = v0,v1,v2
+# This is only enforced for account, container and object requests. The allowed
+# api versions are by default excluded from /info.
+
+# valid_api_versions = v1,v1.0
diff --git a/deploy/adapters/ansible/roles/swift/vars/Debian.yml b/deploy/adapters/ansible/roles/swift/vars/Debian.yml
new file mode 100644
index 00000000..39aea32d
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/vars/Debian.yml
@@ -0,0 +1,27 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+controller_packages:
+ - swift
+ - swift-proxy
+ - python-swiftclient
+ - python-keystoneclient
+ - memcached
+
+compute_packages:
+ - xfsprogs
+ - rsync
+ - swift
+ - swift-account
+ - swift-container
+ - swift-object
+
+
+services: []
diff --git a/deploy/adapters/ansible/roles/swift/vars/main.yml b/deploy/adapters/ansible/roles/swift/vars/main.yml
new file mode 100644
index 00000000..540068da
--- /dev/null
+++ b/deploy/adapters/ansible/roles/swift/vars/main.yml
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages_noarch: []
+
+services_noarch: []
+
+controller_packages_noarch: []
+compute_packages_noarch: []
diff --git a/deploy/adapters/ansible/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/roles/tacker/templates/tacker.j2
index d7311f62..ae0f644a 100644
--- a/deploy/adapters/ansible/roles/tacker/templates/tacker.j2
+++ b/deploy/adapters/ansible/roles/tacker/templates/tacker.j2
@@ -1,29 +1,426 @@
[DEFAULT]
-bind_host = {{ internal_ip }}
-bind_port = 8888
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = True
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = True
+
+# Where to store Tacker state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/tacker
+
+# Where to store lock files
+lock_path = $state_path/lock
+
auth_strategy = keystone
policy_file = /usr/local/etc/tacker/policy.json
-debug = True
-verbose = True
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
use_syslog = False
-state_path = /var/lib/tacker
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ internal_ip }}
+
+# Port to bind the API server to
+bind_port = 8888
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of tacker.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Tacker core plugin entrypoint to be loaded from the
+# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the tacker source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+# core_plugin =
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the tacker source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+service_plugins = vnfm,nfvo
+
+# Paste configuration file
+# api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+# auth_strategy = keystone
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Tacker is
+# being used in conjunction with nova security groups
+# allow_overlapping_ips = False
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = tacker.openstack.common.rpc.impl_kombu
+# Size of RPC thread pool
+# rpc_thread_pool_size = 64
+# Size of RPC connection pool
+# rpc_conn_pool_size = 30
+# Seconds to wait for a response from call or multicall
+# rpc_response_timeout = 60
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+# rpc_cast_timeout = 30
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = tacker
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# IP address of the RabbitMQ installation
+# rabbit_host = localhost
+# Password of the RabbitMQ server
+# rabbit_password = guest
+# Port where RabbitMQ server is running/listening
+# rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+# rabbit_userid = guest
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+
+# QPID
+# rpc_backend=tacker.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=tacker.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = tacker.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = tacker.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = tacker.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+# default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+# notification_topics = notifications
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative value means no max limit; otherwise the value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will just return pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+# agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# Allow auto scheduling of networks to a DHCP agent. Non-hosted networks
+# will be scheduled to the first DHCP agent which sends a
+# get_active_networks message to the tacker server
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to an L3 agent. Non-hosted routers will be
+# scheduled to the first L3 agent which sends a sync_routers message to the
+# tacker server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+# api_workers = 0
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+# rpc_workers = 0
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== tacker nova interactions ==========
+# Send notification to nova when port status is active.
+# notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+# notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+# nova_url = http://127.0.0.1:8774/v3
+
+# Name of nova region to use. Useful if keystone manages more than one region
+# nova_region_name =
+
+# Username for connection to nova in admin context
+# nova_admin_username =
+
+# The uuid of the admin nova tenant
+# nova_admin_tenant_id =
+
+# Password for connection to nova in admin context.
+# nova_admin_password =
+
+# Authorization URL for connection to nova in admin context.
+# nova_admin_auth_url =
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova url
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of tacker nova interactions ==========
+
+[agent]
+# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
[keystone_authtoken]
+signing_dir = /var/cache/tacker
+#cafile = /opt/stack/data/ca-bundle.pem
+#project_domain_id = default
+project_name = service
+#user_domain_id = default
password = console
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+username = tacker
auth_url = http://{{ internal_vip.ip }}:35357
-project_name = service
+auth_plugin = password
+# identity_uri should point at the admin identity endpoint, consistent with
+# the other service templates in this repo.
+identity_uri = http://{{ internal_vip.ip }}:35357
+auth_uri = http://{{ internal_vip.ip }}:5000
-[agent]
-root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/tacker
connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8
+# Replace the host above with the IP address of the database used by the
+# main tacker server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
-[servicevm_nova]
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[tacker]
+# Specify drivers for hosting device
+# infra_driver = heat,nova,noop
+
+# Specify drivers for mgmt
+# mgmt_driver = noop,openwrt
+
+# Specify drivers for monitoring
+# monitor_driver = ping, http_ping
+
+[nfvo_vim]
+# Supported VIM drivers: resource orchestration controllers such as
+# OpenStack, kvm
+# Default VIM driver is OpenStack
+# vim_drivers = openstack
+# Default VIM used when no vim id is provided
+default_vim = VIM0
+
+[vim_keys]
+# openstack = /etc/tacker/vim/fernet_keys
+
+[tacker_nova]
+# parameters for novaclient to talk to nova
+region_name = RegionOne
+#project_domain_id = default
+project_name = service
+#user_domain_id = default
password = console
+username = nova
auth_url = http://{{ internal_vip.ip }}:35357
+auth_plugin = password
-[servicevm_heat]
+[tacker_heat]
heat_uri = http://{{ internal_vip.ip }}:8004/v1
-
+stack_retries = 60
+stack_retry_wait = 5
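+
+# With the values above, tacker polls the Heat stack up to 60 times with a
+# 5 second wait between polls, i.e. roughly 60 * 5 = 300 seconds in total
+# (assuming stack_retries counts polls and stack_retry_wait is the per-poll
+# wait in seconds, as the option names suggest).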