-rwxr-xr-x  build.sh | 8
-rw-r--r--  build/build.conf | 3
-rwxr-xr-x  ci/deploy_ci.sh | 4
-rw-r--r--  deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml | 10
-rw-r--r--  deploy/adapters/ansible/openstack/templates/nova.conf | 5
-rw-r--r--  deploy/adapters/ansible/roles/common/vars/Debian.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/memcached/tasks/main.yml | 15
-rw-r--r--  deploy/adapters/ansible/roles/moon/files/controllers.py | 1062
-rw-r--r--  deploy/adapters/ansible/roles/moon/files/deb.conf | 11
-rw-r--r--  deploy/adapters/ansible/roles/moon/files/get_deb_depends.py | 36
-rw-r--r--  deploy/adapters/ansible/roles/moon/files/sources.list | 7
-rw-r--r--  deploy/adapters/ansible/roles/moon/tasks/main.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml | 16
-rw-r--r--  deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml | 235
-rwxr-xr-x [-rw-r--r--]  deploy/adapters/ansible/roles/moon/tasks/moon.yml | 207
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh | 15
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/api-paste.ini | 106
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh | 13
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini | 96
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/keystone.conf | 59
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/proxy-server.conf | 775
-rw-r--r--  deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2 | 46
-rw-r--r--  deploy/adapters/ansible/roles/moon/vars/Debian.yml | 168
-rw-r--r--  deploy/adapters/ansible/roles/moon/vars/main.yml | 165
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml | 7
-rw-r--r--  deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml | 28
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian | 2
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/vars/main.yml | 16
-rw-r--r--  deploy/adapters/ansible/roles/onos_cluster/files/sfc_plugins.conf | 4
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/onos_sfc_controller.yml | 64
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/vars/main.yml | 3
-rwxr-xr-x  deploy/adapters/ansible/roles/tacker/vars/RedHat.yml | 2
-rw-r--r--  deploy/client.py | 45
-rwxr-xr-x  deploy/compass_conf/flavor/openstack_newton.conf | 2
-rwxr-xr-x  deploy/compass_conf/package_metadata/openstack.conf | 55
-rwxr-xr-x  deploy/compass_conf/role/openstack_newton_ansible.conf | 10
-rwxr-xr-x  deploy/compass_conf/templates/ansible_installer/openstack_newton/inventories/HA-ansible-multinodes.tmpl | 7
-rwxr-xr-x  deploy/compass_conf/templates/ansible_installer/openstack_newton/vars/HA-ansible-multinodes.tmpl | 9
-rw-r--r--  deploy/conf/base.conf | 2
-rw-r--r-- [-rwxr-xr-x]  deploy/conf/vm_environment/os-odl_l2-sfc-ha.yml (renamed from deploy/adapters/ansible/roles/moon/handlers/main.yml) | 41
-rwxr-xr-x  deploy/deploy_host.sh | 4
-rw-r--r--  deploy/template/vm/compass.xml | 6
-rw-r--r--  docs/release/installation/offline-deploy.rst | 3
-rw-r--r--  docs/release/release-notes/release-notes.rst | 17
-rw-r--r--  repo/openstack/make_ppa/ubuntu/xenial/newton/download_pkg.tmpl | 4
-rw-r--r--  repo/openstack/special_pkg/Debian/make_openvswitch-switch.sh | 2
-rw-r--r--  repo/pip/code_url.conf | 2
-rw-r--r--  repo/pip/extra-requirement-tar.txt | 1
-rw-r--r--  repo/repo.conf | 4
52 files changed, 535 insertions, 2887 deletions
diff --git a/build.sh b/build.sh
index 3dfd54fd..b0b25bad 100755
--- a/build.sh
+++ b/build.sh
@@ -21,7 +21,7 @@ WORK_PATH=$COMPASS_PATH
PACKAGES="fuse fuseiso createrepo genisoimage curl"
# PACKAGE_URL will be reset in Jenkins for different branch
-export PACKAGE_URL=${PACKAGE_URL:-http://205.177.226.237:9999}
+export PACKAGE_URL=${PACKAGE_URL:-http://artifacts.opnfv.org/compass4nfv/package/danube}
mkdir -p $WORK_DIR
@@ -74,6 +74,12 @@ function download_url()
fi
curl --connect-timeout 10 -o $CACHE_DIR/$1 $2
+ local_md5=`md5sum $CACHE_DIR/$1 | cut -d ' ' -f 1`
+ repo_md5=`cat $CACHE_DIR/$1.md5 | cut -d ' ' -f 1`
+ if [[ $local_md5 != $repo_md5 ]]; then
+ echo "ERROR, the md5sum don't match"
+ exit 1
+ fi
}
function download_local()
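
Note: the new check in download_url() compares the md5sum of the downloaded package against a companion .md5 file in the cache directory. A minimal sketch of the same verification in Python, assuming <file> and <file>.md5 already sit side by side in the cache, would be:

    import hashlib
    import sys

    def verify_md5(pkg_path, md5_path):
        # Hash the downloaded package in chunks so large ISOs are not read into memory at once.
        digest = hashlib.md5()
        with open(pkg_path, "rb") as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b""):
                digest.update(chunk)
        # The .md5 file is assumed to use the "md5sum  filename" layout, so the
        # first whitespace-separated field is the expected checksum.
        with open(md5_path) as fh:
            expected = fh.read().split()[0]
        return digest.hexdigest() == expected

    if __name__ == "__main__":
        if not verify_md5(sys.argv[1], sys.argv[2]):
            sys.exit("ERROR: the md5sum does not match")
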
diff --git a/build/build.conf b/build/build.conf
index cb56e32a..101f01ba 100644
--- a/build/build.conf
+++ b/build/build.conf
@@ -1,8 +1,5 @@
TIMEOUT=10
-# PACKAGE_URL will be reset in Jenkins for different branch
-#export PACKAGE_URL=${PACKAGE_URL:-http://205.177.226.237:9999}
-
# Jumphost OS version
export CENTOS_BASE=${CENTOS_BASE:-$PACKAGE_URL/CentOS-7-x86_64-Minimal-1511.iso}
diff --git a/ci/deploy_ci.sh b/ci/deploy_ci.sh
index 1f206210..7fd9d86f 100755
--- a/ci/deploy_ci.sh
+++ b/ci/deploy_ci.sh
@@ -18,10 +18,6 @@ case $DEPLOY_SCENARIO in
echo "os-odl_l2-moon-ha scenario supports xenial mitaka only"
exit 1
;;
- os-onos-sfc-ha)
- echo "os-onos-sfc-ha scenario supports mitaka only"
- exit 1
- ;;
esac
if [[ $ROOT_BUILD_CAUSE == MANUALTRIGGER ]]; then
diff --git a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
index f328d959..d6a7d7a0 100644
--- a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
@@ -86,7 +86,7 @@
- dashboard
- heat
- aodh
- - congress
+# - congress
- hosts: all
remote_user: root
@@ -103,11 +103,11 @@
- cinder-volume
- ceilometer_compute
-- hosts: all
+- hosts: moon
remote_user: root
max_fail_percentage: 0
- roles: []
-# - moon
+ roles:
+ - moon
- hosts: all
remote_user: root
@@ -232,7 +232,7 @@
remote_user: root
max_fail_percentage: 0
roles:
-# - tacker
+ - tacker
- hosts: controller
remote_user: root
diff --git a/deploy/adapters/ansible/openstack/templates/nova.conf b/deploy/adapters/ansible/openstack/templates/nova.conf
index 3cd2c03b..661a718f 100644
--- a/deploy/adapters/ansible/openstack/templates/nova.conf
+++ b/deploy/adapters/ansible/openstack/templates/nova.conf
@@ -42,7 +42,6 @@ instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
-memcached_servers = {{ memcached_servers }}
osapi_compute_workers = {{ api_workers }}
metadata_workers = {{ api_workers }}
@@ -124,3 +123,7 @@ rabbit_password = {{ RABBIT_PASS }}
rabbit_port = 5672
rabbit_userid = {{ RABBIT_USER }}
+[cache]
+backend = dogpile.cache.memcached
+enabled = True
+memcache_servers = {{ memcached_servers }}
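
Note: this hunk moves memcached configuration out of the DEFAULT section and into the oslo [cache] block with the dogpile backend. A minimal sketch of what that backend does at the Python level, assuming a memcached instance reachable at 127.0.0.1:11211, would be:

    from dogpile.cache import make_region

    # Configure a dogpile region against the same backend name used in nova.conf.
    region = make_region().configure(
        "dogpile.cache.memcached",
        expiration_time=300,
        arguments={"url": "127.0.0.1:11211"},  # assumed local memcached endpoint
    )

    region.set("demo-key", "demo-value")
    print(region.get("demo-key"))  # returns "demo-value" when memcached is reachable
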
diff --git a/deploy/adapters/ansible/roles/common/vars/Debian.yml b/deploy/adapters/ansible/roles/common/vars/Debian.yml
index ed11bdd6..7cce40e1 100644
--- a/deploy/adapters/ansible/roles/common/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/common/vars/Debian.yml
@@ -10,8 +10,10 @@
packages:
- ubuntu-cloud-keyring
- python-dev
+ - dkms
+ - openvswitch-datapath-dkms
- openvswitch-switch
- - openvswitch-switch-dpdk
+ - openvswitch-common
- python-memcache
- python-iniparse
- python-lxml
diff --git a/deploy/adapters/ansible/roles/memcached/tasks/main.yml b/deploy/adapters/ansible/roles/memcached/tasks/main.yml
index a4457f36..2b4b7ec7 100644
--- a/deploy/adapters/ansible/roles/memcached/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/memcached/tasks/main.yml
@@ -19,6 +19,8 @@
- name: install packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
with_items: "{{ packages | union(packages_noarch) }}"
+ notify:
+ - restart memcahed services
- name: enable auto start
file:
@@ -32,4 +34,17 @@
notify:
- restart memcahed services
+- name: change memcache listen ip
+ lineinfile:
+ dest=/etc/sysconfig/memcached
+ regexp="^OPTIONS=.*"
+ line="OPTIONS=\"-l 0.0.0.0\""
+ when: ansible_os_family == "RedHat"
+ notify:
+ - restart memcahed services
+
+- name: generate memcached service list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: "{{ services | union(services_noarch) }}"
+
- meta: flush_handlers
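
Note: the role now forces memcached to listen on 0.0.0.0 on RedHat hosts. A hedged connectivity check from any node, assuming python-memcache (already in the common role package list) and a hypothetical controller address of 10.1.0.50, could look like:

    import memcache

    # Connect to the memcached instance that the role reconfigured to listen on all interfaces.
    client = memcache.Client(["10.1.0.50:11211"], debug=0)  # assumed controller address

    client.set("healthcheck", "ok", time=60)
    value = client.get("healthcheck")
    print("memcached reachable" if value == "ok" else "memcached NOT reachable")
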
diff --git a/deploy/adapters/ansible/roles/moon/files/controllers.py b/deploy/adapters/ansible/roles/moon/files/controllers.py
deleted file mode 100644
index fd107a5e..00000000
--- a/deploy/adapters/ansible/roles/moon/files/controllers.py
+++ /dev/null
@@ -1,1062 +0,0 @@
-# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
-# This software is distributed under the terms and conditions of the
-# 'Apache-2.0' license which can be found in the file 'LICENSE' in this
-# package distribution or at 'http://www.apache.org/licenses/LICENSE-2.0'.
-
-from keystone.common import controller
-from keystone import config
-from keystone import exception
-from keystone.models import token_model
-from keystone.contrib.moon.exception import * # noqa: F403
-from oslo_log import log
-from uuid import uuid4
-import requests
-
-
-CONF = config.CONF
-LOG = log.getLogger(__name__)
-
-
-@dependency.requires('configuration_api') # noqa: 405
-class Configuration(controller.V3Controller):
- collection_name = 'configurations'
- member_name = 'configuration'
-
- def __init__(self):
- super(Configuration, self).__init__()
-
- def _get_user_id_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref.get('user')
-
- @controller.protected()
- def get_policy_templates(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.configuration_api.get_policy_templates_dict(user_id)
-
- @controller.protected()
- def get_aggregation_algorithms(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.configuration_api.get_aggregation_algorithms_dict(user_id)
-
- @controller.protected()
- def get_sub_meta_rule_algorithms(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.configuration_api.get_sub_meta_rule_algorithms_dict(
- user_id)
-
-
-@dependency.requires('tenant_api', 'resource_api') # noqa: 405
-class Tenants(controller.V3Controller):
-
- def __init__(self):
- super(Tenants, self).__init__()
-
- def _get_user_id_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref.get('user')
-
- @controller.protected()
- def get_tenants(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.tenant_api.get_tenants_dict(user_id)
-
- def __get_keystone_tenant_dict(
- self, tenant_id="", tenant_name="", tenant_description="", domain="default"): # noqa
- tenants = self.resource_api.list_projects()
- for tenant in tenants:
- if tenant_id and tenant_id == tenant['id']:
- return tenant
- if tenant_name and tenant_name == tenant['name']:
- return tenant
- if not tenant_id:
- tenant_id = uuid4().hex
- if not tenant_name:
- tenant_name = tenant_id
- tenant = {
- "id": tenant_id,
- "name": tenant_name,
- "description": tenant_description,
- "enabled": True,
- "domain_id": domain
- }
- keystone_tenant = self.resource_api.create_project(
- tenant["id"], tenant)
- return keystone_tenant
-
- @controller.protected()
- def add_tenant(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- k_tenant_dict = self.__get_keystone_tenant_dict(
- tenant_name=kw.get('tenant_name'),
- tenant_description=kw.get(
- 'tenant_description', kw.get('tenant_name')),
- domain=kw.get('tenant_domain', "default"),
-
- )
- tenant_dict = dict()
- tenant_dict['id'] = k_tenant_dict['id']
- tenant_dict['name'] = kw.get('tenant_name', None)
- tenant_dict['description'] = kw.get('tenant_description', None)
- tenant_dict['intra_authz_extension_id'] = kw.get(
- 'tenant_intra_authz_extension_id', None)
- tenant_dict['intra_admin_extension_id'] = kw.get(
- 'tenant_intra_admin_extension_id', None)
- return self.tenant_api.add_tenant_dict(
- user_id, tenant_dict['id'], tenant_dict)
-
- @controller.protected()
- def get_tenant(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- tenant_id = kw.get('tenant_id', None)
- return self.tenant_api.get_tenant_dict(user_id, tenant_id)
-
- @controller.protected()
- def del_tenant(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- tenant_id = kw.get('tenant_id', None)
- return self.tenant_api.del_tenant(user_id, tenant_id)
-
- @controller.protected()
- def set_tenant(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- # Next line will raise an error if tenant doesn't exist
- k_tenant_dict = self.resource_api.get_project(
- kw.get('tenant_id', None))
- tenant_id = kw.get('tenant_id', None)
- tenant_dict = dict()
- tenant_dict['name'] = k_tenant_dict.get('name', None)
- if 'tenant_description' in kw:
- tenant_dict['description'] = kw.get('tenant_description', None)
- if 'tenant_intra_authz_extension_id' in kw:
- tenant_dict['intra_authz_extension_id'] = kw.get(
- 'tenant_intra_authz_extension_id', None)
- if 'tenant_intra_admin_extension_id' in kw:
- tenant_dict['intra_admin_extension_id'] = kw.get(
- 'tenant_intra_admin_extension_id', None)
- self.tenant_api.set_tenant_dict(user_id, tenant_id, tenant_dict)
-
-
-def callback(self, context, prep_info, *args, **kwargs):
- token_ref = ""
- if context.get('token_id') is not None:
- token_ref = token_model.KeystoneToken(
- token_id=context['token_id'],
- token_data=self.token_provider_api.validate_token(
- context['token_id']))
- if not token_ref:
- raise exception.Unauthorized
-
-
-@dependency.requires('authz_api') # noqa: 405
-class Authz_v3(controller.V3Controller):
-
- def __init__(self):
- super(Authz_v3, self).__init__()
-
- @controller.protected(callback)
- def get_authz(self, context, tenant_id, subject_k_id,
- object_name, action_name):
- try:
- return self.authz_api.authz(
- tenant_id, subject_k_id, object_name, action_name)
- except Exception as e:
- return {'authz': False, 'comment': unicode(e)}
-
-
-@dependency.requires('admin_api', 'root_api') # noqa: 405
-class IntraExtensions(controller.V3Controller):
- collection_name = 'intra_extensions'
- member_name = 'intra_extension'
-
- def __init__(self):
- super(IntraExtensions, self).__init__()
-
- def _get_user_id_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref.get('user')['id']
-
- # IntraExtension functions
- @controller.protected()
- def get_intra_extensions(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- return self.admin_api.get_intra_extensions_dict(user_id)
-
- @controller.protected()
- def add_intra_extension(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_dict = dict()
- intra_extension_dict['name'] = kw.get('intra_extension_name', None)
- intra_extension_dict['model'] = kw.get('intra_extension_model', None)
- intra_extension_dict['genre'] = kw.get('intra_extension_genre', None)
- intra_extension_dict['description'] = kw.get(
- 'intra_extension_description', None)
- intra_extension_dict['subject_categories'] = kw.get(
- 'intra_extension_subject_categories', dict())
- intra_extension_dict['object_categories'] = kw.get(
- 'intra_extension_object_categories', dict())
- intra_extension_dict['action_categories'] = kw.get(
- 'intra_extension_action_categories', dict())
- intra_extension_dict['subjects'] = kw.get(
- 'intra_extension_subjects', dict())
- intra_extension_dict['objects'] = kw.get(
- 'intra_extension_objects', dict())
- intra_extension_dict['actions'] = kw.get(
- 'intra_extension_actions', dict())
- intra_extension_dict['subject_scopes'] = kw.get(
- 'intra_extension_subject_scopes', dict())
- intra_extension_dict['object_scopes'] = kw.get(
- 'intra_extension_object_scopes', dict())
- intra_extension_dict['action_scopes'] = kw.get(
- 'intra_extension_action_scopes', dict())
- intra_extension_dict['subject_assignments'] = kw.get(
- 'intra_extension_subject_assignments', dict())
- intra_extension_dict['object_assignments'] = kw.get(
- 'intra_extension_object_assignments', dict())
- intra_extension_dict['action_assignments'] = kw.get(
- 'intra_extension_action_assignments', dict())
- intra_extension_dict['aggregation_algorithm'] = kw.get(
- 'intra_extension_aggregation_algorithm', dict())
- intra_extension_dict['sub_meta_rules'] = kw.get(
- 'intra_extension_sub_meta_rules', dict())
- intra_extension_dict['rules'] = kw.get('intra_extension_rules', dict())
- ref = self.admin_api.load_intra_extension_dict(
- user_id, intra_extension_dict=intra_extension_dict)
- return self.admin_api.populate_default_data(ref)
-
- @controller.protected()
- def get_intra_extension(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_intra_extension_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def del_intra_extension(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- self.admin_api.del_intra_extension(user_id, intra_extension_id)
-
- @controller.protected()
- def set_intra_extension(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- intra_extension_dict = dict()
- intra_extension_dict['name'] = kw.get('intra_extension_name', None)
- intra_extension_dict['model'] = kw.get('intra_extension_model', None)
- intra_extension_dict['genre'] = kw.get('intra_extension_genre', None)
- intra_extension_dict['description'] = kw.get(
- 'intra_extension_description', None)
- return self.admin_api.set_intra_extension_dict(
- user_id, intra_extension_id, intra_extension_dict)
-
- @controller.protected()
- def load_root_intra_extension(self, context, **kw):
- self.root_api.load_root_intra_extension_dict()
-
- # Metadata functions
- @controller.protected()
- def get_subject_categories(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_subject_categories_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def add_subject_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_dict = dict()
- subject_category_dict['name'] = kw.get('subject_category_name', None)
- subject_category_dict['description'] = kw.get(
- 'subject_category_description', None)
- return self.admin_api.add_subject_category_dict(
- user_id, intra_extension_id, subject_category_dict)
-
- @controller.protected()
- def get_subject_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- return self.admin_api.get_subject_category_dict(
- user_id, intra_extension_id, subject_category_id)
-
- @controller.protected()
- def del_subject_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- self.admin_api.del_subject_category(
- user_id, intra_extension_id, subject_category_id)
-
- @controller.protected()
- def set_subject_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_category_dict = dict()
- subject_category_dict['name'] = kw.get('subject_category_name', None)
- subject_category_dict['description'] = kw.get(
- 'subject_category_description', None)
- return self.admin_api.set_subject_category_dict(
- user_id, intra_extension_id, subject_category_id, subject_category_dict) # noqa
-
- @controller.protected()
- def get_object_categories(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_object_categories_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def add_object_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_dict = dict()
- object_category_dict['name'] = kw.get('object_category_name', None)
- object_category_dict['description'] = kw.get(
- 'object_category_description', None)
- return self.admin_api.add_object_category_dict(
- user_id, intra_extension_id, object_category_dict)
-
- @controller.protected()
- def get_object_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- return self.admin_api.get_object_categories_dict(
- user_id, intra_extension_id, object_category_id)
-
- @controller.protected()
- def del_object_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- self.admin_api.del_object_category(
- user_id, intra_extension_id, object_category_id)
-
- @controller.protected()
- def set_object_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_category_dict = dict()
- object_category_dict['name'] = kw.get('object_category_name', None)
- object_category_dict['description'] = kw.get(
- 'object_category_description', None)
- return self.admin_api.set_object_category_dict(
- user_id, intra_extension_id, object_category_id, object_category_dict) # noqa
-
- @controller.protected()
- def get_action_categories(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_action_categories_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def add_action_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_dict = dict()
- action_category_dict['name'] = kw.get('action_category_name', None)
- action_category_dict['description'] = kw.get(
- 'action_category_description', None)
- return self.admin_api.add_action_category_dict(
- user_id, intra_extension_id, action_category_dict)
-
- @controller.protected()
- def get_action_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- return self.admin_api.get_action_categories_dict(
- user_id, intra_extension_id, action_category_id)
-
- @controller.protected()
- def del_action_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- self.admin_api.del_action_category(
- user_id, intra_extension_id, action_category_id)
-
- @controller.protected()
- def set_action_category(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_category_dict = dict()
- action_category_dict['name'] = kw.get('action_category_name', None)
- action_category_dict['description'] = kw.get(
- 'action_category_description', None)
- return self.admin_api.set_action_category_dict(
- user_id, intra_extension_id, action_category_id, action_category_dict) # noqa
-
- # Perimeter functions
- @controller.protected()
- def get_subjects(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_subjects_dict(user_id, intra_extension_id)
-
- @controller.protected()
- def add_subject(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_dict = dict()
- subject_dict['name'] = kw.get('subject_name', None)
- subject_dict['description'] = kw.get('subject_description', None)
- subject_dict['password'] = kw.get('subject_password', None)
- subject_dict['email'] = kw.get('subject_email', None)
- return self.admin_api.add_subject_dict(
- user_id, intra_extension_id, subject_dict)
-
- @controller.protected()
- def get_subject(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- return self.admin_api.get_subject_dict(
- user_id, intra_extension_id, subject_id)
-
- @controller.protected()
- def del_subject(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- self.admin_api.del_subject(user_id, intra_extension_id, subject_id)
-
- @controller.protected()
- def set_subject(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- subject_dict = dict()
- subject_dict['name'] = kw.get('subject_name', None)
- subject_dict['description'] = kw.get('subject_description', None)
- return self.admin_api.set_subject_dict(
- user_id, intra_extension_id, subject_id, subject_dict)
-
- @controller.protected()
- def get_objects(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_objects_dict(user_id, intra_extension_id)
-
- @controller.protected()
- def add_object(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_dict = dict()
- object_dict['name'] = kw.get('object_name', None)
- object_dict['description'] = kw.get('object_description', None)
- return self.admin_api.add_object_dict(
- user_id, intra_extension_id, object_dict)
-
- @controller.protected()
- def get_object(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- return self.admin_api.get_object_dict(
- user_id, intra_extension_id, object_id)
-
- @controller.protected()
- def del_object(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- self.admin_api.del_object(user_id, intra_extension_id, object_id)
-
- @controller.protected()
- def set_object(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- object_dict = dict()
- object_dict['name'] = kw.get('object_name', None)
- object_dict['description'] = kw.get('object_description', None)
- return self.admin_api.set_object_dict(
- user_id, intra_extension_id, object_id, object_dict)
-
- @controller.protected()
- def get_actions(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_actions_dict(user_id, intra_extension_id)
-
- @controller.protected()
- def add_action(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_dict = dict()
- action_dict['name'] = kw.get('action_name', None)
- action_dict['description'] = kw.get('action_description', None)
- return self.admin_api.add_action_dict(
- user_id, intra_extension_id, action_dict)
-
- @controller.protected()
- def get_action(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- return self.admin_api.get_action_dict(
- user_id, intra_extension_id, action_id)
-
- @controller.protected()
- def del_action(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- self.admin_api.del_action(user_id, intra_extension_id, action_id)
-
- @controller.protected()
- def set_action(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- action_dict = dict()
- action_dict['name'] = kw.get('action_name', None)
- action_dict['description'] = kw.get('action_description', None)
- return self.admin_api.set_action_dict(
- user_id, intra_extension_id, action_id, action_dict)
-
- # Scope functions
- @controller.protected()
- def get_subject_scopes(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- return self.admin_api.get_subject_scopes_dict(
- user_id, intra_extension_id, subject_category_id)
-
- @controller.protected()
- def add_subject_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_dict = dict()
- subject_scope_dict['name'] = kw.get('subject_scope_name', None)
- subject_scope_dict['description'] = kw.get(
- 'subject_scope_description', None)
- return self.admin_api.add_subject_scope_dict(
- user_id, intra_extension_id, subject_category_id, subject_scope_dict) # noqa
-
- @controller.protected()
- def get_subject_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- return self.admin_api.get_subject_scope_dict(
- user_id, intra_extension_id, subject_category_id, subject_scope_id)
-
- @controller.protected()
- def del_subject_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- self.admin_api.del_subject_scope(
- user_id,
- intra_extension_id,
- subject_category_id,
- subject_scope_id)
-
- @controller.protected()
- def set_subject_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- subject_scope_dict = dict()
- subject_scope_dict['name'] = kw.get('subject_scope_name', None)
- subject_scope_dict['description'] = kw.get(
- 'subject_scope_description', None)
- return self.admin_api.set_subject_scope_dict(
- user_id, intra_extension_id, subject_category_id, subject_scope_id, subject_scope_dict) # noqa
-
- @controller.protected()
- def get_object_scopes(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- return self.admin_api.get_object_scopes_dict(
- user_id, intra_extension_id, object_category_id)
-
- @controller.protected()
- def add_object_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_dict = dict()
- object_scope_dict['name'] = kw.get('object_scope_name', None)
- object_scope_dict['description'] = kw.get(
- 'object_scope_description', None)
- return self.admin_api.add_object_scope_dict(
- user_id, intra_extension_id, object_category_id, object_scope_dict)
-
- @controller.protected()
- def get_object_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- return self.admin_api.get_object_scope_dict(
- user_id, intra_extension_id, object_category_id, object_scope_id)
-
- @controller.protected()
- def del_object_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- self.admin_api.del_object_scope(
- user_id,
- intra_extension_id,
- object_category_id,
- object_scope_id)
-
- @controller.protected()
- def set_object_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- object_scope_dict = dict()
- object_scope_dict['name'] = kw.get('object_scope_name', None)
- object_scope_dict['description'] = kw.get(
- 'object_scope_description', None)
- return self.admin_api.set_object_scope_dict(
- user_id, intra_extension_id, object_category_id, object_scope_id, object_scope_dict) # noqa
-
- @controller.protected()
- def get_action_scopes(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- return self.admin_api.get_action_scopes_dict(
- user_id, intra_extension_id, action_category_id)
-
- @controller.protected()
- def add_action_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_dict = dict()
- action_scope_dict['name'] = kw.get('action_scope_name', None)
- action_scope_dict['description'] = kw.get(
- 'action_scope_description', None)
- return self.admin_api.add_action_scope_dict(
- user_id, intra_extension_id, action_category_id, action_scope_dict)
-
- @controller.protected()
- def get_action_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- return self.admin_api.get_action_scope_dict(
- user_id, intra_extension_id, action_category_id, action_scope_id)
-
- @controller.protected()
- def del_action_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- self.admin_api.del_action_scope(
- user_id,
- intra_extension_id,
- action_category_id,
- action_scope_id)
-
- @controller.protected()
- def set_action_scope(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- action_scope_dict = dict()
- action_scope_dict['name'] = kw.get('action_scope_name', None)
- action_scope_dict['description'] = kw.get(
- 'action_scope_description', None)
- return self.admin_api.set_action_scope_dict(
- user_id, intra_extension_id, action_category_id, action_scope_id, action_scope_dict) # noqa
-
- # Assignment functions
-
- @controller.protected()
- def add_subject_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- return self.admin_api.add_subject_assignment_list(
- user_id, intra_extension_id, subject_id, subject_category_id, subject_scope_id) # noqa
-
- @controller.protected()
- def get_subject_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- return self.admin_api.get_subject_assignment_list(
- user_id, intra_extension_id, subject_id, subject_category_id)
-
- @controller.protected()
- def del_subject_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- subject_id = kw.get('subject_id', None)
- subject_category_id = kw.get('subject_category_id', None)
- subject_scope_id = kw.get('subject_scope_id', None)
- self.admin_api.del_subject_assignment(
- user_id,
- intra_extension_id,
- subject_id,
- subject_category_id,
- subject_scope_id)
-
- @controller.protected()
- def add_object_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- return self.admin_api.add_object_assignment_list(
- user_id, intra_extension_id, object_id, object_category_id, object_scope_id) # noqa
-
- @controller.protected()
- def get_object_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- object_category_id = kw.get('object_category_id', None)
- return self.admin_api.get_object_assignment_list(
- user_id, intra_extension_id, object_id, object_category_id)
-
- @controller.protected()
- def del_object_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- object_id = kw.get('object_id', None)
- object_category_id = kw.get('object_category_id', None)
- object_scope_id = kw.get('object_scope_id', None)
- self.admin_api.del_object_assignment(
- user_id,
- intra_extension_id,
- object_id,
- object_category_id,
- object_scope_id)
-
- @controller.protected()
- def add_action_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- return self.admin_api.add_action_assignment_list(
- user_id, intra_extension_id, action_id, action_category_id, action_scope_id) # noqa
-
- @controller.protected()
- def get_action_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- action_category_id = kw.get('action_category_id', None)
- return self.admin_api.get_action_assignment_list(
- user_id, intra_extension_id, action_id, action_category_id)
-
- @controller.protected()
- def del_action_assignment(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- action_id = kw.get('action_id', None)
- action_category_id = kw.get('action_category_id', None)
- action_scope_id = kw.get('action_scope_id', None)
- self.admin_api.del_action_assignment(
- user_id,
- intra_extension_id,
- action_id,
- action_category_id,
- action_scope_id)
-
- # Metarule functions
-
- @controller.protected()
- def get_aggregation_algorithm(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_aggregation_algorithm_id(
- user_id, intra_extension_id)
-
- @controller.protected()
- def set_aggregation_algorithm(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- aggregation_algorithm_id = kw.get('aggregation_algorithm_id', None)
- return self.admin_api.set_aggregation_algorithm_id(
- user_id, intra_extension_id, aggregation_algorithm_id)
-
- @controller.protected()
- def get_sub_meta_rules(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- return self.admin_api.get_sub_meta_rules_dict(
- user_id, intra_extension_id)
-
- @controller.protected()
- def add_sub_meta_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_dict = dict()
- sub_meta_rule_dict['name'] = kw.get('sub_meta_rule_name', None)
- sub_meta_rule_dict['algorithm'] = kw.get(
- 'sub_meta_rule_algorithm', None)
- sub_meta_rule_dict['subject_categories'] = kw.get(
- 'sub_meta_rule_subject_categories', None)
- sub_meta_rule_dict['object_categories'] = kw.get(
- 'sub_meta_rule_object_categories', None)
- sub_meta_rule_dict['action_categories'] = kw.get(
- 'sub_meta_rule_action_categories', None)
- return self.admin_api.add_sub_meta_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_dict)
-
- @controller.protected()
- def get_sub_meta_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- return self.admin_api.get_sub_meta_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id)
-
- @controller.protected()
- def del_sub_meta_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- self.admin_api.del_sub_meta_rule(
- user_id, intra_extension_id, sub_meta_rule_id)
-
- @controller.protected()
- def set_sub_meta_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- sub_meta_rule_dict = dict()
- sub_meta_rule_dict['name'] = kw.get('sub_meta_rule_name', None)
- sub_meta_rule_dict['algorithm'] = kw.get(
- 'sub_meta_rule_algorithm', None)
- sub_meta_rule_dict['subject_categories'] = kw.get(
- 'sub_meta_rule_subject_categories', None)
- sub_meta_rule_dict['object_categories'] = kw.get(
- 'sub_meta_rule_object_categories', None)
- sub_meta_rule_dict['action_categories'] = kw.get(
- 'sub_meta_rule_action_categories', None)
- return self.admin_api.set_sub_meta_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id, sub_meta_rule_dict)
-
- # Rules functions
- @controller.protected()
- def get_rules(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- return self.admin_api.get_rules_dict(
- user_id, intra_extension_id, sub_meta_rule_id)
-
- @controller.protected()
- def add_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- subject_category_list = kw.get('subject_categories', [])
- object_category_list = kw.get('object_categories', [])
- action_category_list = kw.get('action_categories', [])
- enabled_bool = kw.get('enabled', True)
- rule_list = subject_category_list + action_category_list + \
- object_category_list + [enabled_bool, ]
- return self.admin_api.add_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id, rule_list)
-
- @controller.protected()
- def get_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- rule_id = kw.get('rule_id', None)
- return self.admin_api.get_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id, rule_id)
-
- @controller.protected()
- def del_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- rule_id = kw.get('rule_id', None)
- self.admin_api.del_rule(
- user_id,
- intra_extension_id,
- sub_meta_rule_id,
- rule_id)
-
- @controller.protected()
- def set_rule(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- intra_extension_id = kw.get('intra_extension_id', None)
- sub_meta_rule_id = kw.get('sub_meta_rule_id', None)
- rule_id = kw.get('rule_id', None)
- rule_list = list()
- subject_category_list = kw.get('subject_categories', [])
- object_category_list = kw.get('object_categories', [])
- action_category_list = kw.get('action_categories', [])
- rule_list = subject_category_list + action_category_list + object_category_list # noqa
- return self.admin_api.set_rule_dict(
- user_id, intra_extension_id, sub_meta_rule_id, rule_id, rule_list)
-
-
-@dependency.requires('authz_api') # noqa: 405
-class InterExtensions(controller.V3Controller):
-
- def __init__(self):
- super(InterExtensions, self).__init__()
-
- def _get_user_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref['user']
-
- # @controller.protected()
- # def get_inter_extensions(self, context, **kw):
- # user = self._get_user_from_token(context.get('token_id'))
- # return {
- # 'inter_extensions':
- # self.interextension_api.get_inter_extensions()
- # }
-
- # @controller.protected()
- # def get_inter_extension(self, context, **kw):
- # user = self._get_user_from_token(context.get('token_id'))
- # return {
- # 'inter_extensions':
- # self.interextension_api.get_inter_extension(uuid=kw['inter_extension_id'])
- # }
-
- # @controller.protected()
- # def create_inter_extension(self, context, **kw):
- # user = self._get_user_from_token(context.get('token_id'))
- # return self.interextension_api.create_inter_extension(kw)
-
- # @controller.protected()
- # def delete_inter_extension(self, context, **kw):
- # user = self._get_user_from_token(context.get('token_id'))
- # if 'inter_extension_id' not in kw:
- # raise exception.Error
- # return
- # self.interextension_api.delete_inter_extension(kw['inter_extension_id'])
-
-
-@dependency.requires('moonlog_api', 'authz_api') # noqa: 405
-class Logs(controller.V3Controller):
-
- def __init__(self):
- super(Logs, self).__init__()
-
- def _get_user_id_from_token(self, token_id):
- response = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(
- token_id=token_id, token_data=response)
- return token_ref['user']
-
- @controller.protected()
- def get_logs(self, context, **kw):
- user_id = self._get_user_id_from_token(context.get('token_id'))
- options = kw.get('options', '')
- return self.moonlog_api.get_logs(user_id, options)
-
-
-@dependency.requires('identity_api', "token_provider_api", "resource_api") # noqa: 405
-class MoonAuth(controller.V3Controller):
-
- def __init__(self):
- super(MoonAuth, self).__init__()
-
- def _get_project(self, uuid="", name=""):
- projects = self.resource_api.list_projects()
- for project in projects:
- if uuid and uuid == project['id']:
- return project
- elif name and name == project['name']:
- return project
-
- def get_token(self, context, **kw):
- data_auth = {
- "auth": {
- "identity": {
- "methods": [
- "password"
- ],
- "password": {
- "user": {
- "domain": {
- "id": "Default"
- },
- "name": kw['username'],
- "password": kw['password']
- }
- }
- }
- }
- }
-
- message = {}
- if "project" in kw:
- project = self._get_project(name=kw['project'])
- if project:
- data_auth["auth"]["scope"] = dict()
- data_auth["auth"]["scope"]['project'] = dict()
- data_auth["auth"]["scope"]['project']['id'] = project['id']
- else:
- message = {
- "error": {
- "message": "Unable to find project {}".format(kw['project']), # noqa
- "code": 200,
- "title": "UnScopedToken"
- }}
-
-# req = requests.post("http://localhost:5000/v3/auth/tokens",
-# json=data_auth,
-# headers={"Content-Type": "application/json"}
-# )
- req = requests.post("http://172.16.1.222:5000/v3/auth/tokens",
- json=data_auth,
- headers={"Content-Type": "application/json"}
- )
- if req.status_code not in (200, 201):
- LOG.error(req.text)
- else:
- _token = req.headers['X-Subject-Token']
- _data = req.json()
- _result = {
- "token": _token,
- 'message': message
- }
- try:
- _result["roles"] = map(
- lambda x: x['name'], _data["token"]["roles"])
- except KeyError:
- pass
- return _result
- return {"token": None, 'message': req.json()}
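
Note: the deleted MoonAuth.get_token controller built a Keystone v3 password-auth payload and read the issued token from the X-Subject-Token response header. A standalone sketch of that request flow, assuming a Keystone endpoint at http://127.0.0.1:5000 and placeholder credentials (the deleted code hard-coded 172.16.1.222), would be:

    import requests

    KEYSTONE = "http://127.0.0.1:5000"  # assumed endpoint

    payload = {
        "auth": {
            "identity": {
                "methods": ["password"],
                "password": {
                    "user": {
                        "domain": {"id": "default"},
                        "name": "admin",       # assumed user
                        "password": "secret",  # assumed password
                    }
                },
            }
        }
    }

    resp = requests.post(
        KEYSTONE + "/v3/auth/tokens",
        json=payload,
        headers={"Content-Type": "application/json"},
    )
    if resp.status_code in (200, 201):
        # Keystone returns the token id in a response header, not in the JSON body.
        print("token:", resp.headers["X-Subject-Token"])
    else:
        print("auth failed:", resp.text)
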
diff --git a/deploy/adapters/ansible/roles/moon/files/deb.conf b/deploy/adapters/ansible/roles/moon/files/deb.conf
deleted file mode 100644
index 6e1159a1..00000000
--- a/deploy/adapters/ansible/roles/moon/files/deb.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-keystone/admin-password: password
-keystone/auth-token: password
-keystone/admin-password-confirm: password
-keystone/admin-email: root@localhost
-keystone/admin-role-name: admin
-keystone/admin-user: admin
-keystone/create-admin-tenant: false
-keystone/region-name: Orange
-keystone/admin-tenant-name: admin
-keystone/register-endpoint: false
-keystone/configure_db: false
diff --git a/deploy/adapters/ansible/roles/moon/files/get_deb_depends.py b/deploy/adapters/ansible/roles/moon/files/get_deb_depends.py
deleted file mode 100644
index e01c1ff7..00000000
--- a/deploy/adapters/ansible/roles/moon/files/get_deb_depends.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python3
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-import sys
-import subprocess
-
-pkts = []
-
-for arg in sys.argv[1:]:
- proc = subprocess.Popen(["dpkg-deb",
- "--info",
- arg],
- stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out = proc.stdout.read()
- err = proc.stderr.read()
- if err:
- print("An error occurred with {} ({})".format(arg, err))
- continue
- for line in out.splitlines():
- line = line.decode('utf-8')
- if " Depends:" in line:
- line = line.replace(" Depends:", "")
- for _dep in line.split(','):
- pkts.append(_dep.split()[0])
-
-print(" ".join(pkts))
diff --git a/deploy/adapters/ansible/roles/moon/files/sources.list b/deploy/adapters/ansible/roles/moon/files/sources.list
new file mode 100644
index 00000000..a78ab23a
--- /dev/null
+++ b/deploy/adapters/ansible/roles/moon/files/sources.list
@@ -0,0 +1,7 @@
+deb [ arch=amd64 ] http://192.168.137.222/ubuntu/ xenial main restricted universe multiverse
+deb [ arch=amd64 ] http://192.168.137.222/ubuntu/ xenial-security main restricted universe multiverse
+deb [ arch=amd64 ] http://192.168.137.222/ubuntu/ xenial-updates main restricted universe multiverse
+
+deb-src [ arch=amd64 ] http://192.168.137.222/ubuntu/ xenial main restricted universe multiverse
+deb-src [ arch=amd64 ] http://192.168.137.222/ubuntu/ xenial-security main restricted universe multiverse
+deb-src [ arch=amd64 ] http://192.168.137.222/ubuntu/ xenial-updates main restricted universe multiverse
diff --git a/deploy/adapters/ansible/roles/moon/tasks/main.yml b/deploy/adapters/ansible/roles/moon/tasks/main.yml
index a3511de7..ffac2139 100644
--- a/deploy/adapters/ansible/roles/moon/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/moon/tasks/main.yml
@@ -1,5 +1,5 @@
#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -8,4 +8,4 @@
##############################################################################
---
- include: moon.yml
- when: moon == "Enable"
+ when: ansible_os_family == "Debian"
diff --git a/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml b/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml
deleted file mode 100644
index c2ca2fcf..00000000
--- a/deploy/adapters/ansible/roles/moon/tasks/moon-compute.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: update api-paste.ini
- template: src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes
-
-- name: restart nova task
- service: name={{ item }} state=restarted enabled=yes
- with_items:
- - nova-compute
diff --git a/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml b/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml
deleted file mode 100644
index ad030bda..00000000
--- a/deploy/adapters/ansible/roles/moon/tasks/moon-controller.yml
+++ /dev/null
@@ -1,235 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-# install all packages
-- name: install unzip packages
- shell: apt-get install -y python-pip unzip
-
-# download master.zip
-- name: get image http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: download keystone-moon packages
- get_url:
- url: "http://{{ http_server.stdout_lines[0] }}/packages/moon/master.zip"
- dest: /tmp/master.zip
- mode: 0444
-
-- name: extract keystone-moon packages
- unarchive: src=/tmp/master.zip dest=/tmp copy=no
-
-# install all dependencies
-- name: copy scripts
- copy: src=get_deb_depends.py dest=/tmp/get_deb_depends.py
-
-- name: install keystone-moon dependencies
- shell: |
- apt-get install \
- $(python /tmp/get_deb_depends.py /tmp/moon-bin-master/*.deb)
- when: ansible_os_family == "Debian"
-
-- name: delete configuration file
- shell: >
- rm -f {{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf;
- rm -f {{ apache_config_dir }}/sites-available/wsgi-keystone.conf;
-
-# install keystone moon
-- name: copy scripts
- copy: src=deb.conf dest=/tmp/deb.conf
-
-- name: install keystone moon
- shell: >
- export DEBIAN_FRONTEND="noninteractive";
- sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
-
-# - name: install keystone moon
-# shell: >
-# export DEBIAN_FRONTEND="noninteractive";
-# sudo -E debconf-set-selections python-keystone < /tmp/deb.conf;
-# sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb;
-
-- name: stop keystone task
- shell: >
- service keystone stop;
- mv /etc/init.d/keystone /home/;
- mv /etc/init/keystone.conf /home/;
- mv /lib/systemd/system/keystone.service /home/;
-
-# config keystone and apache2
-- name: delete sqlite database
- file:
- path: /var/lib/keystone/keystone.db
- state: absent
-
-# - name: update keystone conf
-# template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
-
-
-# - name: assure listen port exist
-# lineinfile:
-# dest: '{{ apache_config_dir }}/ports.conf'
-# regexp: '{{ item.regexp }}'
-# line: '{{ item.line}}'
-# with_items:
-# - regexp: "^Listen {{ internal_ip }}:5000"
-# line: "Listen {{ internal_ip }}:5000"
-# - regexp: "^Listen {{ internal_ip }}:35357"
-# line: "Listen {{ internal_ip }}:35357"
-
-- name: update apache2 configs
- template:
- src: wsgi-keystone.conf.j2
- dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
- when: ansible_os_family == 'Debian'
-
-- name: enable keystone server
- file:
- src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
- dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
- state: "link"
- when: ansible_os_family == 'Debian'
-
-# - name: keystone source files
-# template: src={{ item }} dest=/opt/{{ item }}
-# with_items:
-# - admin-openrc.sh
-# - demo-openrc.sh
-
-# keystone paste ini
-- name: backup keystone-paste.ini
- shell: >
- cp /etc/keystone/keystone-paste.ini /etc/keystone/keystone-paste.ini.bak;
-
-- name: config keystone-paste.ini
- shell: >
- sed -i "3i[pipeline:moon_pipeline]\n" /etc/keystone/keystone-paste.ini;
- sed -i "5i[app:moon_service]\nuse = egg:keystone#moon_service\n" \
- /etc/keystone/keystone-paste.ini;
- sed -i "s/use = egg:Paste#urlmap/use = egg:Paste#urlmap\n\/moon = moon_pipeline/" \
- /etc/keystone/keystone-paste.ini;
-
-- name: config keystone-paste.ini
- blockinfile:
- dest: /etc/keystone/keystone-paste.ini
- insertafter: "pipeline:moon_pipeline"
- block: >
- pipeline = sizelimit url_normalize request_id build_auth_context
- token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service
-
-# moon log
-- name: moon log
- shell: >
- sudo mkdir /var/log/moon/;
- sudo chown keystone /var/log/moon/;
- sudo addgroup moonlog;
- sudo chgrp moonlog /var/log/moon/;
- sudo touch /var/log/moon/keystonemiddleware.log;
- sudo touch /var/log/moon/system.log;
- sudo chgrp moonlog /var/log/moon/keystonemiddleware.log;
- sudo chgrp moonlog /var/log/moon/system.log;
- sudo chmod g+rw /var/log/moon;
- sudo chmod g+rw /var/log/moon/keystonemiddleware.log;
- sudo chmod g+rw /var/log/moon/system.log;
- sudo adduser keystone moonlog;
- # sudo adduser swift moonlog;
- sudo adduser nova moonlog;
-
-
-# keystone db sync
-- name: keystone db sync
- shell: >
- sudo /usr/bin/keystone-manage db_sync;
- sudo /usr/bin/keystone-manage db_sync --extension moon;
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: wait for keystone ready
- wait_for: port=35357 delay=3 timeout=10 host={{ internal_ip }}
-
-# moon workaround
-- name: copy scripts
- copy:
- src: controllers.py
- dest: /usr/lib/python2.7/dist-packages/keystone/contrib/moon/controllers.py
-
-# apache2 restart
-- name: restart apache2
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
-
-# install moonclient
-- name: install moon client
- shell: sudo pip install /tmp/moon-bin-master/python-moonclient-0.1.tar.gz
-
-# - name: add tenants
-# keystone_user:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# tenant: "{{ item.tenant }}"
-# tenant_description: "{{ item.tenant_description }}"
-# with_items: "{{ os_users }}"
-# when: inventory_hostname == groups['controller'][0]
-
-# - name: add users
-# keystone_user:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# user: "{{ item.user }}"
-# tenant: "{{ item.tenant }}"
-# password: "{{ item.password }}"
-# email: "{{ item.email }}"
-# with_items: "{{ os_users }}"
-# when: inventory_hostname == groups['controller'][0]
-
-# - name: grant roles
-# keystone_user:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# user: "{{ item.user }}"
-# role: "{{ item.role }}"
-# tenant: "{{ item.tenant }}"
-# with_items: "{{ os_users }}"
-# when: inventory_hostname == groups['controller'][0]
-
-# - name: add endpoints
-# keystone_service:
-# token: "{{ ADMIN_TOKEN }}"
-# endpoint: "http://{{ internal_ip }}:35357/v2.0"
-# name: "{{ item.name }}"
-# type: "{{ item.type }}"
-# region: "{{ item.region}}"
-# description: "{{ item.description }}"
-# publicurl: "{{ item.publicurl }}"
-# internalurl: "{{ item.internalurl }}"
-# adminurl: "{{ item.adminurl }}"
-# with_items: "{{ os_services }}"
-# when: inventory_hostname == groups['controller'][0]
-
-- name: update api-paste.ini
- template: src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes
-
-# - name: update proxy-server conf
-# template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
-
-# restart nova
-- name: restart nova
- service: name={{ item }} state=restarted enabled=yes
- with_items:
- - nova-api
- - nova-cert
- - nova-conductor
- - nova-consoleauth
- - nova-scheduler
-
-# restart swift
-# - name: restart swift
-# service: name={{ item }} state=restarted enabled=yes
-# with_items:
-# - swift-proxy
-# - memcached
diff --git a/deploy/adapters/ansible/roles/moon/tasks/moon.yml b/deploy/adapters/ansible/roles/moon/tasks/moon.yml
index 40e1c98c..54000145 100644..100755
--- a/deploy/adapters/ansible/roles/moon/tasks/moon.yml
+++ b/deploy/adapters/ansible/roles/moon/tasks/moon.yml
@@ -1,5 +1,5 @@
#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -7,10 +7,205 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- include_vars: "{{ ansible_os_family }}.yml"
+- name: setup sources.list
+ remote_user: root
+ copy:
+ src: sources.list
+ dest: /etc/apt/sources.list
+ mode: "u=rw,g=r,o=r"
-- include: moon-controller.yml
- when: inventory_hostname in groups['controller']
+- name: rm pip.conf
+ file:
+ path: /root/.pip/pip.conf
+ state: absent
-- include: moon-compute.yml
- when: inventory_hostname in groups['compute']
+- name: check docker.list stat
+ stat:
+ path: /etc/apt/sources.list.d/docker.list
+ register: stat_result
+
+# [WARNING]: Consider using get_url or uri module rather than running wget
+- name: install docker
+ remote_user: root
+ shell: wget -qO- https://get.docker.com/ | sh
+ when: stat_result.stat.exists == False
+
+- name: remove conflicting packages
+ remote_user: root
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ with_items:
+ - python3-pkg-resources
+
+- name: install dependent packages
+ remote_user: root
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items:
+ - git
+ - python3-dev
+ - python3-pip
+ - python3-venv
+ - python3-setuptools
+
+- name: make ubuntu dir
+ file:
+ path: /home/ubuntu
+ state: directory
+ mode: 0755
+
+- name: check moon code dir exists
+ stat:
+ path: /home/ubuntu/moon
+ register: stat_result
+
+- name: fetch moon source code
+ shell: git clone https://git.opnfv.org/moon /home/ubuntu/moon
+ when: stat_result.stat.exists == False
+
+- name: create moon etc conf
+ remote_user: root
+ file:
+ src: /home/ubuntu/moon/moonv4/moon_orchestrator/conf
+ dest: /etc/moon
+ state: link
+
+- name: configure moon.conf
+ lineinfile:
+ dest: /etc/moon/moon.conf
+ regexp: '{{ item.regexp }}'
+ line: '{{ item.line }}'
+ with_items:
+ - regexp: '^dist_dir=.*'
+ line: 'dist_dir=/home/ubuntu/moon/moonv4/moon_orchestrator/dist'
+ - regexp: '^password=.*'
+ line: 'password={{ ADMIN_PASS }}'
+
+- name: configure moon.conf for moon slave
+ lineinfile:
+ dest: /etc/moon/moon.conf
+ regexp: '{{ item.regexp }}'
+ line: '{{ item.line }}'
+ with_items:
+ - regexp: '^slave_name=.*'
+ line: 'slave_name={{ moon_slave_name }}'
+ - regexp: '^master_url=.*'
+ line: 'master_url=rabbit://moon:p4sswOrd1@{{ moon_master_ip }}/moon'
+ - regexp: '^master_login.*'
+ line: 'master_login=moon'
+ - regexp: '^master_password=.*'
+ line: 'master_password=p4sswOrd1'
+ when: moon_slave == "Enable"
+
+- name: list container networks
+  shell: docker network ls --format {{ '{{' }}.Name{{ '}}' }}
+ register: result
+
+- name: dump docker network list
+ debug:
+ msg: "{{ result.stdout }}"
+
+- name: create moon network
+ shell: docker network create -d bridge --subnet=172.18.0.0/16 --gateway=172.18.0.1 moon
+ when: result.stdout.find("moon") == -1
+
+- name: list containers
+  shell: docker ps --format {{ '{{' }}.Names{{ '}}' }}
+ register: result
+
+- name: start rabbitmq
+ shell: |
+ docker run -dti \
+ --net=moon \
+ --hostname messenger \
+ --name messenger \
+ --link messenger:messenger \
+ -e RABBITMQ_DEFAULT_USER=moon \
+ -e RABBITMQ_DEFAULT_PASS=p4sswOrd1 \
+ -e RABBITMQ_NODENAME=rabbit@messenger \
+ -e RABBITMQ_DEFAULT_VHOST=moon \
+ -p 5671:5671 \
+ -p 5672:5672 \
+ rabbitmq:3-management
+ when: result.stdout.find("messenger") == -1
+
+- name: dump running container list
+ debug:
+ msg: "{{ result.stdout }}"
+
+- name: start mysql
+  shell: docker run -dti
+    --net=moon
+    --hostname db
+    --name db
+    -e MYSQL_ROOT_PASSWORD=p4sswOrd1
+    -e MYSQL_DATABASE=moon
+    -e MYSQL_USER=moon
+    -e MYSQL_PASSWORD=p4sswOrd1
+    -p 3306:3306
+ mysql:latest
+ when: result.stdout.find("db") == -1
+
+- name: set messenger and db host names
+ remote_user: root
+ lineinfile:
+ dest: /etc/hosts
+ regexp: 'messenger'
+ line: '127.0.0.1 messenger db'
+
+- name: set keystone host name
+ remote_user: root
+ lineinfile:
+ dest: /etc/hosts
+ regexp: 'keystone'
+ line: '{{ internal_vip.ip }} keystone'
+
+- name: update pip
+ remote_user: root
+ shell: pip3 install pip --upgrade
+
+- name: run moon build_all.sh
+ shell: |
+ export MOON_HOME=/home/ubuntu/moon/moonv4;
+ cd ${MOON_HOME}/bin;
+ source build_all.sh;
+ args:
+ executable: /bin/bash
+
+- name: modify moon start.sh
+ lineinfile:
+ dest: /home/ubuntu/moon/moonv4/bin/start.sh
+ state: absent
+ regexp: "{{ item }}"
+ with_items:
+ - "^sleep 20s"
+ - "^.*starting Keystone container.*"
+ - "^docker run .*keystone.*"
+
+- name: run moon start.sh
+ shell: |
+ export MOON_HOME=/home/ubuntu/moon/moonv4;
+ cd ${MOON_HOME}/moon_orchestrator;
+ if [ ! -d tests/venv ]; then
+ pyvenv tests/venv;
+ source tests/venv/bin/activate;
+ pip3 install -r requirements.txt --upgrade;
+ pip3 install dist/moon_db-0.1.0.tar.gz --upgrade;
+ pip3 install dist/moon_utilities-0.1.0.tar.gz --upgrade;
+ pip3 install . --upgrade;
+ cd ../moon_db/;
+ pip3 install -r requirements.txt --upgrade;
+ else
+ source tests/venv/bin/activate;
+ fi
+    killall -9 /home/ubuntu/moon/moonv4/moon_orchestrator/tests/venv/bin/python3.5
+    sleep 3
+    ( ( nohup ../bin/start.sh > /tmp/start.log 2>&1 ) & );
+ args:
+ executable: /bin/bash
+
+- name: check effective remote user
+  shell: whoami
+  register: whoami_out
+
+- name: dump effective remote user
+  debug:
+    msg: "{{ whoami_out.stdout }}"
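
The rewritten moon.yml above brings up the dockerized moonv4 stack: a dedicated moon bridge network, a messenger (RabbitMQ) container and a db (MySQL) container, followed by build_all.sh and start.sh from the cloned repository. A minimal, hypothetical post-check task list that could be appended to confirm the containers are reachable before the build and start steps are relied on; 127.0.0.1 as the probe host and the 60 second timeout are assumptions, while the container names and the 5672/3306 ports are taken from the docker run arguments above:

---
# Hypothetical post-checks for the dockerized moon bring-up sketched above.
# Probe host and timeout are assumptions; names and ports come from the
# docker run commands in tasks/moon.yml.
- name: confirm the moon bridge network exists
  command: docker network inspect moon
  changed_when: false

- name: confirm the messenger and db containers are running
  shell: docker ps | grep -w {{ item }}
  changed_when: false
  with_items:
    - messenger
    - db

- name: wait for the published RabbitMQ and MySQL ports
  wait_for:
    host: 127.0.0.1
    port: "{{ item }}"
    timeout: 60
  with_items:
    - 5672
    - 3306
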
diff --git a/deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh b/deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh
deleted file mode 100644
index 6ba620ff..00000000
--- a/deploy/adapters/ansible/roles/moon/templates/admin-openrc.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# Verify the Identity Service installation
-export OS_PASSWORD={{ ADMIN_PASS }}
-export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
-export OS_USERNAME=admin
-export OS_VOLUME_API_VERSION=2
-
diff --git a/deploy/adapters/ansible/roles/moon/templates/api-paste.ini b/deploy/adapters/ansible/roles/moon/templates/api-paste.ini
deleted file mode 100644
index f99689b7..00000000
--- a/deploy/adapters/ansible/roles/moon/templates/api-paste.ini
+++ /dev/null
@@ -1,106 +0,0 @@
-############
-# Metadata #
-############
-[composite:metadata]
-use = egg:Paste#urlmap
-/: meta
-
-[pipeline:meta]
-pipeline = cors metaapp
-
-[app:metaapp]
-paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
-
-#############
-# OpenStack #
-#############
-
-[composite:osapi_compute]
-use = call:nova.api.openstack.urlmap:urlmap_factory
-/: oscomputeversions
-# starting in Liberty the v21 implementation replaces the v2
-# implementation and is suggested that you use it as the default. If
-# this causes issues with your clients you can rollback to the
-# *frozen* v2 api by commenting out the above stanza and using the
-# following instead::
-# /v2: openstack_compute_api_legacy_v2
-# if rolling back to v2 fixes your issue please file a critical bug
-# at - https://bugs.launchpad.net/nova/+bugs
-#
-# v21 is an exactly feature match for v2, except it has more stringent
-# input validation on the wsgi surface (prevents fuzzing early on the
-# API). It also provides new features via API microversions which are
-# opt into for clients. Unaware clients will receive the same frozen
-# v2 API feature set, but with some relaxed validation
-/v2: openstack_compute_api_v21_legacy_v2_compatible
-/v2.1: openstack_compute_api_v21
-
-# NOTE: this is deprecated in favor of openstack_compute_api_v21_legacy_v2_compatible
-[composite:openstack_compute_api_legacy_v2]
-use = call:nova.api.auth:pipeline_factory
-noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_ratelimit osapi_compute_app_legacy_v2
-keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext moon legacy_ratelimit osapi_compute_app_legacy_v2
-keystone_nolimit = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_legacy_v2
-
-[composite:openstack_compute_api_v21]
-use = call:nova.api.auth:pipeline_factory_v21
-noauth2 = cors compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
-keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
-
-[composite:openstack_compute_api_v21_legacy_v2_compatible]
-use = call:nova.api.auth:pipeline_factory_v21
-noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
-keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:compute_req_id]
-paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
-
-[filter:faultwrap]
-paste.filter_factory = nova.api.openstack:FaultWrapper.factory
-
-[filter:noauth2]
-paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
-
-[filter:legacy_ratelimit]
-paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
-
-[filter:sizelimit]
-paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory
-
-[filter:legacy_v2_compatible]
-paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory
-
-[app:osapi_compute_app_legacy_v2]
-paste.app_factory = nova.api.openstack.compute:APIRouter.factory
-
-[app:osapi_compute_app_v21]
-paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
-
-[pipeline:oscomputeversions]
-pipeline = faultwrap oscomputeversionapp
-
-[app:oscomputeversionapp]
-paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
-
-##########
-# Shared #
-##########
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = nova
-
-[filter:keystonecontext]
-paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:moon]
-paste.filter_factory = keystonemiddleware.moon_agent:filter_factory
-authz_login=admin
-authz_password=password
-logfile=/var/log/moon/keystonemiddleware.log
diff --git a/deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh b/deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh
deleted file mode 100644
index 5807e868..00000000
--- a/deploy/adapters/ansible/roles/moon/templates/demo-openrc.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-export OS_USERNAME=demo
-export OS_PASSWORD={{ DEMO_PASS }}
-export OS_TENANT_NAME=demo
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
-
diff --git a/deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini b/deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini
deleted file mode 100644
index cd9ebede..00000000
--- a/deploy/adapters/ansible/roles/moon/templates/keystone-paste.ini
+++ /dev/null
@@ -1,96 +0,0 @@
-# Keystone PasteDeploy configuration file.
-
-[pipeline:moon_pipeline]
-pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service
-
-[app:moon_service]
-use = egg:keystone#moon_service
-
-[filter:debug]
-use = egg:oslo.middleware#debug
-
-[filter:request_id]
-use = egg:oslo.middleware#request_id
-
-[filter:build_auth_context]
-use = egg:keystone#build_auth_context
-
-[filter:token_auth]
-use = egg:keystone#token_auth
-
-[filter:admin_token_auth]
-# This is deprecated in the M release and will be removed in the O release.
-# Use `keystone-manage bootstrap` and remove this from the pipelines below.
-use = egg:keystone#admin_token_auth
-
-[filter:json_body]
-use = egg:keystone#json_body
-
-[filter:cors]
-use = egg:oslo.middleware#cors
-oslo_config_project = keystone
-
-[filter:ec2_extension]
-use = egg:keystone#ec2_extension
-
-[filter:ec2_extension_v3]
-use = egg:keystone#ec2_extension_v3
-
-[filter:s3_extension]
-use = egg:keystone#s3_extension
-
-[filter:url_normalize]
-use = egg:keystone#url_normalize
-
-[filter:sizelimit]
-use = egg:oslo.middleware#sizelimit
-
-[app:public_service]
-use = egg:keystone#public_service
-
-[app:service_v3]
-use = egg:keystone#service_v3
-
-[app:admin_service]
-use = egg:keystone#admin_service
-
-[pipeline:public_api]
-# The last item in this pipeline must be public_service or an equivalent
-# application. It cannot be a filter.
-pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension public_service
-
-[pipeline:admin_api]
-# The last item in this pipeline must be admin_service or an equivalent
-# application. It cannot be a filter.
-pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension s3_extension admin_service
-
-[pipeline:api_v3]
-# The last item in this pipeline must be service_v3 or an equivalent
-# application. It cannot be a filter.
-pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3
-
-[app:public_version_service]
-use = egg:keystone#public_version_service
-
-[app:admin_version_service]
-use = egg:keystone#admin_version_service
-
-[pipeline:public_version_api]
-pipeline = cors sizelimit url_normalize public_version_service
-
-[pipeline:admin_version_api]
-pipeline = cors sizelimit url_normalize admin_version_service
-
-[composite:main]
-use = egg:Paste#urlmap
-/moon = moon_pipeline
-/v2.0 = public_api
-/v3 = api_v3
-/ = public_version_api
-
-[composite:admin]
-use = egg:Paste#urlmap
-/moon = moon_pipeline
-/v2.0 = admin_api
-/v3 = api_v3
-/ = admin_version_api
diff --git a/deploy/adapters/ansible/roles/moon/templates/keystone.conf b/deploy/adapters/ansible/roles/moon/templates/keystone.conf
deleted file mode 100644
index 649fc32c..00000000
--- a/deploy/adapters/ansible/roles/moon/templates/keystone.conf
+++ /dev/null
@@ -1,59 +0,0 @@
-{% set memcached_servers = [] %}
-{% set rabbitmq_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
-[DEFAULT]
-admin_token={{ ADMIN_TOKEN }}
-debug={{ DEBUG }}
-log_dir = /var/log/keystone
-
-[cache]
-backend=keystone.cache.memcache_pool
-memcache_servers={{ memcached_servers}}
-enabled=true
-
-[revoke]
-driver=sql
-expiration_buffer=3600
-caching=true
-
-[database]
-connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8
-idle_timeout=30
-min_pool_size=5
-max_pool_size=120
-pool_timeout=30
-
-
-[identity]
-default_domain_id=default
-driver=sql
-
-[assignment]
-driver=sql
-
-[resource]
-driver=sql
-caching=true
-cache_time=3600
-
-[token]
-enforce_token_bind=permissive
-expiration=43200
-provider=uuid
-driver=sql
-caching=true
-cache_time=3600
-
-[eventlet_server]
-public_bind_host= {{ identity_host }}
-admin_bind_host= {{ identity_host }}
-
-[oslo_messaging_rabbit]
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_hosts = {{ rabbitmq_servers }}
diff --git a/deploy/adapters/ansible/roles/moon/templates/proxy-server.conf b/deploy/adapters/ansible/roles/moon/templates/proxy-server.conf
deleted file mode 100644
index 9bea7a8e..00000000
--- a/deploy/adapters/ansible/roles/moon/templates/proxy-server.conf
+++ /dev/null
@@ -1,775 +0,0 @@
-{% set memcached_servers = [] %}
-{% for host in haproxy_hosts.values() %}
-{% set _ = memcached_servers.append('%s:11211'% host) %}
-{% endfor %}
-{% set memcached_servers = memcached_servers|join(',') %}
-[DEFAULT]
-bind_ip = {{ internal_ip }}
-bind_port = 8080
-# bind_timeout = 30
-# backlog = 4096
-swift_dir = /etc/swift
-user = swift
-
-# Enables exposing configuration settings via HTTP GET /info.
-# expose_info = true
-
-# Key to use for admin calls that are HMAC signed. Default is empty,
-# which will disable admin calls to /info.
-# admin_key = secret_admin_key
-#
-# Allows the ability to withhold sections from showing up in the public calls
-# to /info. You can withhold subsections by separating the dict level with a
-# ".". The following would cause the sections 'container_quotas' and 'tempurl'
-# to not be listed, and the key max_failed_deletes would be removed from
-# bulk_delete. Default value is 'swift.valid_api_versions' which allows all
-# registered features to be listed via HTTP GET /info except
-# swift.valid_api_versions information
-# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl
-
-# Use an integer to override the number of pre-forked processes that will
-# accept connections. Should default to the number of effective cpu
-# cores in the system. It's worth noting that individual workers will
-# use many eventlet co-routines to service multiple concurrent requests.
-# workers = auto
-#
-# Maximum concurrent requests per worker
-# max_clients = 1024
-#
-# Set the following two lines to enable SSL. This is for testing only.
-# cert_file = /etc/swift/proxy.crt
-# key_file = /etc/swift/proxy.key
-#
-# expiring_objects_container_divisor = 86400
-# expiring_objects_account_name = expiring_objects
-#
-# You can specify default log routing here if you want:
-# log_name = swift
-# log_facility = LOG_LOCAL0
-# log_level = INFO
-# log_headers = false
-# log_address = /dev/log
-# The following caps the length of log lines to the value given; no limit if
-# set to 0, the default.
-# log_max_line_length = 0
-#
-# This optional suffix (default is empty) that would be appended to the swift transaction
-# id allows one to easily figure out from which cluster that X-Trans-Id belongs to.
-# This is very useful when one is managing more than one swift cluster.
-# trans_id_suffix =
-#
-# comma separated list of functions to call to setup custom log handlers.
-# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
-# adapted_logger
-# log_custom_handlers =
-#
-# If set, log_udp_host will override log_address
-# log_udp_host =
-# log_udp_port = 514
-#
-# You can enable StatsD logging here:
-# log_statsd_host =
-# log_statsd_port = 8125
-# log_statsd_default_sample_rate = 1.0
-# log_statsd_sample_rate_factor = 1.0
-# log_statsd_metric_prefix =
-#
-# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
-# cors_allow_origin =
-# strict_cors_mode = True
-#
-# client_timeout = 60
-# eventlet_debug = false
-
-[pipeline:main]
-# This sample pipeline uses tempauth and is used for SAIO dev work and
-# testing. See below for a pipeline using keystone.
-#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
-pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging moon proxy-server
-
-# The following pipeline shows keystone integration. Comment out the one
-# above and uncomment this one. Additional steps for integrating keystone are
-# covered further below in the filter sections for authtoken and keystoneauth.
-#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
-
-[app:proxy-server]
-use = egg:swift#proxy
-account_autocreate = True
-# You can override the default log routing for this app here:
-# set log_name = proxy-server
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_address = /dev/log
-#
-# log_handoffs = true
-# recheck_account_existence = 60
-# recheck_container_existence = 60
-# object_chunk_size = 65536
-# client_chunk_size = 65536
-#
-# How long the proxy server will wait on responses from the a/c/o servers.
-# node_timeout = 10
-#
-# How long the proxy server will wait for an initial response and to read a
-# chunk of data from the object servers while serving GET / HEAD requests.
-# Timeouts from these requests can be recovered from so setting this to
-# something lower than node_timeout would provide quicker error recovery
-# while allowing for a longer timeout for non-recoverable requests (PUTs).
-# Defaults to node_timeout, should be overriden if node_timeout is set to a
-# high number to prevent client timeouts from firing before the proxy server
-# has a chance to retry.
-# recoverable_node_timeout = node_timeout
-#
-# conn_timeout = 0.5
-#
-# How long to wait for requests to finish after a quorum has been established.
-# post_quorum_timeout = 0.5
-#
-# How long without an error before a node's error count is reset. This will
-# also be how long before a node is reenabled after suppression is triggered.
-# error_suppression_interval = 60
-#
-# How many errors can accumulate before a node is temporarily ignored.
-# error_suppression_limit = 10
-#
-# If set to 'true' any authorized user may create and delete accounts; if
-# 'false' no one, even authorized, can.
-# allow_account_management = false
-#
-# Set object_post_as_copy = false to turn on fast posts where only the metadata
-# changes are stored anew and the original data file is kept in place. This
-# makes for quicker posts.
-# object_post_as_copy = true
-#
-# If set to 'true' authorized accounts that do not yet exist within the Swift
-# cluster will be automatically created.
-# account_autocreate = false
-#
-# If set to a positive value, trying to create a container when the account
-# already has at least this maximum containers will result in a 403 Forbidden.
-# Note: This is a soft limit, meaning a user might exceed the cap for
-# recheck_account_existence before the 403s kick in.
-# max_containers_per_account = 0
-#
-# This is a comma separated list of account hashes that ignore the
-# max_containers_per_account cap.
-# max_containers_whitelist =
-#
-# Comma separated list of Host headers to which the proxy will deny requests.
-# deny_host_headers =
-#
-# Prefix used when automatically creating accounts.
-# auto_create_account_prefix = .
-#
-# Depth of the proxy put queue.
-# put_queue_depth = 10
-#
-# Storage nodes can be chosen at random (shuffle), by using timing
-# measurements (timing), or by using an explicit match (affinity).
-# Using timing measurements may allow for lower overall latency, while
-# using affinity allows for finer control. In both the timing and
-# affinity cases, equally-sorting nodes are still randomly chosen to
-# spread load.
-# The valid values for sorting_method are "affinity", "shuffle", or "timing".
-# sorting_method = shuffle
-#
-# If the "timing" sorting_method is used, the timings will only be valid for
-# the number of seconds configured by timing_expiry.
-# timing_expiry = 300
-#
-# By default on a GET/HEAD swift will connect to a storage node one at a time
-# in a single thread. There is smarts in the order they are hit however. If you
-# turn on concurrent_gets below, then replica count threads will be used.
-# With addition of the concurrency_timeout option this will allow swift to send
-# out GET/HEAD requests to the storage nodes concurrently and answer with the
-# first to respond. With an EC policy the parameter only affects HEAD requests.
-# concurrent_gets = off
-#
-# This parameter controls how long to wait before firing off the next
-# concurrent_get thread. A value of 0 would be fully concurrent, any other
-# number will stagger the firing of the threads. This number should be
-# between 0 and node_timeout. The default is what ever you set for the
-# conn_timeout parameter.
-# concurrency_timeout = 0.5
-#
-# Set to the number of nodes to contact for a normal request. You can use
-# '* replicas' at the end to have it use the number given times the number of
-# replicas for the ring being used for the request.
-# request_node_count = 2 * replicas
-#
-# Which backend servers to prefer on reads. Format is r<N> for region
-# N or r<N>z<M> for region N, zone M. The value after the equals is
-# the priority; lower numbers are higher priority.
-#
-# Example: first read from region 1 zone 1, then region 1 zone 2, then
-# anything in region 2, then everything else:
-# read_affinity = r1z1=100, r1z2=200, r2=300
-# Default is empty, meaning no preference.
-# read_affinity =
-#
-# Which backend servers to prefer on writes. Format is r<N> for region
-# N or r<N>z<M> for region N, zone M. If this is set, then when
-# handling an object PUT request, some number (see setting
-# write_affinity_node_count) of local backend servers will be tried
-# before any nonlocal ones.
-#
-# Example: try to write to regions 1 and 2 before writing to any other
-# nodes:
-# write_affinity = r1, r2
-# Default is empty, meaning no preference.
-# write_affinity =
-#
-# The number of local (as governed by the write_affinity setting)
-# nodes to attempt to contact first, before any non-local ones. You
-# can use '* replicas' at the end to have it use the number given
-# times the number of replicas for the ring being used for the
-# request.
-# write_affinity_node_count = 2 * replicas
-#
-# These are the headers whose values will only be shown to swift_owners. The
-# exact definition of a swift_owner is up to the auth system in use, but
-# usually indicates administrative responsibilities.
-# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control
-
-[filter:tempauth]
-use = egg:swift#tempauth
-# You can override the default log routing for this filter here:
-# set log_name = tempauth
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# The reseller prefix will verify a token begins with this prefix before even
-# attempting to validate it. Also, with authorization, only Swift storage
-# accounts with this prefix will be authorized by this middleware. Useful if
-# multiple auth systems are in use for one Swift cluster.
-# The reseller_prefix may contain a comma separated list of items. The first
-# item is used for the token as mentioned above. If second and subsequent
-# items exist, the middleware will handle authorization for an account with
-# that prefix. For example, for prefixes "AUTH, SERVICE", a path of
-# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty
-# (blank) reseller prefix is required, it must be first in the list. Two
-# single quote characters indicates an empty (blank) reseller prefix.
-# reseller_prefix = AUTH
-
-#
-# The require_group parameter names a group that must be presented by
-# either X-Auth-Token or X-Service-Token. Usually this parameter is
-# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah).
-# By default, no group is needed. Do not use .admin.
-# require_group =
-
-# The auth prefix will cause requests beginning with this prefix to be routed
-# to the auth subsystem, for granting tokens, etc.
-# auth_prefix = /auth/
-# token_life = 86400
-#
-# This allows middleware higher in the WSGI pipeline to override auth
-# processing, useful for middleware such as tempurl and formpost. If you know
-# you're not going to use such middleware and you want a bit of extra security,
-# you can set this to false.
-# allow_overrides = true
-#
-# This specifies what scheme to return with storage urls:
-# http, https, or default (chooses based on what the server is running as)
-# This can be useful with an SSL load balancer in front of a non-SSL server.
-# storage_url_scheme = default
-#
-# Lastly, you need to list all the accounts/users you want here. The format is:
-# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
-# or if you want underscores in <account> or <user>, you can base64 encode them
-# (with no equal signs) and use this format:
-# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
-# There are special groups of:
-# .reseller_admin = can do anything to any account for this auth
-# .admin = can do anything within the account
-# If neither of these groups are specified, the user can only access containers
-# that have been explicitly allowed for them by a .admin or .reseller_admin.
-# The trailing optional storage_url allows you to specify an alternate url to
-# hand back to the user upon authentication. If not specified, this defaults to
-# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
-# to what the requester would need to use to reach this host.
-# Here are example entries, required for running the tests:
-user_admin_admin = admin .admin .reseller_admin
-user_test_tester = testing .admin
-user_test2_tester2 = testing2 .admin
-user_test_tester3 = testing3
-user_test5_tester5 = testing5 service
-
-# To enable Keystone authentication you need to have the auth token
-# middleware first to be configured. Here is an example below, please
-# refer to the keystone's documentation for details about the
-# different settings.
-#
-# You'll also need to have the keystoneauth middleware enabled and have it in
-# your main pipeline, as show in the sample pipeline at the top of this file.
-#
-# Following parameters are known to work with keystonemiddleware v2.3.0
-# (above v2.0.0), but checking the latest information in the wiki page[1]
-# is recommended.
-# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration
-#
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-identity_uri = http://{{ internal_vip.ip }}:35357
-memcached_servers = {{ memcached_servers }}
-#auth_plugin = password
-auth_type = password
-project_domain_id = default
-user_domain_id = default
-project_name = service
-username = swift
-password = {{ CINDER_PASS }}
-delay_auth_decision = True
-admin_user=admin
-admin_password={{ ADMIN_PASS }}
-admin_token={{ ADMIN_TOKEN }}
-#
-# delay_auth_decision defaults to False, but leaving it as false will
-# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from
-# working. This value must be explicitly set to True.
-# delay_auth_decision = False
-#
-# cache = swift.cache
-# include_service_catalog = False
-#
-[filter:keystoneauth]
-use = egg:swift#keystoneauth
-operator_roles = admin,user
-# The reseller_prefix option lists account namespaces that this middleware is
-# responsible for. The prefix is placed before the Keystone project id.
-# For example, for project 12345678, and prefix AUTH, the account is
-# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...).
-# Several prefixes are allowed by specifying a comma-separated list
-# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a
-# single blank/empty prefix. If an empty prefix is required in a list of
-# prefixes, a value of '' (two single quote characters) indicates a
-# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_')
-# character is appended to the value unless already present.
-# reseller_prefix = AUTH
-#
-# The user must have at least one role named by operator_roles on a
-# project in order to create, delete and modify containers and objects
-# and to set and read privileged headers such as ACLs.
-# If there are several reseller prefix items, you can prefix the
-# parameter so it applies only to those accounts (for example
-# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project>
-# path). If you omit the prefix, the option applies to all reseller
-# prefix items. For the blank/empty prefix, prefix with '' (do not put
-# underscore after the two single quote characters).
-# operator_roles = admin, swiftoperator
-#
-# The reseller admin role has the ability to create and delete accounts
-# reseller_admin_role = ResellerAdmin
-#
-# This allows middleware higher in the WSGI pipeline to override auth
-# processing, useful for middleware such as tempurl and formpost. If you know
-# you're not going to use such middleware and you want a bit of extra security,
-# you can set this to false.
-# allow_overrides = true
-#
-# If the service_roles parameter is present, an X-Service-Token must be
-# present in the request that when validated, grants at least one role listed
-# in the parameter. The X-Service-Token may be scoped to any project.
-# If there are several reseller prefix items, you can prefix the
-# parameter so it applies only to those accounts (for example
-# the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project>
-# path). If you omit the prefix, the option applies to all reseller
-# prefix items. For the blank/empty prefix, prefix with '' (do not put
-# underscore after the two single quote characters).
-# By default, no service_roles are required.
-# service_roles =
-#
-# For backwards compatibility, keystoneauth will match names in cross-tenant
-# access control lists (ACLs) when both the requesting user and the tenant
-# are in the default domain i.e the domain to which existing tenants are
-# migrated. The default_domain_id value configured here should be the same as
-# the value used during migration of tenants to keystone domains.
-# default_domain_id = default
-#
-# For a new installation, or an installation in which keystone projects may
-# move between domains, you should disable backwards compatible name matching
-# in ACLs by setting allow_names_in_acls to false:
-# allow_names_in_acls = true
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-# An optional filesystem path, which if present, will cause the healthcheck
-# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
-# This facility may be used to temporarily remove a Swift node from a load
-# balancer pool during maintenance or upgrade (remove the file to allow the
-# node back into the load balancer pool).
-# disable_path =
-
-[filter:cache]
-use = egg:swift#memcache
-memcache_servers = {{ memcached_servers }}
-# You can override the default log routing for this filter here:
-# set log_name = cache
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# If not set here, the value for memcache_servers will be read from
-# memcache.conf (see memcache.conf-sample) or lacking that file, it will
-# default to the value below. You can specify multiple servers separated with
-# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
-# follow rfc3986 section-3.2.2, i.e. [::1]:11211)
-# memcache_servers = 127.0.0.1:11211
-#
-# Sets how memcache values are serialized and deserialized:
-# 0 = older, insecure pickle serialization
-# 1 = json serialization but pickles can still be read (still insecure)
-# 2 = json serialization only (secure and the default)
-# If not set here, the value for memcache_serialization_support will be read
-# from /etc/swift/memcache.conf (see memcache.conf-sample).
-# To avoid an instant full cache flush, existing installations should
-# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
-# set to 2 and reload.
-# In the future, the ability to use pickle serialization will be removed.
-# memcache_serialization_support = 2
-#
-# Sets the maximum number of connections to each memcached server per worker
-# memcache_max_connections = 2
-#
-# More options documented in memcache.conf-sample
-
-[filter:ratelimit]
-use = egg:swift#ratelimit
-# You can override the default log routing for this filter here:
-# set log_name = ratelimit
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# clock_accuracy should represent how accurate the proxy servers' system clocks
-# are with each other. 1000 means that all the proxies' clock are accurate to
-# each other within 1 millisecond. No ratelimit should be higher than the
-# clock accuracy.
-# clock_accuracy = 1000
-#
-# max_sleep_time_seconds = 60
-#
-# log_sleep_time_seconds of 0 means disabled
-# log_sleep_time_seconds = 0
-#
-# allows for slow rates (e.g. running up to 5 sec's behind) to catch up.
-# rate_buffer_seconds = 5
-#
-# account_ratelimit of 0 means disabled
-# account_ratelimit = 0
-
-# DEPRECATED- these will continue to work but will be replaced
-# by the X-Account-Sysmeta-Global-Write-Ratelimit flag.
-# Please see ratelimiting docs for details.
-# these are comma separated lists of account names
-# account_whitelist = a,b
-# account_blacklist = c,d
-
-# with container_limit_x = r
-# for containers of size x limit write requests per second to r. The container
-# rate will be linearly interpolated from the values given. With the values
-# below, a container of size 5 will get a rate of 75.
-# container_ratelimit_0 = 100
-# container_ratelimit_10 = 50
-# container_ratelimit_50 = 20
-
-# Similarly to the above container-level write limits, the following will limit
-# container GET (listing) requests.
-# container_listing_ratelimit_0 = 100
-# container_listing_ratelimit_10 = 50
-# container_listing_ratelimit_50 = 20
-
-[filter:domain_remap]
-use = egg:swift#domain_remap
-# You can override the default log routing for this filter here:
-# set log_name = domain_remap
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# storage_domain = example.com
-# path_root = v1
-
-# Browsers can convert a host header to lowercase, so check that reseller
-# prefix on the account is the correct case. This is done by comparing the
-# items in the reseller_prefixes config option to the found prefix. If they
-# match except for case, the item from reseller_prefixes will be used
-# instead of the found reseller prefix. When none match, the default reseller
-# prefix is used. When no default reseller prefix is configured, any request
-# with an account prefix not in that list will be ignored by this middleware.
-# reseller_prefixes = AUTH
-# default_reseller_prefix =
-
-[filter:catch_errors]
-use = egg:swift#catch_errors
-# You can override the default log routing for this filter here:
-# set log_name = catch_errors
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-[filter:cname_lookup]
-# Note: this middleware requires python-dnspython
-use = egg:swift#cname_lookup
-# You can override the default log routing for this filter here:
-# set log_name = cname_lookup
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-#
-# Specify the storage_domain that match your cloud, multiple domains
-# can be specified separated by a comma
-# storage_domain = example.com
-#
-# lookup_depth = 1
-
-# Note: Put staticweb just after your auth filter(s) in the pipeline
-[filter:staticweb]
-use = egg:swift#staticweb
-# You can override the default log routing for this filter here:
-# set log_name = staticweb
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
-[filter:tempurl]
-use = egg:swift#tempurl
-# The methods allowed with Temp URLs.
-# methods = GET HEAD PUT POST DELETE
-#
-# The headers to remove from incoming requests. Simply a whitespace delimited
-# list of header names and names can optionally end with '*' to indicate a
-# prefix match. incoming_allow_headers is a list of exceptions to these
-# removals.
-# incoming_remove_headers = x-timestamp
-#
-# The headers allowed as exceptions to incoming_remove_headers. Simply a
-# whitespace delimited list of header names and names can optionally end with
-# '*' to indicate a prefix match.
-# incoming_allow_headers =
-#
-# The headers to remove from outgoing responses. Simply a whitespace delimited
-# list of header names and names can optionally end with '*' to indicate a
-# prefix match. outgoing_allow_headers is a list of exceptions to these
-# removals.
-# outgoing_remove_headers = x-object-meta-*
-#
-# The headers allowed as exceptions to outgoing_remove_headers. Simply a
-# whitespace delimited list of header names and names can optionally end with
-# '*' to indicate a prefix match.
-# outgoing_allow_headers = x-object-meta-public-*
-
-# Note: Put formpost just before your auth filter(s) in the pipeline
-[filter:formpost]
-use = egg:swift#formpost
-
-# Note: Just needs to be placed before the proxy-server in the pipeline.
-[filter:name_check]
-use = egg:swift#name_check
-# forbidden_chars = '"`<>
-# maximum_length = 255
-# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
-
-[filter:list-endpoints]
-use = egg:swift#list_endpoints
-# list_endpoints_path = /endpoints/
-
-[filter:proxy-logging]
-use = egg:swift#proxy_logging
-# If not set, logging directives from [DEFAULT] without "access_" will be used
-# access_log_name = swift
-# access_log_facility = LOG_LOCAL0
-# access_log_level = INFO
-# access_log_address = /dev/log
-#
-# If set, access_log_udp_host will override access_log_address
-# access_log_udp_host =
-# access_log_udp_port = 514
-#
-# You can use log_statsd_* from [DEFAULT] or override them here:
-# access_log_statsd_host =
-# access_log_statsd_port = 8125
-# access_log_statsd_default_sample_rate = 1.0
-# access_log_statsd_sample_rate_factor = 1.0
-# access_log_statsd_metric_prefix =
-# access_log_headers = false
-#
-# If access_log_headers is True and access_log_headers_only is set only
-# these headers are logged. Multiple headers can be defined as comma separated
-# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
-# access_log_headers_only =
-#
-# By default, the X-Auth-Token is logged. To obscure the value,
-# set reveal_sensitive_prefix to the number of characters to log.
-# For example, if set to 12, only the first 12 characters of the
-# token appear in the log. An unauthorized access of the log file
-# won't allow unauthorized usage of the token. However, the first
-# 12 or so characters is unique enough that you can trace/debug
-# token usage. Set to 0 to suppress the token completely (replaced
-# by '...' in the log).
-# Note: reveal_sensitive_prefix will not affect the value
-# logged with access_log_headers=True.
-# reveal_sensitive_prefix = 16
-#
-# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
-# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
-# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
-#
-# Note: The double proxy-logging in the pipeline is not a mistake. The
-# left-most proxy-logging is there to log requests that were handled in
-# middleware and never made it through to the right-most middleware (and
-# proxy server). Double logging is prevented for normal requests. See
-# proxy-logging docs.
-
-# Note: Put before both ratelimit and auth in the pipeline.
-[filter:bulk]
-use = egg:swift#bulk
-# max_containers_per_extraction = 10000
-# max_failed_extractions = 1000
-# max_deletes_per_request = 10000
-# max_failed_deletes = 1000
-
-# In order to keep a connection active during a potentially long bulk request,
-# Swift may return whitespace prepended to the actual response body. This
-# whitespace will be yielded no more than every yield_frequency seconds.
-# yield_frequency = 10
-
-# Note: The following parameter is used during a bulk delete of objects and
-# their container. This would frequently fail because it is very likely
-# that all replicated objects have not been deleted by the time the middleware got a
-# successful response. It can be configured the number of retries. And the
-# number of seconds to wait between each retry will be 1.5**retry
-
-# delete_container_retry_count = 0
-
-# Note: Put after auth and staticweb in the pipeline.
-[filter:slo]
-use = egg:swift#slo
-# max_manifest_segments = 1000
-# max_manifest_size = 2097152
-#
-# Rate limiting applies only to segments smaller than this size (bytes).
-# rate_limit_under_size = 1048576
-#
-# Start rate-limiting SLO segment serving after the Nth small segment of a
-# segmented object.
-# rate_limit_after_segment = 10
-#
-# Once segment rate-limiting kicks in for an object, limit segments served
-# to N per second. 0 means no rate-limiting.
-# rate_limit_segments_per_sec = 1
-#
-# Time limit on GET requests (seconds)
-# max_get_time = 86400
-
-# Note: Put after auth and staticweb in the pipeline.
-# If you don't put it in the pipeline, it will be inserted for you.
-[filter:dlo]
-use = egg:swift#dlo
-# Start rate-limiting DLO segment serving after the Nth segment of a
-# segmented object.
-# rate_limit_after_segment = 10
-#
-# Once segment rate-limiting kicks in for an object, limit segments served
-# to N per second. 0 means no rate-limiting.
-# rate_limit_segments_per_sec = 1
-#
-# Time limit on GET requests (seconds)
-# max_get_time = 86400
-
-# Note: Put after auth in the pipeline.
-[filter:container-quotas]
-use = egg:swift#container_quotas
-
-# Note: Put after auth in the pipeline.
-[filter:account-quotas]
-use = egg:swift#account_quotas
-
-[filter:gatekeeper]
-use = egg:swift#gatekeeper
-# Set this to false if you want to allow clients to set arbitrary X-Timestamps
-# on uploaded objects. This may be used to preserve timestamps when migrating
-# from a previous storage system, but risks allowing users to upload
-# difficult-to-delete data.
-# shunt_inbound_x_timestamp = true
-#
-# You can override the default log routing for this filter here:
-# set log_name = gatekeeper
-# set log_facility = LOG_LOCAL0
-# set log_level = INFO
-# set log_headers = false
-# set log_address = /dev/log
-
-[filter:container_sync]
-use = egg:swift#container_sync
-# Set this to false if you want to disallow any full url values to be set for
-# any new X-Container-Sync-To headers. This will keep any new full urls from
-# coming in, but won't change any existing values already in the cluster.
-# Updating those will have to be done manually, as knowing what the true realm
-# endpoint should be cannot always be guessed.
-# allow_full_urls = true
-# Set this to specify this clusters //realm/cluster as "current" in /info
-# current = //REALM/CLUSTER
-
-# Note: Put it at the beginning of the pipeline to profile all middleware. But
-# it is safer to put this after catch_errors, gatekeeper and healthcheck.
-[filter:xprofile]
-use = egg:swift#xprofile
-# This option enable you to switch profilers which should inherit from python
-# standard profiler. Currently the supported value can be 'cProfile',
-# 'eventlet.green.profile' etc.
-# profile_module = eventlet.green.profile
-#
-# This prefix will be used to combine process ID and timestamp to name the
-# profile data file. Make sure the executing user has permission to write
-# into this path (missing path segments will be created, if necessary).
-# If you enable profiling in more than one type of daemon, you must override
-# it with an unique value like: /var/log/swift/profile/proxy.profile
-# log_filename_prefix = /tmp/log/swift/profile/default.profile
-#
-# the profile data will be dumped to local disk based on above naming rule
-# in this interval.
-# dump_interval = 5.0
-#
-# Be careful, this option will enable profiler to dump data into the file with
-# time stamp which means there will be lots of files piled up in the directory.
-# dump_timestamp = false
-#
-# This is the path of the URL to access the mini web UI.
-# path = /__profile__
-#
-# Clear the data when the wsgi server shutdown.
-# flush_at_shutdown = false
-#
-# unwind the iterator of applications
-# unwind = false
-
-# Note: Put after slo, dlo in the pipeline.
-# If you don't put it in the pipeline, it will be inserted automatically.
-[filter:versioned_writes]
-use = egg:swift#versioned_writes
-# Enables using versioned writes middleware and exposing configuration
-# settings via HTTP GET /info.
-# WARNING: Setting this option bypasses the "allow_versions" option
-# in the container configuration file, which will be eventually
-# deprecated. See documentation for more details.
-# allow_versioned_writes = false
-
-
-[filter:moon]
-paste.filter_factory = keystonemiddleware.moon_agent:filter_factory
-authz_login=admin
-authz_password={{ ADMIN_PASS }}
-auth_host = {{ internal_vip.ip }}
-logfile=/var/log/moon/keystonemiddleware.log
diff --git a/deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2
deleted file mode 100644
index 64d864af..00000000
--- a/deploy/adapters/ansible/roles/moon/templates/wsgi-keystone.conf.j2
+++ /dev/null
@@ -1,46 +0,0 @@
- {% set work_threads = (ansible_processor_vcpus + 1) // 2 %}
-<VirtualHost {{ internal_ip }}:5000>
- WSGIDaemonProcess keystone-public processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-public
- WSGIScriptAlias / /usr/bin/keystone-wsgi-public
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/{{ http_service_name }}/keystone.log
- CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-
-<VirtualHost {{ internal_ip }}:35357>
- WSGIDaemonProcess keystone-admin processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP}
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/{{ http_service_name }}/keystone.log
- CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
diff --git a/deploy/adapters/ansible/roles/moon/vars/Debian.yml b/deploy/adapters/ansible/roles/moon/vars/Debian.yml
deleted file mode 100644
index 0da81179..00000000
--- a/deploy/adapters/ansible/roles/moon/vars/Debian.yml
+++ /dev/null
@@ -1,168 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-packages:
- - adduser
- - dbconfig-common
- - init-system-helpers
- - python-keystone
- - q-text-as-data
- - sqlite3
- - ssl-cert
- - debconf
- - lsb-base
- - python:any
- - libjs-sphinxdoc
- - python-pip
- - unzip
- - apache2
- - libapache2-mod-wsgi
-
-dependency_packages:
- - python-cryptography
- - python-dateutil
- - python-dogpile.cache
- - python-eventlet
- - python-greenlet
- - python-jsonschema
- - python-keystoneclient
- - python-keystonemiddleware
- - python-ldap
- - python-ldappool
- - python-lxml
- - python-memcache
- - python-migrate
- - python-msgpack
- - python-mysqldb
- - python-oauthlib
- - python-openstackclient
- - python-oslo.cache
- - python-oslo.concurrency
- - python-oslo.config
- - python-oslo.context
- - python-oslo.db
- - python-oslo.i18n
- - python-oslo.log
- - python-oslo.messaging
- - python-oslo.middleware
- - python-oslo.policy
- - python-oslo.serialization
- - python-oslo.service
- - python-oslo.utils
- - python-pam
- - python-passlib
- - python-paste
- - python-pastedeploy
- - python-pbr
- - python-pycadf
- - python-pymysql
- - python-pysaml2
- - python-pysqlite2
- - python-routes
- - python-six
- - python-sqlalchemy
- - python-stevedore
- - python-webob
- - unzip
- - python3-keystoneauth1
- - python3-keystoneclient
- - python3-oslo.config
- - python3-oslo.context
- - python3-oslo.i18n
- - python3-oslo.serialization
- - python-oslo.service
- - python-oslo.utils
- - python-pam
- - python-passlib
- - python-paste
- - python-pastedeploy
- - python-pbr
- - python-pycadf
- - python-pymysql
- - python-pysaml2
- - python-pysqlite2
- - python-routes
- - python-six
- - python-sqlalchemy
- - python-stevedore
- - python-webob
- - unzip
- - python3-keystoneauth1
- - python3-keystoneclient
- - python3-oslo.config
- - python3-oslo.context
- - python3-oslo.i18n
- - python3-oslo.serialization
- - python3-oslo.utils
- - apache2
- - libapache2-mod-wsgi
- - python3-cryptography
- - python3-dateutil
- - python3-dogpile.cache
- - python3-eventlet
- - python3-greenlet
- - python3-jsonschema
- - python3-keystoneclient
- - python3-keystonemiddleware
- - python3-lxml
- - python3-memcache
- - python3-migrate
- - python3-msgpack
- - python3-mysqldb
- - python3-oauthlib
- - python3-openstackclient
- - python3-oslo.cache
- - python3-oslo.concurrency
- - python3-oslo.config
- - python3-oslo.context
- - python3-oslo.db
- - python3-oslo.i18n
- - python3-oslo.log
- - python3-oslo.messaging
- - python3-oslo.middleware
- - python3-oslo.policy
- - python3-oslo.serialization
- - python3-oslo.service
- - python3-oslo.utils
- - python3-pam
- - python3-passlib
- - python3-paste
- - python3-pastedeploy
- - python3-pbr
- - python3-pycadf
- - python3-pymysql
- - python3-pysaml2
- - python3-routes
- - python3-six
- - python3-sqlalchemy
- - python3-stevedore
- - python3-webob
- - python3-oslo.service
- - python3-oslo.utils
- - python3-pam
- - python3-passlib
- - python3-paste
- - python3-pastedeploy
- - python3-pbr
- - python3-pycadf
- - python3-pymysql
- - python3-pysaml2
- - python3-routes
- - python3-six
- - python3-sqlalchemy
- - python3-stevedore
- - python3-webob
-
-services:
- - apache2
-
-
-apache_config_dir: /etc/apache2
-http_service_name: apache2
diff --git a/deploy/adapters/ansible/roles/moon/vars/main.yml b/deploy/adapters/ansible/roles/moon/vars/main.yml
deleted file mode 100644
index 6793c189..00000000
--- a/deploy/adapters/ansible/roles/moon/vars/main.yml
+++ /dev/null
@@ -1,165 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-packages_noarch: []
-
-services_noarch: []
-
-os_services:
- - name: keystone
- type: identity
- region: RegionOne
- description: "OpenStack Identity"
- publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
-
- - name: glance
- type: image
- region: RegionOne
- description: "OpenStack Image Service"
- publicurl: "http://{{ public_vip.ip }}:9292"
- internalurl: "http://{{ internal_vip.ip }}:9292"
- adminurl: "http://{{ internal_vip.ip }}:9292"
-
- - name: nova
- type: compute
- region: RegionOne
- description: "OpenStack Compute"
- publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
-
- - name: neutron
- type: network
- region: RegionOne
- description: "OpenStack Networking"
- publicurl: "http://{{ public_vip.ip }}:9696"
- internalurl: "http://{{ internal_vip.ip }}:9696"
- adminurl: "http://{{ internal_vip.ip }}:9696"
-
- - name: ceilometer
- type: metering
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8777"
- internalurl: "http://{{ internal_vip.ip }}:8777"
- adminurl: "http://{{ internal_vip.ip }}:8777"
-
- - name: aodh
- type: alarming
- region: RegionOne
- description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8042"
- internalurl: "http://{{ internal_vip.ip }}:8042"
- adminurl: "http://{{ internal_vip.ip }}:8042"
-
- - name: heat
- type: orchestration
- region: RegionOne
- description: "OpenStack Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
-
- - name: heat-cfn
- type: cloudformation
- region: RegionOne
- description: "OpenStack CloudFormation Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8000/v1"
- internalurl: "http://{{ internal_vip.ip }}:8000/v1"
- adminurl: "http://{{ internal_vip.ip }}:8000/v1"
-
-# - name: cinder
-# type: volume
-# region: RegionOne
-# description: "OpenStack Block Storage"
-# publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
-
-# - name: cinderv2
-# type: volumev2
-# region: RegionOne
-# description: "OpenStack Block Storage v2"
-# publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
-
-# - name: swift
-# type: object-store
-# region: RegionOne
-# description: "OpenStack Object Storage"
-# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-
-os_users:
- - user: admin
- password: "{{ ADMIN_PASS }}"
- email: admin@admin.com
- role: admin
- tenant: admin
- tenant_description: "Admin Tenant"
-
- - user: glance
- password: "{{ GLANCE_PASS }}"
- email: glance@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: nova
- password: "{{ NOVA_PASS }}"
- email: nova@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: keystone
- password: "{{ KEYSTONE_PASS }}"
- email: keystone@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: neutron
- password: "{{ NEUTRON_PASS }}"
- email: neutron@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: ceilometer
- password: "{{ CEILOMETER_PASS }}"
- email: ceilometer@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: cinder
- password: "{{ CINDER_PASS }}"
- email: cinder@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: heat
- password: "{{ HEAT_PASS }}"
- email: heat@admin.com
- role: admin
- tenant: service
- tenant_description: "Service Tenant"
-
- - user: demo
- password: ""
- email: heat@demo.com
- role: heat_stack_user
- tenant: demo
- tenant_description: "Demo Tenant"
diff --git a/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml b/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
index d0ae2bdd..81f9194f 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/vars/Debian.yml
@@ -11,8 +11,10 @@
packages:
- neutron-common
- neutron-plugin-ml2
- - openvswitch-switch-dpdk
+ - dkms
+ - openvswitch-datapath-dkms
- openvswitch-switch
+ - openvswitch-common
- neutron-plugin-openvswitch-agent
services:
diff --git a/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
index 3597f587..6ffeab1d 100644
--- a/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/vars/Debian.yml
@@ -9,8 +9,10 @@
---
packages:
- neutron-plugin-ml2
- - openvswitch-switch-dpdk
+ - dkms
+ - openvswitch-datapath-dkms
- openvswitch-switch
+ - openvswitch-common
- neutron-l3-agent
- neutron-dhcp-agent
- neutron-plugin-openvswitch-agent
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
index fa418c6c..cc4ae992 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_04_install_pip_packages.yml
@@ -23,3 +23,7 @@
pip install {{ networking_odl_pkg_name }}
rm -rf {{ networking_odl_pkg_name }}
cd -
+
+- name: install networking-sfc
+ pip: name=networking-sfc state=present version=3.0.0
+ when: odl_sfc == "Enable"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml
index 80443f1b..49396e35 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/01_08_configure_neutron.yml
@@ -38,6 +38,13 @@
force_metadata "True";
when: odl_l3_agent == "Enable"
+- name: configure sfc
+ shell: |
+ crudini --set /etc/neutron/neutron.conf sfc drivers \
+ odl;
+ crudini --set /etc/neutron/neutron.conf flowclassifier drivers \
+ odl;
+ when: odl_sfc == "Enable"
- name: drop and recreate neutron database
shell: |
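For reference, a minimal sketch of the end state the "configure sfc" task above is expected to leave behind: an [sfc] and a [flowclassifier] section in /etc/neutron/neutron.conf whose drivers point at OpenDaylight. Reading the file with Python's configparser is an assumption of this sketch (neutron itself loads it through oslo.config):

import configparser

# Path and values taken from the crudini calls in the "configure sfc" task.
cfg = configparser.ConfigParser(interpolation=None, strict=False)
cfg.read("/etc/neutron/neutron.conf")
assert cfg.get("sfc", "drivers") == "odl"
assert cfg.get("flowclassifier", "drivers") == "odl"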
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
index 2a9622f9..56326563 100644
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/03_02_openvswitch_connect_opendaylight.yml
@@ -21,6 +21,34 @@
when: inventory_hostname in groups['odl']
ignore_errors: "True"
+- name: table configuration for netvirt
+ uri:
+ url: "{{ item.url }}"
+ method: PUT
+ user: "{{ odl_username }}"
+ password: "{{ odl_password }}"
+ body: "{{ item.body }}"
+ force_basic_auth: "yes"
+ body_format: json
+ status_code: 200, 201
+ with_items: "{{ conf_netvirt }}"
+ when: inventory_hostname == haproxy_hosts.keys()[0]
+ and odl_sfc == "Enable"
+
+- name: table configuration for sfc
+ uri:
+ url: "{{ item.url }}"
+ method: PUT
+ user: "{{ odl_username }}"
+ password: "{{ odl_password }}"
+ body: "{{ item.body }}"
+ force_basic_auth: "yes"
+ body_format: json
+ status_code: 200, 201
+ with_items: "{{ conf_sfc }}"
+ when: inventory_hostname == haproxy_hosts.keys()[0]
+ and odl_sfc == "Enable"
+
- name: set opendaylight as the manager
command: |
su -s /bin/sh -c "ovs-vsctl set-manager ptcp:6639:127.0.0.1 tcp:{{ internal_vip.ip }}:6640;"
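The two uri tasks above PUT table-offset settings into OpenDaylight's RESTCONF config datastore, using the URLs and JSON bodies from conf_netvirt and conf_sfc in the role's vars/main.yml (updated later in this change). A rough Python equivalent of the SFC call, assuming the requests library and the default admin/admin credentials; the VIP below is a placeholder:

import requests

internal_vip = "192.0.2.10"   # placeholder for {{ internal_vip.ip }}
odl_api_port = 8181
sfc_table = "sfc-of-renderer:sfc-of-renderer-config"
url = "http://{}:{}/restconf/config/{}".format(internal_vip, odl_api_port, sfc_table)
body = {"sfc-of-renderer-config": {"sfc-of-table-offset": "150",
                                   "sfc-of-app-egress-table-offset": "11"}}
resp = requests.put(url, json=body, auth=("admin", "admin"))
# The Ansible task accepts 200 (updated) or 201 (created), same as here.
assert resp.status_code in (200, 201)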
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian
index eebd47a6..743e7123 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian
@@ -41,7 +41,7 @@ featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.7/xml/features
#
# Comma separated list of features to install at startup
#
-featuresBoot=config,standard,region,package,kar,ssh,management,odl-restconf-all,odl-aaa-authn,odl-dlux-all,odl-ovsdb-openstack,odl-mdsal-apidocs,odl-dlux-core,odl-openflowplugin-nxm-extensions
+featuresBoot=config,standard,region,package,kar,ssh,management,odl-restconf-all,odl-aaa-authn,odl-dlux-all,odl-ovsdb-openstack,odl-mdsal-apidocs,odl-dlux-core,odl-openflowplugin-nxm-extensions,odl-ovsdb-sfc,odl-netvirt-openstack-sfc-translator-rest
#
# Defines if the boot features are started in asynchronous mode (in a dedicated thread)
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
index a968e3ec..365247ef 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
@@ -11,9 +11,19 @@ odl_username: admin
odl_password: admin
odl_api_port: 8181
-# odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/
-# opendaylight/integration/distribution-karaf/0.3.0-Lithium/
-# distribution-karaf-0.3.0-Lithium.tar.gz
+netvirt_table: netvirt-providers-config:netvirt-providers-config
+sfc_table: sfc-of-renderer:sfc-of-renderer-config
+
+conf_netvirt:
+ - url: http://{{ internal_vip.ip }}:{{ odl_api_port }}/restconf/config/{{ netvirt_table }}
+ body: {"netvirt-providers-config":{"table-offset":1}}
+
+# yamllint disable rule:line-length
+conf_sfc:
+ - url: http://{{ internal_vip.ip }}:{{ odl_api_port }}/restconf/config/{{ sfc_table }}
+ body: {"sfc-of-renderer-config":{"sfc-of-table-offset":"150", "sfc-of-app-egress-table-offset":"11"}}
+# yamllint enable rule:line-length
+
odl_pkg_url: distribution-karaf-0.5.2-Boron-SR2.tar.gz
odl_pkg_name: distribution-karaf-0.5.2-Boron-SR2.tar.gz
odl_home: "/opt/opendaylight/"
diff --git a/deploy/adapters/ansible/roles/onos_cluster/files/sfc_plugins.conf b/deploy/adapters/ansible/roles/onos_cluster/files/sfc_plugins.conf
new file mode 100644
index 00000000..ddef928d
--- /dev/null
+++ b/deploy/adapters/ansible/roles/onos_cluster/files/sfc_plugins.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+
+service_plugins = networking_sfc.services.sfc.plugin.SfcPlugin,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,onos_router
+
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_sfc_controller.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_sfc_controller.yml
index 26679373..a1337bdf 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_sfc_controller.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_sfc_controller.yml
@@ -1,5 +1,5 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+############################################################################
+# copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -36,7 +36,7 @@
- name: install onos required packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: packages
+ with_items: "{{ packages }}"
- name: download oracle-jdk8 package file
get_url:
@@ -93,6 +93,12 @@
- name: extract jar repository
command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/"
+- name: copy onos.service
+ copy:
+ src: "{{ onos_service_file.src }}"
+ dest: "{{ onos_service_file.dst }}"
+ mode: 0755
+
- name: extract onos package
command: |
su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} \
@@ -104,18 +110,23 @@
echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
mkdir {{ onos_home }}/var;
mkdir {{ onos_home }}/config;
- sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
- cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
-- name: configure onos boot feature
- shell: >
- sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' \
- {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
+- name: create karaf config
+ template:
+ src: org.apache.karaf.features.cfg
+ dest: "{{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg"
+ owner: onos
+ group: onos
+ mode: 0775
- name: wait for config time
shell: "sleep 10"
+- name: chown onos directory and files
+ shell: >
+ chown -R onos:onos "{{ onos_home }}";
+ chown onos:onos "{{ onos_service_file.dst }}";
+
- name: start onos service
service: name=onos state=started enabled=yes
@@ -129,15 +140,39 @@
#########################################################################################
################################ ONOS connect with OpenStack #######################
#########################################################################################
+- name: copy sfc_plugins.conf
+ copy:
+ src: "{{ sfc_plugins.src }}"
+ dest: "{{ sfc_plugins.dst }}"
+ mode: 0755
+
+- name: Configure SFC driver
+ shell: crudini --merge /etc/neutron/neutron.conf < /opt/sfc_plugins.conf
+
+- name: delete sfc_plugins.conf
+ shell: rm -rf {{ sfc_plugins.dst }}
+
- name: Configure Neutron1
shell: >
- crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins \
- networking_sfc.services.sfc.plugin.SfcPlugin, \
- networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin,onos_router;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
+- name: configure metadata
+ shell: |
+ crudini --set /etc/neutron/dhcp_agent.ini DEFAULT \
+ enable_isolated_metadata "True";
+
+- name: force metadata
+ shell: |
+ crudini --set /etc/neutron/dhcp_agent.ini DEFAULT \
+ force_metadata "True";
+
+- name: configure vsctl for dhcp agent
+ shell: |
+ crudini --set /etc/neutron/dhcp_agent.ini OVS \
+ ovsdb_interface vsctl;
+
- name: Create ML2 Configuration File
template:
src: ml2_conf.sh
@@ -156,5 +191,8 @@
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron;
+- name: Restart neutron-dhcp-agent
+ service: name=neutron-dhcp-agent state=restarted
+
- name: Restart neutron-server
service: name=neutron-server state=restarted
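The sfc_plugins.conf file added earlier in this change exists only to feed the "Configure SFC driver" step: crudini --merge reads the [DEFAULT] fragment from stdin and folds its service_plugins value into /etc/neutron/neutron.conf, which avoids passing the long comma-separated plugin list as a single quoted shell argument (the lines dropped from "Configure Neutron1" above). A quick sanity check of the merged result, with the path assumed as in the earlier sketch:

from configparser import ConfigParser

cfg = ConfigParser(interpolation=None, strict=False)
cfg.read("/etc/neutron/neutron.conf")
# Expect SfcPlugin, FlowClassifierPlugin and onos_router from sfc_plugins.conf.
print(cfg["DEFAULT"]["service_plugins"])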
diff --git a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
index eaf4ed53..3c3dd557 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
@@ -16,6 +16,9 @@ jdk8_script_name: install_jdk8.tar
onos_driver: networking-onos.tar
onos_sfc_driver: networking-sfc.tar
repository: repository.tar
+sfc_plugins:
+ src: sfc_plugins.conf
+ dst: /opt/sfc_plugins.conf
onos_service_file:
src: onos.service
dst: /lib/systemd/system/onos.service
diff --git a/deploy/adapters/ansible/roles/tacker/vars/RedHat.yml b/deploy/adapters/ansible/roles/tacker/vars/RedHat.yml
index 59a4dbd9..fc944f1b 100755
--- a/deploy/adapters/ansible/roles/tacker/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/tacker/vars/RedHat.yml
@@ -12,3 +12,5 @@ packages:
- crudini
services: []
+
+heat_services: []
diff --git a/deploy/client.py b/deploy/client.py
index a0d70644..50e236f0 100644
--- a/deploy/client.py
+++ b/deploy/client.py
@@ -243,12 +243,15 @@ opts = [
cfg.StrOpt('odl_l3_agent',
help='odl l3 agent enable flag',
default='Disable'),
- cfg.StrOpt('moon',
- help='moon enable flag',
- default='Disable'),
+ cfg.StrOpt('moon_cfg',
+ help='moon config',
+ default='master:flag=Disable,slave:flag=Disable,slave:name=slave1,slave:master_ip=master_ip'), # noqa
cfg.StrOpt('onos_sfc',
help='onos_sfc enable flag',
default='Disable'),
+ cfg.StrOpt('odl_sfc',
+ help='odl_sfc enable flag',
+ default='Disable'),
]
CONF.register_cli_opts(opts)
@@ -718,6 +721,30 @@ class CompassClient(object):
'password': password
}
+ moon_cfgs = [
+ cfg
+ for cfg in CONF.moon_cfg.split(',')
+ if cfg
+ ]
+ LOG.info(
+ 'moon configure: %s', moon_cfgs
+ )
+ moon_cfg = {}
+ for cfg in moon_cfgs:
+ if ':' not in cfg:
+ raise Exception(
+ 'there is no : in cfg %s' % cfg # noqa
+ )
+ role, conf_pair = cfg.split(':', 1)
+ if '=' not in conf_pair:
+ raise Exception(
+ 'there is no = in %s configure pair' % conf_pair
+ )
+ key, value = conf_pair.split('=', 1)
+ moon_cfg[role] = {} if role not in moon_cfg else moon_cfg[role]
+ moon_cfg[role][key] = value
+ package_config["moon_cfg"] = moon_cfg
+
package_config["security"] = {"service_credentials": service_credential_cfg, # noqa
"console_credentials": console_credential_cfg} # noqa
@@ -751,12 +778,12 @@ class CompassClient(object):
package_config['enable_secgroup'] = (CONF.enable_secgroup == "true")
package_config['enable_fwaas'] = (CONF.enable_fwaas == "true")
package_config['enable_vpnaas'] = (CONF.enable_vpnaas == "true")
- package_config[
- 'odl_l3_agent'] = "Enable" if CONF.odl_l3_agent == "Enable" else "Disable" # noqa
- package_config[
- 'moon'] = "Enable" if CONF.moon == "Enable" else "Disable"
- package_config[
- 'onos_sfc'] = "Enable" if CONF.onos_sfc == "Enable" else "Disable"
+ package_config['odl_l3_agent'] = "Enable" \
+ if CONF.odl_l3_agent == "Enable" else "Disable"
+ package_config['onos_sfc'] = "Enable" \
+ if CONF.onos_sfc == "Enable" else "Disable"
+ package_config['odl_sfc'] = "Enable" \
+ if CONF.odl_sfc == "Enable" else "Disable"
status, resp = self.client.update_cluster_config(
cluster_id, package_config=package_config)
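The moon_cfg handling above replaces the old boolean moon flag with a structured "role:key=value" string. For clarity, a standalone sketch of the same parsing applied to the default value from deploy/conf/base.conf (the ':'/'=' validation raised in client.py is omitted here):

def parse_moon_cfg(raw):
    # Mirrors the loop added to client.py: 'role:key=value,...' -> {role: {key: value}}.
    moon_cfg = {}
    for item in (c for c in raw.split(',') if c):
        role, pair = item.split(':', 1)
        key, value = pair.split('=', 1)
        moon_cfg.setdefault(role, {})[key] = value
    return moon_cfg

default = "master:flag=Disable,slave:flag=Disable,slave:name=slave1,slave:master_ip=master_ip"
print(parse_moon_cfg(default))
# {'master': {'flag': 'Disable'},
#  'slave': {'flag': 'Disable', 'name': 'slave1', 'master_ip': 'master_ip'}}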
diff --git a/deploy/compass_conf/flavor/openstack_newton.conf b/deploy/compass_conf/flavor/openstack_newton.conf
index 2861ccdf..41c6cf2e 100755
--- a/deploy/compass_conf/flavor/openstack_newton.conf
+++ b/deploy/compass_conf/flavor/openstack_newton.conf
@@ -25,7 +25,7 @@ FLAVORS = [{
'display_name': 'HA-ansible-multinodes-newton',
'template': 'HA-ansible-multinodes.tmpl',
'roles': [
- 'controller', 'compute', 'ha', 'odl', 'onos', 'opencontrail', 'ceph', 'ceph-adm', 'ceph-mon', 'ceph-osd', 'sec-patch', 'ceph-osd-node'
+ 'controller', 'compute', 'ha', 'odl', 'onos', 'opencontrail', 'ceph', 'ceph-adm', 'ceph-mon', 'ceph-osd', 'sec-patch', 'ceph-osd-node', 'moon', 'none'
],
}]
diff --git a/deploy/compass_conf/package_metadata/openstack.conf b/deploy/compass_conf/package_metadata/openstack.conf
index b07efd4e..509cc5e0 100755
--- a/deploy/compass_conf/package_metadata/openstack.conf
+++ b/deploy/compass_conf/package_metadata/openstack.conf
@@ -113,14 +113,6 @@ METADATA = {
'default_value': 'Disable'
}
},
- 'moon': {
- '_self': {
- 'mapping_to': 'moon',
- 'field': 'anytype',
- 'is_required':False,
- 'default_value': 'Disable'
- }
- },
'ha_network': {
'_self': {
'mapping_to': 'ha_network',
@@ -463,5 +455,50 @@ METADATA = {
}
}
},
-
+ 'moon_cfg': {
+ '_self': {
+ 'required_in_whole_config': False,
+ 'mapping_to': 'moon_cfg'
+ },
+ 'master': {
+ '_self': {
+ 'required_in_whole_config': False,
+ 'mapping_to': 'master'
+ },
+ 'flag': {
+ '_self': {
+ 'is_required': False,
+ 'field': 'general',
+ 'mapping_to': 'flag'
+ }
+ },
+ },
+ 'slave': {
+ '_self': {
+ 'required_in_whole_config': False,
+ 'mapping_to': 'slave'
+ },
+ 'flag': {
+ '_self': {
+ 'is_required': False,
+ 'field': 'general',
+ 'mapping_to': 'flag'
+ }
+ },
+ 'name': {
+ '_self': {
+ 'is_required': False,
+ 'field': 'general',
+ 'mapping_to': 'name'
+ }
+ },
+ 'master_ip': {
+ '_self': {
+ 'is_required': False,
+ 'field': 'general',
+ 'mapping_to': 'master_ip'
+ }
+ },
+ }
+ }
}
diff --git a/deploy/compass_conf/role/openstack_newton_ansible.conf b/deploy/compass_conf/role/openstack_newton_ansible.conf
index 508ccf24..0558a063 100755
--- a/deploy/compass_conf/role/openstack_newton_ansible.conf
+++ b/deploy/compass_conf/role/openstack_newton_ansible.conf
@@ -112,4 +112,14 @@ ROLES = [{
'display': 'sec-patch node',
'description': 'Security Patch Node',
'optional': True
+}, {
+ 'role': 'none',
+ 'display': 'none node',
+ 'description': 'Only install OS Node',
+ 'optional': True
+}, {
+ 'role': 'moon',
+ 'display': 'moon master or slave node',
+ 'description': "Moon master/slave Node",
+ 'optional': True
}]
diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_newton/inventories/HA-ansible-multinodes.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_newton/inventories/HA-ansible-multinodes.tmpl
index 94a6a153..f0dab66a 100755
--- a/deploy/compass_conf/templates/ansible_installer/openstack_newton/inventories/HA-ansible-multinodes.tmpl
+++ b/deploy/compass_conf/templates/ansible_installer/openstack_newton/inventories/HA-ansible-multinodes.tmpl
@@ -7,6 +7,7 @@
#set ceph_adm_list = $getVar('ceph_adm',[])
#set ceph_mon_list = $getVar('ceph_mon',[])
#set ceph_osd_list = $getVar('ceph_osd',[])
+#set moon_list = $getVar('moon',[])
#if not $isinstance($controllers, list)
#set controllers = [$controllers]
@@ -75,6 +76,12 @@ $onos_hostname ansible_ssh_host=$onos_ip ansible_ssh_user=$username ansible_ssh_
#set opencontrail_hostname = $opencontrail.hostname
$opencontrail_hostname ansible_ssh_host=$opencontrail_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
+[moon]
+#for moon in $moon_list
+ #set moon_ip = $moon.install.ip
+ #set moon_hostname = $moon.hostname
+$moon_hostname ansible_ssh_host=$moon_ip
+#end for
[ceph_adm]
#for ceph_adm in $ceph_adm_list
#set ceph_adm_ip = $ceph_adm.install.ip
diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_newton/vars/HA-ansible-multinodes.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_newton/vars/HA-ansible-multinodes.tmpl
index ac82a981..2202ab7a 100755
--- a/deploy/compass_conf/templates/ansible_installer/openstack_newton/vars/HA-ansible-multinodes.tmpl
+++ b/deploy/compass_conf/templates/ansible_installer/openstack_newton/vars/HA-ansible-multinodes.tmpl
@@ -30,10 +30,10 @@ enable_secgroup: $getVar('enable_secgroup', True)
enable_fwaas: $getVar('enable_fwaas', True)
enable_vpnaas: $getVar('enable_vpnaas', True)
odl_l3_agent: $getVar('odl_l3_agent', 'Disable')
-moon: $getVar('moon', 'Disable')
ha_network: $getVar('ha_network', 'Disable')
onos_nic: $getVar('onos_nic', 'eth2')
onos_sfc: $getVar('onos_sfc', 'Disable')
+odl_sfc: $getVar('odl_sfc', 'Disable')
ip_settings: $ip_settings
network_cfg: $network_cfg
sys_intf_mappings: $sys_intf_mappings
@@ -212,3 +212,10 @@ odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'ma
odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi']
odl_features: "{{ odl_base_features + odl_extra_features }}"
odl_api_port: 8080
+
+#set moon_cfg = $getVar('moon_cfg', {})
+moon_master: $moon_cfg.master.flag
+moon_slave: $moon_cfg.slave.flag
+moon_slave_name: $moon_cfg.slave.name
+moon_master_ip: $moon_cfg.slave.master_ip
+
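For reference, with the default MOON_CFG exported in deploy/conf/base.conf, these four template lines render as moon_master: Disable, moon_slave: Disable, moon_slave_name: slave1 and moon_master_ip: master_ip (the last value being the literal placeholder string carried by the default).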
diff --git a/deploy/conf/base.conf b/deploy/conf/base.conf
index 7b9d8290..1e60e0d5 100644
--- a/deploy/conf/base.conf
+++ b/deploy/conf/base.conf
@@ -68,3 +68,5 @@ export OS_CONFIG_FILENAME=""
export SERVICE_CREDENTIALS="image:service=service,compute:service=service,dashboard:service=service,identity:service=service,image:service=service,metering:service=service,network:service=service,rabbitmq:service=service,volume:service=service,mysql:service=service,heat:heat=heat_db_secret,alarming:aodh=aodh_db_secret,policy:congress=service"
export CONSOLE_CREDENTIALS="admin:console=console,demo:console=console,compute:console=console,dashboard:console=console,identity:console=console,image:console=console,metering:console=console,network:console=console,object-store:console=console,volume:console=console,heat:heat=heat_secret,alarming:aodh=aodh_secret,policy:congress=console"
export PACKAGE_CONFIG_FILENAME=""
+export MOON_CFG=${MOON_CFG:-"master:flag=Disable,slave:flag=Disable,slave:name=slave1,slave:master_ip=master_ip"}
+
diff --git a/deploy/adapters/ansible/roles/moon/handlers/main.yml b/deploy/conf/vm_environment/os-odl_l2-sfc-ha.yml
index 608a8a09..8b3c2033 100755..100644
--- a/deploy/adapters/ansible/roles/moon/handlers/main.yml
+++ b/deploy/conf/vm_environment/os-odl_l2-sfc-ha.yml
@@ -6,7 +6,42 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+
---
-- name: restart keystone services
- service: name={{ item }} state=restarted enabled=yes
- with_items: services | union(services_noarch)
+TYPE: virtual
+FLAVOR: cluster
+
+odl_sfc: "Enable"
+
+hosts:
+ - name: host1
+ roles:
+ - controller
+ - ha
+ - odl
+ - ceph-adm
+ - ceph-mon
+
+ - name: host2
+ roles:
+ - controller
+ - ha
+ - odl
+ - ceph-mon
+
+ - name: host3
+ roles:
+ - controller
+ - ha
+ - odl
+ - ceph-mon
+
+ - name: host4
+ roles:
+ - compute
+ - ceph-osd
+
+ - name: host5
+ roles:
+ - compute
+ - ceph-osd
diff --git a/deploy/deploy_host.sh b/deploy/deploy_host.sh
index bf27b31a..6454a4b9 100755
--- a/deploy/deploy_host.sh
+++ b/deploy/deploy_host.sh
@@ -48,7 +48,8 @@ function deploy_host(){
--deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
--cluster_vip="${VIP}" --network_cfg="$NETWORK" --neutron_cfg="$NEUTRON" \
--enable_secgroup="${ENABLE_SECGROUP}" --enable_fwaas="${ENABLE_FWAAS}" --expansion="${EXPANSION}" \
- --rsa_file="$rsa_file" --enable_vpnaas="${ENABLE_VPNAAS}" --odl_l3_agent="${odl_l3_agent}" --moon="${moon}" --onos_sfc="${onos_sfc}"
+ --rsa_file="$rsa_file" --enable_vpnaas="${ENABLE_VPNAAS}" --odl_l3_agent="${odl_l3_agent}" \
+ --moon_cfg="${MOON_CFG}" --onos_sfc="${onos_sfc}" --odl_sfc="${odl_sfc}"
RET=$?
sleep $((AYNC_TIMEOUT+5))
@@ -58,3 +59,4 @@ function deploy_host(){
/bin/false
fi
}
+
diff --git a/deploy/template/vm/compass.xml b/deploy/template/vm/compass.xml
index 182232f4..fb7cbb9a 100644
--- a/deploy/template/vm/compass.xml
+++ b/deploy/template/vm/compass.xml
@@ -13,11 +13,7 @@
<apic/>
<pae/>
</features>
- <cpu mode='host-model'>
- <model fallback='allow'/>
- <feature policy='optional' name='vmx'/>
- <feature policy='optional' name='svm'/>
- </cpu>
+ <cpu mode='host-passthrough'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
diff --git a/docs/release/installation/offline-deploy.rst b/docs/release/installation/offline-deploy.rst
index c854f87d..0acfa180 100644
--- a/docs/release/installation/offline-deploy.rst
+++ b/docs/release/installation/offline-deploy.rst
@@ -15,7 +15,8 @@ Preparation for offline deploy
generated by script build.sh in compass4nfv root directory.)
2. Download the Jumphost preparation package from our httpserver. (Download the
- jumphost environment package from `here <http://205.177.226.237:9999/jh_env_package.tar.gz>`_.
+ jumphost environment package from
+ `here <http://artifacts.opnfv.org/compass4nfv/package/master/jh_env_package.tar.gz>`_.
It should be noted that currently we only support Ubuntu Trusty as the offline
jumphost OS.)
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 44678b87..d8067286 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -25,13 +25,13 @@ Release Data
| **Project** | Compass4nfv |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | Compass4nfv/Danube.1.0 |
+| **Repo/tag** | Compass4nfv/Danube.3.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Danube.1.0 |
+| **Release designation** | Danube.3.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | March 31 2017 |
+| **Release date** | June 8 2017 |
| | |
+--------------------------------------+--------------------------------------+
| **Purpose of the delivery** | OPNFV Danube release |
@@ -44,7 +44,7 @@ Deliverables
Software deliverables
~~~~~~~~~~~~~~~~~~~~~
- - Compass4nfv/Danube.1.0 ISO, please get it from `OPNFV software download page <https://www.opnfv.org/software/>`_
+ - Compass4nfv/Danube.3.0 ISO, please get it from `OPNFV software download page <https://www.opnfv.org/software/>`_
.. _document-label:
@@ -97,7 +97,12 @@ Feature additions
| COMPASS-491 | Support OpenStack Newton |
| | |
+--------------------------------------+-----------------------------------------+
-
+| COMPASS-483 | Support both ONOS and ODL SFC |
+| | |
++--------------------------------------+-----------------------------------------+
+| COMPASS-483 | Support MOON v4 |
+| | |
++--------------------------------------+-----------------------------------------+
Bug corrections
@@ -109,7 +114,7 @@ Bug corrections
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| | |
+| COMPASS-483 | Fix memcached doesn't start issue |
| | |
+--------------------------------------+--------------------------------------+
diff --git a/repo/openstack/make_ppa/ubuntu/xenial/newton/download_pkg.tmpl b/repo/openstack/make_ppa/ubuntu/xenial/newton/download_pkg.tmpl
index f2a14f48..0790552c 100644
--- a/repo/openstack/make_ppa/ubuntu/xenial/newton/download_pkg.tmpl
+++ b/repo/openstack/make_ppa/ubuntu/xenial/newton/download_pkg.tmpl
@@ -11,7 +11,7 @@ apt-get install -d nova-compute-kvm -y
#make percona database
apt-get install -y apt-transport-https debconf-utils libaio1 libc6 libdbd-mysql-perl libgcc1 libgcrypt20 libstdc++6 python-software-properties wget
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
-add-apt-repository 'deb https://mirrors.tuna.tsinghua.edu.cn/mariadb/mariadb-10.0.26/repo/ubuntu/ xenial main'
+add-apt-repository 'deb http://ftp.hosteurope.de/mirror/archive.mariadb.org/mariadb-10.0.26/repo/ubuntu/ xenial main'
apt-get update
apt-get install -d mariadb-client mariadb-galera-server-10.0 galera-3 rsync socat -y
#make percona database end
@@ -65,7 +65,7 @@ rm -rf /var/cache/apt/python-pyasn1_0.1.7-1ubuntu2_all.deb
# Ubuntu 16.04.1 LTS only has gcc-5-base_5.4.0-6ubuntu1~16.04.1, while nova-novncproxy needs
# gcc-5-base_5.4.0-6ubuntu1~16.04.2, but the docker build could not download the gcc-5-base package.
-wget http://205.177.226.237:9999/nova-novncproxy_xenial_newton.tar.gz
+wget http://artifacts.opnfv.org/compass4nfv/package/master/nova-novncproxy_xenial_newton.tar.gz
tar -zxvf nova-novncproxy_xenial_newton.tar.gz --strip-components 1 -C /var/cache/apt/
mkdir deb
diff --git a/repo/openstack/special_pkg/Debian/make_openvswitch-switch.sh b/repo/openstack/special_pkg/Debian/make_openvswitch-switch.sh
index 2095e0dc..1c43253b 100644
--- a/repo/openstack/special_pkg/Debian/make_openvswitch-switch.sh
+++ b/repo/openstack/special_pkg/Debian/make_openvswitch-switch.sh
@@ -21,7 +21,7 @@ apt-get install -y build-essential fakeroot debhelper \
pushd .
mkdir -p /home/package_yang/
cd /home/package_yang
-wget http://205.177.226.237:9999/onosfw/package_ovs_debian.tar.gz
+wget http://artifacts.opnfv.org/compass4nfv/package/master/onosfw/package_ovs_debian.tar.gz
tar -zxvf package_ovs_debian.tar.gz
#wget http://openvswitch.org/releases/openvswitch-2.3.1.tar.gz
#tar -zxvf openvswitch-2.3.1.tar.gz
diff --git a/repo/pip/code_url.conf b/repo/pip/code_url.conf
index c3c90c4c..8d82875c 100644
--- a/repo/pip/code_url.conf
+++ b/repo/pip/code_url.conf
@@ -16,7 +16,7 @@ export GIT_URL="https://github.com/openstack/keystone.git \
https://github.com/openstack/heat.git \
https://github.com/openstack/python-heatclient.git \
https://github.com/openstack/aodh.git \
- https://github.com/openstack/python-aodhclient.git \
+ https://github.com/openstack/python-aodhclient.git"
export PIP_GIT_URL="https://github.com/openstack/congress.git \
https://github.com/openstack/python-congressclient.git \
diff --git a/repo/pip/extra-requirement-tar.txt b/repo/pip/extra-requirement-tar.txt
index 6d70d563..46bcd98c 100644
--- a/repo/pip/extra-requirement-tar.txt
+++ b/repo/pip/extra-requirement-tar.txt
@@ -3,3 +3,4 @@
networking-odl==3.2.0
pymongo==3.0.3
+networking-sfc==3.0.0
diff --git a/repo/repo.conf b/repo/repo.conf
index 8fbdd060..b688b80c 100644
--- a/repo/repo.conf
+++ b/repo/repo.conf
@@ -12,12 +12,12 @@ TIMEOUT=100
#export MAKE_ALL=${MAKE_ALL:-"false"}
export JUMP_HOST="trusty"
# PACKAGE_URL will be reset in Jenkins for different branch
-export PACKAGE_URL=${PACKAGE_URL:-http://205.177.226.237:9999}
+export PACKAGE_URL=${PACKAGE_URL:-http://artifacts.opnfv.org/compass4nfv/package/master}
# feature packages
export KVMFORNFV=${kvmfornfv:-https://gerrit.opnfv.org/gerrit/p/kvmfornfv.git}
export ODL_PKG="$PACKAGE_URL/distribution-karaf-0.5.2-Boron.tar.gz"
-export JAVA_PKG="$PACKAGE_URL/jdk-8u51-linux-x64.tar.gz \
+export JAVA_PKG="http://download.oracle.com/otn/java/jdk/8u51-b16/jdk-8u51-linux-x64.tar.gz \
$PACKAGE_URL/install_jdk8.tar"
export ONOS_PKG="https://downloads.onosproject.org/release/onos-1.8.4.tar.gz \
$PACKAGE_URL/onosfw/networking-onos.tar \