summaryrefslogtreecommitdiffstats
path: root/compass-deck/db/api
diff options
context:
space:
mode:
authorJustin chi <chigang@huawei.com>2017-11-03 08:48:34 +0000
committerGerrit Code Review <gerrit@opnfv.org>2017-11-03 08:48:34 +0000
commitcdbfdcb04670bba08b301789c6be08900da44f10 (patch)
treee2d20371b26863551ef6907601d06a538d8eea45 /compass-deck/db/api
parent651da345e4fde74f81213fd08cc3a21a8421089a (diff)
parent905b0231e93ce2409a45dd6c4f5f983689fdb790 (diff)
Merge "Add compass-deck"
Diffstat (limited to 'compass-deck/db/api')
-rw-r--r--compass-deck/db/api/__init__.py13
-rw-r--r--compass-deck/db/api/adapter.py313
-rw-r--r--compass-deck/db/api/adapter_holder.py155
-rw-r--r--compass-deck/db/api/cluster.py2444
-rw-r--r--compass-deck/db/api/database.py264
-rw-r--r--compass-deck/db/api/health_check_report.py190
-rw-r--r--compass-deck/db/api/host.py1120
-rw-r--r--compass-deck/db/api/machine.py317
-rw-r--r--compass-deck/db/api/metadata.py517
-rw-r--r--compass-deck/db/api/metadata_holder.py731
-rw-r--r--compass-deck/db/api/network.py160
-rw-r--r--compass-deck/db/api/permission.py357
-rw-r--r--compass-deck/db/api/switch.py1213
-rw-r--r--compass-deck/db/api/user.py553
-rw-r--r--compass-deck/db/api/user_log.py82
-rw-r--r--compass-deck/db/api/utils.py1286
16 files changed, 9715 insertions, 0 deletions
diff --git a/compass-deck/db/api/__init__.py b/compass-deck/db/api/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-deck/db/api/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-deck/db/api/adapter.py b/compass-deck/db/api/adapter.py
new file mode 100644
index 0000000..c3ad48d
--- /dev/null
+++ b/compass-deck/db/api/adapter.py
@@ -0,0 +1,313 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Adapter related database operations."""
+import logging
+import re
+
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+OSES = None
+OS_INSTALLERS = None
+PACKAGE_INSTALLERS = None
+ADAPTERS = None
+ADAPTERS_FLAVORS = None
+ADAPTERS_ROLES = None
+
+
def _get_oses_from_configuration():
    """Load every os definition from the os config dir.

    Returns a dict keyed by os name; each value carries the os
    name/id, its parent name (if any) and the deployable flag.
    Parent os attributes are recursively merged into children.
    """
    systems = {}
    for config in util.load_configs(setting.OS_DIR):
        logging.info('get config %s', config)
        os_name = config['NAME']
        parent = config.get('PARENT', None)
        systems[os_name] = {
            'name': os_name,
            'id': os_name,
            'os_id': os_name,
            'parent': parent,
            'parent_id': parent,
            'deployable': config.get('DEPLOYABLE', False)
        }
    parents = {}
    for os_name, system in systems.items():
        parents[os_name] = system.get('parent', None)
    for os_name in systems:
        util.recursive_merge_dict(os_name, systems, parents)
    return systems
+
+
+def _get_installers_from_configuration(configs):
+ """Get installers from configurations.
+
+ Example: {
+ <installer_isntance>: {
+ 'alias': <instance_name>,
+ 'id': <instance_name>,
+ 'name': <name>,
+ 'settings': <dict pass to installer plugin>
+ }
+ }
+ """
+ installers = {}
+ for config in configs:
+ name = config['NAME']
+ instance_name = config.get('INSTANCE_NAME', name)
+ installers[instance_name] = {
+ 'alias': instance_name,
+ 'id': instance_name,
+ 'name': name,
+ 'settings': config.get('SETTINGS', {})
+ }
+ return installers
+
+
def _get_os_installers_from_configuration():
    """Get os installers from os installer config dir.

    Returns a dict keyed by installer instance name; see
    _get_installers_from_configuration for the value layout.
    """
    configs = util.load_configs(setting.OS_INSTALLER_DIR)
    return _get_installers_from_configuration(configs)
+
+
def _get_package_installers_from_configuration():
    """Get package installers from package installer config dir.

    Returns a dict keyed by installer instance name; see
    _get_installers_from_configuration for the value layout.
    """
    configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
    return _get_installers_from_configuration(configs)
+
+
def _get_adapters_from_configuration():
    """Load adapter definitions from the adapter config dir.

    Each adapter records its os/package installers (resolved from
    the already-loaded OS_INSTALLERS / PACKAGE_INSTALLERS caches),
    the deployable oses whose names match SUPPORTED_OS_PATTERNS,
    and empty roles/flavors lists that are filled in later. Parent
    adapter attributes are recursively merged into children.
    """
    adapters = {}
    for config in util.load_configs(setting.ADAPTER_DIR):
        logging.info('add config %s to adapter', config)
        os_installer = None
        if 'OS_INSTALLER' in config:
            os_installer = OS_INSTALLERS[config['OS_INSTALLER']]
        package_installer = None
        if 'PACKAGE_INSTALLER' in config:
            package_installer = PACKAGE_INSTALLERS[
                config['PACKAGE_INSTALLER']
            ]

        adapter_name = config['NAME']
        parent_name = config.get('PARENT', None)
        patterns = [
            re.compile(pattern)
            for pattern in config.get('SUPPORTED_OS_PATTERNS', [])
        ]
        supported_oses = []
        for os_name, os in OSES.items():
            # only deployable oses may be attached to an adapter
            if not os.get('deployable', False):
                continue
            if any(pattern.match(os_name) for pattern in patterns):
                supported_oses.append(os)
        adapters[adapter_name] = {
            'name': adapter_name,
            'id': adapter_name,
            'parent': parent_name,
            'parent_id': parent_name,
            'display_name': config.get('DISPLAY_NAME', adapter_name),
            'os_installer': os_installer,
            'package_installer': package_installer,
            'deployable': config.get('DEPLOYABLE', False),
            'health_check_cmd': config.get('HEALTH_CHECK_COMMAND', None),
            'supported_oses': supported_oses,
            'roles': [],
            'flavors': []
        }

    parents = {}
    for name, adapter in adapters.items():
        parents[name] = adapter.get('parent', None)
    for name in adapters:
        util.recursive_merge_dict(name, adapters, parents)
    return adapters
+
+
def _add_roles_from_configuration():
    """Load role definitions and attach them to ADAPTERS.

    Populates ADAPTERS_ROLES keyed by adapter name, merging roles
    inherited from parent adapters, then writes each adapter's
    merged role list onto the adapter dict.
    """
    for config in util.load_configs(setting.ADAPTER_ROLE_DIR):
        logging.info(
            'add config %s to role', config
        )
        adapter_name = config['ADAPTER_NAME']
        # lookup fails loudly if the adapter is unknown
        adapter = ADAPTERS[adapter_name]
        roles = ADAPTERS_ROLES.setdefault(adapter_name, {})
        for role_dict in config['ROLES']:
            role_name = role_dict['role']
            display_name = role_dict.get('display_name', role_name)
            roles[role_name] = {
                'name': role_name,
                'id': '%s:%s' % (adapter_name, role_name),
                'adapter_id': adapter_name,
                'adapter_name': adapter_name,
                'display_name': display_name,
                'description': role_dict.get('description', display_name),
                'optional': role_dict.get('optional', False)
            }
    parents = {}
    for name, adapter in ADAPTERS.items():
        parents[name] = adapter.get('parent', None)
    for adapter_name in ADAPTERS_ROLES:
        util.recursive_merge_dict(adapter_name, ADAPTERS_ROLES, parents)
    for adapter_name, roles in ADAPTERS_ROLES.items():
        ADAPTERS[adapter_name]['roles'] = roles.values()
+
+
def _add_flavors_from_configuration():
    """Load flavor definitions and attach them to ADAPTERS.

    Populates ADAPTERS_FLAVORS keyed by adapter name, merging
    flavors inherited from parent adapters. Each flavor role entry
    is normalized to a dict and backfilled (without override) from
    the adapter's role definition.
    """
    for config in util.load_configs(setting.ADAPTER_FLAVOR_DIR):
        logging.info('add config %s to flavor', config)
        adapter_name = config['ADAPTER_NAME']
        # lookup fails loudly if the adapter is unknown
        adapter = ADAPTERS[adapter_name]
        adapter_flavors = ADAPTERS_FLAVORS.setdefault(adapter_name, {})
        adapter_roles = ADAPTERS_ROLES[adapter_name]
        for flavor_dict in config['FLAVORS']:
            flavor_name = flavor_dict['flavor']
            flavor_id = '%s:%s' % (adapter_name, flavor_name)
            roles_in_flavor = []
            for flavor_role in flavor_dict.get('roles', []):
                if isinstance(flavor_role, basestring):
                    # bare role name shorthand
                    role_in_flavor = {
                        'name': flavor_role,
                        'flavor_id': flavor_id
                    }
                else:
                    role_in_flavor = flavor_role
                    role_in_flavor['flavor_id'] = flavor_id
                    # accept 'role' as an alias of 'name'
                    if 'role' in role_in_flavor:
                        role_in_flavor['name'] = role_in_flavor.pop('role')
                role = adapter_roles[role_in_flavor['name']]
                util.merge_dict(role_in_flavor, role, override=False)
                roles_in_flavor.append(role_in_flavor)
            adapter_flavors[flavor_name] = {
                'name': flavor_name,
                'id': flavor_id,
                'adapter_id': adapter_name,
                'adapter_name': adapter_name,
                'display_name': flavor_dict.get('display_name', flavor_name),
                'template': flavor_dict.get('template', None),
                'roles': roles_in_flavor
            }
    parents = {}
    for name, adapter in ADAPTERS.items():
        parents[name] = adapter.get('parent', None)
    for adapter_name in ADAPTERS_FLAVORS:
        util.recursive_merge_dict(adapter_name, ADAPTERS_FLAVORS, parents)
    for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
        ADAPTERS[adapter_name]['flavors'] = adapter_flavors.values()
+
+
def load_adapters_internal(force_reload=False):
    """Load adapter related configurations into memory.

    Each module-level cache is (re)built when force_reload is set
    or when it has not been populated yet. Order matters: oses and
    installers are needed by adapters; adapters by roles; roles by
    flavors.
    """
    global OSES, OS_INSTALLERS, PACKAGE_INSTALLERS
    global ADAPTERS, ADAPTERS_ROLES, ADAPTERS_FLAVORS
    if force_reload or OSES is None:
        OSES = _get_oses_from_configuration()
    if force_reload or OS_INSTALLERS is None:
        OS_INSTALLERS = _get_os_installers_from_configuration()
    if force_reload or PACKAGE_INSTALLERS is None:
        PACKAGE_INSTALLERS = _get_package_installers_from_configuration()
    if force_reload or ADAPTERS is None:
        ADAPTERS = _get_adapters_from_configuration()
    if force_reload or ADAPTERS_ROLES is None:
        ADAPTERS_ROLES = {}
        _add_roles_from_configuration()
    if force_reload or ADAPTERS_FLAVORS is None:
        ADAPTERS_FLAVORS = {}
        _add_flavors_from_configuration()
+
+
def get_adapters_internal(force_reload=False):
    """Get all deployable adapters keyed by adapter name."""
    load_adapters_internal(force_reload=force_reload)
    deployable = {}
    for adapter_name, adapter in ADAPTERS.items():
        if not adapter.get('deployable'):
            logging.info(
                'ignore adapter %s since it is not deployable',
                adapter_name
            )
            continue
        # TODO(xicheng): adapter should be filtered before
        # return to caller.
        deployable[adapter_name] = adapter
    return deployable
+
+
def get_flavors_internal(force_reload=False):
    """Get all deployable flavors, keyed by adapter then flavor name."""
    load_adapters_internal(force_reload=force_reload)
    flavor_mapping = {}
    for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
        adapter = ADAPTERS.get(adapter_name, {})
        for flavor_name, flavor in adapter_flavors.items():
            if not adapter.get('deployable'):
                logging.info(
                    'ignore adapter %s since it is not deployable',
                    adapter_name
                )
                continue
            # TODO(xicheng): flavor dict should be filtered before
            # return to caller.
            flavor_mapping.setdefault(
                adapter_name, {}
            )[flavor_name] = flavor

    return flavor_mapping
diff --git a/compass-deck/db/api/adapter_holder.py b/compass-deck/db/api/adapter_holder.py
new file mode 100644
index 0000000..91c65c4
--- /dev/null
+++ b/compass-deck/db/api/adapter_holder.py
@@ -0,0 +1,155 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Adapter related object holder."""
+import logging
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+
+
+SUPPORTED_FIELDS = [
+ 'name',
+]
+RESP_FIELDS = [
+ 'id', 'name', 'roles', 'flavors',
+ 'os_installer', 'package_installer',
+ 'supported_oses', 'display_name', 'health_check_cmd'
+]
+RESP_OS_FIELDS = [
+ 'id', 'name', 'os_id'
+]
+RESP_ROLES_FIELDS = [
+ 'id', 'name', 'display_name', 'description', 'optional'
+]
+RESP_FLAVORS_FIELDS = [
+ 'id', 'adapter_id', 'adapter_name', 'name', 'display_name',
+ 'template', 'roles'
+]
+
+
+ADAPTER_MAPPING = None
+FLAVOR_MAPPING = None
+
+
def load_adapters(force_reload=False):
    """Ensure the ADAPTER_MAPPING cache is populated.

    Reloads from configuration when force_reload is set or on
    first use.
    """
    global ADAPTER_MAPPING
    if force_reload or ADAPTER_MAPPING is None:
        logging.info('load adapters into memory')
        ADAPTER_MAPPING = adapter_api.get_adapters_internal(
            force_reload=force_reload
        )
+
+
def load_flavors(force_reload=False):
    """Ensure the FLAVOR_MAPPING cache is populated.

    Flavors are flattened into one dict keyed by
    '<adapter_name>:<flavor_name>'. Reloads when force_reload is
    set or on first use.
    """
    global FLAVOR_MAPPING
    if force_reload or FLAVOR_MAPPING is None:
        logging.info('load flavors into memory')
        FLAVOR_MAPPING = {}
        adapters_flavors = adapter_api.get_flavors_internal(
            force_reload=force_reload
        )
        for adapter_name, flavors in adapters_flavors.items():
            for flavor_name, flavor in flavors.items():
                key = '%s:%s' % (adapter_name, flavor_name)
                FLAVOR_MAPPING[key] = flavor
+
+
+def _filter_adapters(adapter_config, filter_name, filter_value):
+ if filter_name not in adapter_config:
+ return False
+ if isinstance(filter_value, list):
+ return bool(
+ adapter_config[filter_name] in filter_value
+ )
+ elif isinstance(filter_value, dict):
+ return all([
+ _filter_adapters(
+ adapter_config[filter_name],
+ sub_filter_key, sub_filter_value
+ )
+ for sub_filter_key, sub_filter_value in filter_value.items()
+ ])
+ else:
+ return adapter_config[filter_name] == filter_value
+
+
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_ADAPTERS
)
@utils.output_filters(name=utils.general_filter_callback)
@utils.wrap_to_dict(
    RESP_FIELDS,
    supported_oses=RESP_OS_FIELDS,
    roles=RESP_ROLES_FIELDS,
    flavors=RESP_FLAVORS_FIELDS
)
def list_adapters(user=None, session=None, **filters):
    """List all cached adapters.

    Filter validation, permission checking and response-field
    trimming are all handled by the decorators.
    """
    load_adapters()
    return ADAPTER_MAPPING.values()
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_ADAPTERS
)
@utils.wrap_to_dict(
    RESP_FIELDS,
    supported_oses=RESP_OS_FIELDS,
    roles=RESP_ROLES_FIELDS,
    flavors=RESP_FLAVORS_FIELDS
)
def get_adapter(adapter_id, user=None, session=None, **kwargs):
    """Get one adapter by id.

    Raises RecordNotExists when adapter_id is not in the adapter
    cache.
    """
    load_adapters()
    if adapter_id not in ADAPTER_MAPPING:
        # fixed typo in the user-facing error message ('adpater')
        raise exception.RecordNotExists(
            'adapter %s does not exist' % adapter_id
        )
    return ADAPTER_MAPPING[adapter_id]
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
def list_flavors(user=None, session=None, **filters):
    """List all cached flavors.

    NOTE(review): guarded by PERMISSION_LIST_METADATAS rather than
    an adapter/flavor permission — presumably intentional; confirm.
    """
    load_flavors()
    return FLAVOR_MAPPING.values()
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
def get_flavor(flavor_id, user=None, session=None, **kwargs):
    """Get one flavor by its '<adapter_name>:<flavor_name>' id.

    Raises RecordNotExists when the id is unknown.
    """
    load_flavors()
    if flavor_id not in FLAVOR_MAPPING:
        raise exception.RecordNotExists(
            'flavor %s does not exist' % flavor_id
        )
    return FLAVOR_MAPPING[flavor_id]
diff --git a/compass-deck/db/api/cluster.py b/compass-deck/db/api/cluster.py
new file mode 100644
index 0000000..7a7022c
--- /dev/null
+++ b/compass-deck/db/api/cluster.py
@@ -0,0 +1,2444 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cluster database operations."""
+import copy
+import functools
+import logging
+import re
+
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = [
+ 'name', 'os_name', 'owner',
+ 'adapter_name', 'flavor_name'
+]
+SUPPORTED_CLUSTERHOST_FIELDS = []
+RESP_FIELDS = [
+ 'id', 'name', 'os_name', 'os_id', 'adapter_id', 'flavor_id',
+ 'reinstall_distributed_system', 'flavor',
+ 'distributed_system_installed',
+ 'owner', 'adapter_name', 'flavor_name',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTERHOST_FIELDS = [
+ 'id', 'host_id', 'clusterhost_id', 'machine_id',
+ 'name', 'hostname', 'roles', 'os_installer',
+ 'cluster_id', 'clustername', 'location', 'tag',
+ 'networks', 'mac', 'switch_ip', 'port', 'switches',
+ 'os_installed', 'distributed_system_installed',
+ 'os_name', 'os_id', 'ip',
+ 'reinstall_os', 'reinstall_distributed_system',
+ 'owner', 'cluster_id',
+ 'created_at', 'updated_at',
+ 'patched_roles'
+]
+RESP_CONFIG_FIELDS = [
+ 'os_config',
+ 'package_config',
+ 'config_step',
+ 'config_validated',
+ 'created_at',
+ 'updated_at'
+]
+RESP_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config',
+ 'created_at',
+ 'updated_at'
+]
+RESP_METADATA_FIELDS = [
+ 'os_config', 'package_config'
+]
+RESP_CLUSTERHOST_CONFIG_FIELDS = [
+ 'package_config',
+ 'os_config',
+ 'config_step',
+ 'config_validated',
+ 'networks',
+ 'created_at',
+ 'updated_at'
+]
+RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config',
+ 'created_at',
+ 'updated_at'
+]
+RESP_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity',
+ 'status', 'ready',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTERHOST_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity',
+ 'ready', 'created_at', 'updated_at'
+]
+RESP_REVIEW_FIELDS = [
+ 'cluster', 'hosts'
+]
+RESP_DEPLOY_FIELDS = [
+ 'status', 'cluster', 'hosts'
+]
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+ADDED_FIELDS = ['name', 'adapter_id', 'os_id']
+OPTIONAL_ADDED_FIELDS = ['flavor_id']
+UPDATED_FIELDS = ['name', 'reinstall_distributed_system']
+ADDED_HOST_FIELDS = ['machine_id']
+UPDATED_HOST_FIELDS = ['name', 'reinstall_os']
+UPDATED_CLUSTERHOST_FIELDS = ['roles', 'patched_roles']
+PATCHED_CLUSTERHOST_FIELDS = ['patched_roles']
+UPDATED_CONFIG_FIELDS = [
+ 'put_os_config', 'put_package_config', 'config_step'
+]
+UPDATED_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config', 'deployed_package_config'
+]
+PATCHED_CONFIG_FIELDS = [
+ 'patched_os_config', 'patched_package_config', 'config_step'
+]
+UPDATED_CLUSTERHOST_CONFIG_FIELDS = [
+ 'put_os_config',
+ 'put_package_config'
+]
+PATCHED_CLUSTERHOST_CONFIG_FIELDS = [
+ 'patched_os_config',
+ 'patched_package_config'
+]
+UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config'
+]
+UPDATED_CLUSTERHOST_STATE_FIELDS = [
+ 'state', 'percentage', 'message', 'severity'
+]
+UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS = [
+ 'ready'
+]
+UPDATED_CLUSTER_STATE_FIELDS = ['state']
+IGNORE_UPDATED_CLUSTER_STATE_FIELDS = ['percentage', 'message', 'severity']
+UPDATED_CLUSTER_STATE_INTERNAL_FIELDS = ['ready']
+RESP_CLUSTERHOST_LOG_FIELDS = [
+ 'clusterhost_id', 'id', 'host_id', 'cluster_id',
+ 'filename', 'position', 'partial_line',
+ 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+ADDED_CLUSTERHOST_LOG_FIELDS = [
+ 'filename'
+]
+UPDATED_CLUSTERHOST_LOG_FIELDS = [
+ 'position', 'partial_line', 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+
+
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERS
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_clusters(user=None, session=None, **filters):
    """List clusters visible to the requesting user.

    Admins see every cluster; other users only see clusters whose
    owner matches their email.
    """
    clusters = utils.list_db_objects(
        session, models.Cluster, **filters
    )
    logging.info('user is %s', user.email)
    if user.is_admin or not clusters:
        return clusters
    return [
        cluster for cluster in clusters
        if cluster.owner == user.email
    ]
+
+
def _get_cluster(cluster_id, session=None, **kwargs):
    """Get cluster by id.

    Rejects non-integer ids up front (py2 int/long) with
    InvalidParameter so the DB is never queried with a bad key
    type; extra kwargs are forwarded to utils.get_db_object.
    """
    if isinstance(cluster_id, (int, long)):
        return utils.get_db_object(
            session, models.Cluster, id=cluster_id, **kwargs
        )
    raise exception.InvalidParameter(
        'cluster id %s type is not int compatible' % cluster_id
    )
+
+
def get_cluster_internal(cluster_id, session=None, **kwargs):
    """Helper function to get cluster.

    Should be only used by other files under db/api; thin wrapper
    over _get_cluster with no permission checks or field trimming.
    """
    return _get_cluster(cluster_id, session=session, **kwargs)
+
+
def _get_cluster_host(
    cluster_id, host_id, session=None, **kwargs
):
    """Get clusterhost by cluster id and host id.

    Resolves both parent records first so bad ids raise from the
    cluster/host lookups before the ClusterHost query runs.
    """
    cluster = _get_cluster(cluster_id, session=session, **kwargs)
    # imported here, not at module level — presumably to avoid a
    # circular import with the host api; confirm before hoisting.
    from compass.db.api import host as host_api
    host = host_api.get_host_internal(host_id, session=session, **kwargs)
    return utils.get_db_object(
        session, models.ClusterHost,
        cluster_id=cluster.id,
        host_id=host.id,
        **kwargs
    )
+
+
def _get_clusterhost(clusterhost_id, session=None, **kwargs):
    """Get clusterhost by clusterhost id.

    Rejects non-integer ids up front (py2 int/long) with
    InvalidParameter, mirroring _get_cluster.
    """
    if isinstance(clusterhost_id, (int, long)):
        return utils.get_db_object(
            session, models.ClusterHost,
            clusterhost_id=clusterhost_id,
            **kwargs
        )
    raise exception.InvalidParameter(
        'clusterhost id %s type is not int compatible' % clusterhost_id
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERS
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_cluster(
    cluster_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Get cluster info by id.

    exception_when_missing is forwarded to the DB lookup;
    presumably a falsy value suppresses the not-found error —
    confirm against utils.get_db_object.
    """
    return _get_cluster(
        cluster_id,
        session=session,
        exception_when_missing=exception_when_missing
    )
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERS)
def is_cluster_os_ready(
    cluster_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Return True when the os is ready on every host of the cluster.

    An empty cluster yields True (vacuous all()).
    """
    cluster = utils.get_db_object(
        session, models.Cluster, exception_when_missing, id=cluster_id)

    all_states = [
        clusterhost.host.state.ready
        for clusterhost in cluster.clusterhosts
    ]
    # lazy %-args: the message is only formatted when INFO is enabled
    logging.info("is_cluster_os_ready: all_states %s", all_states)

    return all(all_states)
+
+
def check_cluster_validated(cluster):
    """Raise Forbidden unless the cluster config has been validated."""
    if cluster.config_validated:
        return
    raise exception.Forbidden(
        'cluster %s is not validated' % cluster.name
    )
+
+
def check_clusterhost_validated(clusterhost):
    """Raise Forbidden unless the clusterhost config has been validated."""
    if clusterhost.config_validated:
        return
    raise exception.Forbidden(
        'clusterhost %s is not validated' % clusterhost.name
    )
+
+
def check_cluster_editable(
    cluster, user=None,
    check_in_installing=False
):
    """Raise Forbidden when the cluster must not be modified.

    With check_in_installing set (used e.g. when toggling
    reinstall_distributed_system) the cluster is rejected while its
    state is INSTALLING. Independently, a non-admin user may only
    edit clusters they created.
    """
    if check_in_installing and cluster.state.state == 'INSTALLING':
        raise exception.Forbidden(
            'cluster %s is not editable '
            'when state is installing' % cluster.name
        )
    if user and not user.is_admin and cluster.creator_id != user.id:
        raise exception.Forbidden(
            'cluster %s is not editable '
            'when user is not admin or cluster owner' % cluster.name
        )
+
+
def is_cluster_editable(
    cluster, user=None,
    check_in_installing=False
):
    """Return whether the cluster is editable.

    Boolean wrapper around check_cluster_editable: Forbidden maps
    to False, success to True.
    """
    try:
        check_cluster_editable(
            cluster, user=user,
            check_in_installing=check_in_installing
        )
    except exception.Forbidden:
        return False
    return True
+
+
@utils.supported_filters(
    ADDED_FIELDS,
    optional_support_keys=OPTIONAL_ADDED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(name=utils.check_name)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTER
)
@utils.wrap_to_dict(RESP_FIELDS)
def add_cluster(
    exception_when_existing=True,
    name=None, adapter_id=None, flavor_id=None,
    user=None, session=None, **kwargs
):
    """Create a cluster.

    Resolves the adapter (and optional flavor) first, and raises
    InvalidParameter when the flavor does not belong to the given
    adapter, before inserting the Cluster record owned by user.
    """
    adapter = adapter_api.get_adapter(
        adapter_id, user=user, session=session
    )
    # if flavor_id is not None, also set flavor field.
    # In future maybe we can move the use of flavor from
    # models.py to db/api and explictly get flavor when
    # needed instead of setting flavor into cluster record.
    flavor = {}
    if flavor_id:
        flavor = adapter_api.get_flavor(
            flavor_id,
            user=user, session=session
        )
        if flavor['adapter_id'] != adapter['id']:
            raise exception.InvalidParameter(
                'flavor %s is not of adapter %s' % (
                    flavor_id, adapter_id
                )
            )

    cluster = utils.add_db_object(
        session, models.Cluster, exception_when_existing,
        name, user.id, adapter_id=adapter_id,
        flavor_id=flavor_id, flavor=flavor, **kwargs
    )
    return cluster
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(name=utils.check_name)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTER
)
@utils.wrap_to_dict(RESP_FIELDS)
def update_cluster(cluster_id, user=None, session=None, **kwargs):
    """Update a cluster.

    The stricter in-installing editability check is applied only
    when the update touches reinstall_distributed_system.
    """
    cluster = _get_cluster(
        cluster_id, session=session
    )
    check_cluster_editable(
        cluster, user=user,
        check_in_installing=(
            kwargs.get('reinstall_distributed_system', False)
        )
    )
    return utils.update_db_object(session, cluster, **kwargs)
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_CLUSTER
)
@utils.wrap_to_dict(
    RESP_FIELDS + ['status', 'cluster', 'hosts'],
    cluster=RESP_FIELDS,
    hosts=RESP_CLUSTERHOST_FIELDS
)
def del_cluster(
    cluster_id, force=False, from_database_only=False,
    delete_underlying_host=False, user=None, session=None, **kwargs
):
    """Delete a cluster.

    If force, the cluster will be deleted anyway. It is used by cli to
    force clean a cluster in any case.
    If from_database_only, the cluster recored will only be removed from
    database. Otherwise, a del task is sent to celery to do clean deletion.
    If delete_underlying_host, all hosts under this cluster will also be
    deleted.
    The backend will call del_cluster again with from_database_only set
    when it has done the deletion work on os installer/package installer.

    Returns either the deleted record(s) or a status dict when the
    deletion was delegated to the celery backend.
    """
    cluster = _get_cluster(
        cluster_id, session=session
    )
    logging.debug(
        'delete cluster %s with force=%s '
        'from_database_only=%s delete_underlying_host=%s',
        cluster.id, force, from_database_only, delete_underlying_host
    )
    # force set cluster state to ERROR and the state of any clusterhost
    # in the cluster to ERROR when we want to delete the cluster anyway
    # even the cluster is in installing or already installed.
    # It let the api know the deleting is in doing when backend is doing
    # the real deleting.
    # In future we may import a new state like INDELETE to indicate
    # the deleting is processing.
    # We need discuss about if we can delete a cluster when it is already
    # installed by api.
    for clusterhost in cluster.clusterhosts:
        if clusterhost.state.state != 'UNINITIALIZED' and force:
            clusterhost.state.state = 'ERROR'
        if delete_underlying_host:
            host = clusterhost.host
            if host.state.state != 'UNINITIALIZED' and force:
                host.state.state = 'ERROR'
    if cluster.state.state != 'UNINITIALIZED' and force:
        cluster.state.state = 'ERROR'

    # NOTE(review): editability is checked only after the force-state
    # rewrite above, so a forced delete passes the INSTALLING guard.
    check_cluster_editable(
        cluster, user=user,
        check_in_installing=True
    )

    # delete underlying host if delete_underlying_host is set.
    if delete_underlying_host:
        for clusterhost in cluster.clusterhosts:
            # delete underlying host only user has permission.
            from compass.db.api import host as host_api
            host = clusterhost.host
            if host_api.is_host_editable(
                host, user=user, check_in_installing=True
            ):
                # Delete host record directly in database when there is no need
                # to do the deletion in backend or from_database_only is set.
                if host.state.state == 'UNINITIALIZED' or from_database_only:
                    utils.del_db_object(
                        session, host
                    )

    # Delete cluster record directly in database when there
    # is no need to do the deletion in backend or from_database_only is set.
    if cluster.state.state == 'UNINITIALIZED' or from_database_only:
        return utils.del_db_object(
            session, cluster
        )
    else:
        from compass.tasks import client as celery_client
        logging.info('send del cluster %s task to celery', cluster_id)
        celery_client.celery.send_task(
            'compass.tasks.delete_cluster',
            (
                user.email, cluster.id,
                [
                    clusterhost.host_id
                    for clusterhost in cluster.clusterhosts
                ],
                delete_underlying_host
            ),
            queue=user.email,
            exchange=user.email,
            routing_key=user.email
        )
        return {
            'status': 'delete action is sent',
            'cluster': cluster,
            'hosts': cluster.clusterhosts
        }
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTER_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def get_cluster_config(cluster_id, user=None, session=None, **kwargs):
    """Get cluster config.

    Returns the cluster record; wrap_to_dict trims it to the
    config-related RESP_CONFIG_FIELDS.
    """
    return _get_cluster(cluster_id, session=session)
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTER_CONFIG
)
@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
def get_cluster_deployed_config(cluster_id, user=None, session=None, **kwargs):
    """Get cluster deployed config.

    Returns the cluster record; wrap_to_dict trims it to the
    deployed-config RESP_DEPLOYED_CONFIG_FIELDS.
    """
    return _get_cluster(cluster_id, session=session)
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_cluster_metadata(cluster_id, user=None, session=None, **kwargs):
    """Get cluster metadata.

    Merges os metadata (when the cluster has an os) with flavor
    metadata (when it has a flavor). If no flavor in the cluster,
    it means this is a os only cluster and package metadata is
    ignored.
    """
    cluster = _get_cluster(cluster_id, session=session)
    metadatas = {}
    os_name = cluster.os_name
    if os_name:
        metadatas.update(
            metadata_api.get_os_metadata(
                os_name, session=session
            )
        )
    flavor_id = cluster.flavor_id
    if flavor_id:
        metadatas.update(
            metadata_api.get_flavor_metadata(
                flavor_id,
                user=user, session=session
            )
        )

    return metadatas
+
+
def _cluster_os_config_validates(
    config, cluster, session=None, user=None, **kwargs
):
    """Validate an os config against the cluster's os metadata."""
    os_id = cluster.os_id
    metadata_api.validate_os_config(config, os_id)
+
+
def _cluster_package_config_validates(
    config, cluster, session=None, user=None, **kwargs
):
    """Validate a package config against the cluster's flavor metadata."""
    flavor_id = cluster.flavor_id
    metadata_api.validate_flavor_config(config, flavor_id)
+
+
@utils.input_validates_with_args(
    put_os_config=_cluster_os_config_validates,
    put_package_config=_cluster_package_config_validates
)
@utils.output_validates_with_args(
    os_config=_cluster_os_config_validates,
    package_config=_cluster_package_config_validates
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def _update_cluster_config(cluster, session=None, user=None, **kwargs):
    """Write config updates to a cluster after an editability check.

    Input/output validator decorators enforce that os and package
    configs conform to the relevant metadata before and after the write.
    """
    check_cluster_editable(cluster, user=user)
    updated = utils.update_db_object(session, cluster, **kwargs)
    return updated
+
+
# Rename incoming os_config/package_config kwargs to their
# deployed_* counterparts before they reach the db layer.
@utils.replace_filters(
    os_config='deployed_os_config',
    package_config='deployed_package_config'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_DEPLOYED_CONFIG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTER_CONFIG
)
@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
def update_cluster_deployed_config(
    cluster_id, user=None, session=None, **kwargs
):
    """Store deployed config for a cluster.

    Requires the cluster to be editable and its config already
    validated.
    """
    cluster = _get_cluster(cluster_id, session=session)
    check_cluster_editable(cluster, user=user)
    check_cluster_validated(cluster)
    updated = utils.update_db_object(session, cluster, **kwargs)
    return updated
+
+
# Rename os_config/package_config kwargs to put_* so the db layer
# treats them as full replacements rather than patches.
@utils.replace_filters(
    os_config='put_os_config',
    package_config='put_package_config'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_CONFIG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTER_CONFIG
)
def update_cluster_config(cluster_id, user=None, session=None, **kwargs):
    """Replace config fields of a cluster (full update semantics)."""
    cluster = _get_cluster(cluster_id, session=session)
    return _update_cluster_config(
        cluster, session=session, user=user, **kwargs
    )
+
+
# Rename os_config/package_config kwargs to patched_* so the db layer
# merges them into the existing config instead of replacing it.
@utils.replace_filters(
    os_config='patched_os_config',
    package_config='patched_package_config'
)
@utils.supported_filters(
    optional_support_keys=PATCHED_CONFIG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTER_CONFIG
)
def patch_cluster_config(cluster_id, user=None, session=None, **kwargs):
    """Merge config fields into a cluster (patch semantics)."""
    cluster = _get_cluster(cluster_id, session=session)
    return _update_cluster_config(
        cluster, session=session, user=user, **kwargs
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_CLUSTER_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def del_cluster_config(cluster_id, user=None, session=None):
    """Reset a cluster's os and package configs to empty.

    Also clears the config_validated flag, since an empty config is no
    longer validated.
    """
    cluster = _get_cluster(cluster_id, session=session)
    check_cluster_editable(cluster, user=user)
    return utils.update_db_object(
        session, cluster,
        os_config={}, package_config={},
        config_validated=False
    )
+
+
def _roles_validates(roles, cluster, session=None, user=None):
    """Ensure every given role name exists in the cluster flavor's roles.

    Raises:
        InvalidParameter: if roles are given but the cluster has no
            flavor, or if any role is unknown to the flavor.
    """
    if not roles:
        return
    if not cluster.flavor_name:
        raise exception.InvalidParameter(
            'not flavor in cluster %s' % cluster.name
        )
    cluster_roles = [role['name'] for role in cluster.flavor['roles']]
    for role in roles:
        if role in cluster_roles:
            continue
        raise exception.InvalidParameter(
            'role %s is not in cluster roles %s' % (
                role, cluster_roles
            )
        )
+
+
def _cluster_host_roles_validates(
    value, cluster, host, session=None, user=None, **kwargs
):
    """Validate clusterhost roles against the cluster's flavor roles."""
    roles = value
    _roles_validates(roles, cluster, session=session, user=user)
+
+
def _clusterhost_roles_validates(
    value, clusterhost, session=None, user=None, **kwargs
):
    """Validate roles for a clusterhost via its owning cluster."""
    cluster = clusterhost.cluster
    _roles_validates(value, cluster, session=session, user=user)
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_HOST_FIELDS,
    ignore_support_keys=UPDATED_CLUSTERHOST_FIELDS
)
@utils.input_validates(name=utils.check_name)
def _add_host_if_not_exist(
    machine_id, cluster, session=None, user=None, **kwargs
):
    """Return the underlying host for machine_id, creating it if missing.

    If the host already exists, any host fields in kwargs are applied to
    it, but only when the host is editable by the cluster creator;
    otherwise the update is skipped with a debug log. A new host
    inherits os name/installer from the cluster's adapter and the
    cluster creator as its creator.

    Returns:
        the (possibly newly created) host db object.
    """
    from compass.db.api import host as host_api
    host = host_api.get_host_internal(
        machine_id, session=session, exception_when_missing=False
    )
    if host:
        if kwargs:
            # Only touch the underlying host when it is editable;
            # reinstall_os additionally requires it not to be installing.
            if host_api.is_host_editable(
                host, user=cluster.creator,
                check_in_installing=kwargs.get('reinstall_os', False),
            ):
                utils.update_db_object(
                    session, host,
                    **kwargs
                )
            else:
                # Message matches _update_host_if_necessary for
                # consistent, greppable logs; lazy %-args avoid eager
                # string formatting.
                logging.debug(
                    'ignore update host %s since it is not editable',
                    host.name
                )
        else:
            logging.debug('nothing to update for host %s', host.name)
    else:
        from compass.db.api import adapter_holder as adapter_api
        adapter = adapter_api.get_adapter(
            cluster.adapter_name, user=user, session=session
        )
        host = utils.add_db_object(
            session, models.Host, False, machine_id,
            os_name=cluster.os_name,
            os_installer=adapter['os_installer'],
            creator=cluster.creator,
            **kwargs
        )
    return host
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_FIELDS,
    ignore_support_keys=UPDATED_HOST_FIELDS
)
@utils.input_validates_with_args(
    roles=_cluster_host_roles_validates
)
def _add_clusterhost_only(
    cluster, host,
    exception_when_existing=False,
    session=None, user=None,
    **kwargs
):
    """Create the ClusterHost row linking *host* into *cluster*.

    Adding a host invalidates any previous deployment, so a cluster not
    in UNINITIALIZED state is reset to UNINITIALIZED with zero progress.

    Returns the new (or existing, when exception_when_existing is
    False) ClusterHost db object.
    """
    if not cluster.state.state == "UNINITIALIZED":
        # Reset deployment progress; the state attribute is set both
        # directly and via update_db_object below.
        # NOTE(review): the direct attribute assignments look redundant
        # with the update_db_object call — confirm whether both are
        # needed for the ORM session to flush all three fields.
        cluster.state.ready = False
        cluster.state.state = "UNINITIALIZED"
        cluster.state.percentage = 0.0
        utils.update_db_object(session, cluster.state, state="UNINITIALIZED")

    return utils.add_db_object(
        session, models.ClusterHost, exception_when_existing,
        cluster.id, host.id, **kwargs
    )
+
+
@utils.supported_filters(
    ADDED_HOST_FIELDS,
    optional_support_keys=UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
def _add_clusterhost(
    cluster,
    exception_when_existing=False,
    session=None, user=None, machine_id=None, **kwargs
):
    """Create a clusterhost, creating the underlying host if needed.

    The decorator stacks on the two helpers route host fields to the
    host and clusterhost fields to the clusterhost.
    """
    host = _add_host_if_not_exist(
        machine_id, cluster, session=session,
        user=user, **kwargs
    )
    clusterhost = _add_clusterhost_only(
        cluster, host, exception_when_existing=exception_when_existing,
        session=session, user=user, **kwargs
    )
    return clusterhost
+
+
def _add_clusterhosts(cluster, machines, session=None, user=None):
    """Add machines to cluster.

    Args:
        machines: list of dict which contains clusterhost attrs to
            update.

    Examples:
        [{'machine_id': 1, 'name': 'host1'}]
    """
    check_cluster_editable(
        cluster, user=user,
        check_in_installing=True
    )
    # A successfully deployed cluster moves back to UPDATE_PREPARING
    # when new hosts are added.  The original code used '==' here,
    # silently comparing instead of assigning; fixed to match
    # _set_clusterhosts and add_cluster_host.
    if cluster.state.state == 'SUCCESSFUL':
        cluster.state.state = 'UPDATE_PREPARING'
    for machine_dict in machines:
        _add_clusterhost(
            cluster, session=session, user=user, **machine_dict
        )
+
+
def _remove_clusterhosts(cluster, hosts, session=None, user=None):
    """Detach the given hosts from a cluster.

    Args:
        hosts: list of host ids whose ClusterHost rows are deleted.

    Only removes the linking rows; the underlying hosts are untouched.
    """
    check_cluster_editable(
        cluster, user=user,
        check_in_installing=True
    )
    utils.del_db_objects(
        session, models.ClusterHost,
        cluster_id=cluster.id, host_id=hosts
    )
+
+
def _set_clusterhosts(cluster, machines, session=None, user=None):
    """Replace the full host list of a cluster with *machines*.

    Args:
        machines: list of dict which contains clusterhost attrs to
            update.

    Examples:
        [{'machine_id': 1, 'name': 'host1'}]
    """
    check_cluster_editable(
        cluster, user=user,
        check_in_installing=True
    )
    # Drop every existing clusterhost link, then re-add from scratch.
    utils.del_db_objects(
        session, models.ClusterHost,
        cluster_id=cluster.id
    )
    if cluster.state.state == 'SUCCESSFUL':
        cluster.state.state = 'UPDATE_PREPARING'
    for machine_dict in machines:
        # exception_when_existing=True: duplicates are an error here.
        _add_clusterhost(
            cluster, True, session=session, user=user, **machine_dict
        )
+
+
@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERHOSTS
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
def list_cluster_hosts(cluster_id, user=None, session=None, **filters):
    """List the clusterhosts belonging to one cluster, with filters."""
    cluster = _get_cluster(cluster_id, session=session)
    clusterhosts = utils.list_db_objects(
        session, models.ClusterHost, cluster_id=cluster.id,
        **filters
    )
    return clusterhosts
+
+
@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERHOSTS
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
def list_clusterhosts(user=None, session=None, **filters):
    """List clusterhosts across every cluster, with filters."""
    clusterhosts = utils.list_db_objects(
        session, models.ClusterHost, **filters
    )
    return clusterhosts
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERHOSTS
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
def get_cluster_host(
    cluster_id, host_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Look up a clusterhost by (cluster id, host id)."""
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session,
        exception_when_missing=exception_when_missing,
    )
    return clusterhost
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERHOSTS
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
def get_clusterhost(
    clusterhost_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Look up a clusterhost by its own id."""
    clusterhost = _get_clusterhost(
        clusterhost_id, session=session,
        exception_when_missing=exception_when_missing,
        user=user
    )
    return clusterhost
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTER_HOSTS
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
def add_cluster_host(
    cluster_id, exception_when_existing=True,
    user=None, session=None, **kwargs
):
    """Attach one host to a cluster.

    A previously successful cluster is moved back to UPDATE_PREPARING
    since its membership changed.
    """
    cluster = _get_cluster(cluster_id, session=session)
    check_cluster_editable(
        cluster, user=user,
        check_in_installing=True
    )
    if cluster.state.state == 'SUCCESSFUL':
        cluster.state.state = 'UPDATE_PREPARING'
    clusterhost = _add_clusterhost(
        cluster, exception_when_existing,
        session=session, user=user, **kwargs
    )
    return clusterhost
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_HOST_FIELDS,
    ignore_support_keys=(
        UPDATED_CLUSTERHOST_FIELDS +
        PATCHED_CLUSTERHOST_FIELDS
    )
)
def _update_host_if_necessary(
    clusterhost, session=None, user=None, **kwargs
):
    """Apply host-level kwargs to the clusterhost's underlying host.

    The update is skipped (with a debug log) when there is nothing to
    update or when the host is not editable by the cluster creator.

    Returns the underlying host db object.
    """
    host = clusterhost.host
    if not kwargs:
        logging.debug(
            'nothing to update for host %s', host.name
        )
        return host
    from compass.db.api import host as host_api
    editable = host_api.is_host_editable(
        host, user=clusterhost.cluster.creator,
        check_in_installing=kwargs.get('reinstall_os', False),
    )
    if editable:
        utils.update_db_object(
            session, host,
            **kwargs
        )
    else:
        logging.debug(
            'ignore update host %s since it is not editable' % host.name
        )
    return host
+
+
@utils.supported_filters(
    optional_support_keys=(
        UPDATED_CLUSTERHOST_FIELDS +
        PATCHED_CLUSTERHOST_FIELDS
    ),
    ignore_support_keys=UPDATED_HOST_FIELDS
)
@utils.input_validates_with_args(
    roles=_clusterhost_roles_validates,
    patched_roles=_clusterhost_roles_validates
)
def _update_clusterhost_only(
    clusterhost, session=None, user=None, **kwargs
):
    """Write clusterhost-level fields after checking cluster editability."""
    check_cluster_editable(clusterhost.cluster, user=user)
    updated = utils.update_db_object(session, clusterhost, **kwargs)
    return updated
+
+
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
def _update_clusterhost(clusterhost, session=None, user=None, **kwargs):
    """Update a clusterhost, first pushing host fields to its host.

    Each helper's decorators pick out the kwargs relevant to it.
    """
    _update_host_if_necessary(
        clusterhost, session=session, user=user, **kwargs
    )
    updated = _update_clusterhost_only(
        clusterhost, session=session, user=user, **kwargs
    )
    return updated
+
+
@utils.supported_filters(
    optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTER_HOSTS
)
def update_cluster_host(
    cluster_id, host_id, user=None,
    session=None, **kwargs
):
    """Update a clusterhost addressed by (cluster id, host id)."""
    logging.info('updating kwargs: %s', kwargs)
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    updated = _update_clusterhost(
        clusterhost, session=session, user=user, **kwargs
    )
    return updated
+
+
@utils.supported_filters(
    optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTER_HOSTS
)
def update_clusterhost(
    clusterhost_id, user=None,
    session=None, **kwargs
):
    """Update a clusterhost addressed by clusterhost id."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    updated = _update_clusterhost(
        clusterhost, session=session, user=user, **kwargs
    )
    return updated
+
+
# Rename the roles kwarg to patched_roles so the db layer merges roles
# instead of replacing them.
@utils.replace_filters(
    roles='patched_roles'
)
@utils.supported_filters(
    optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTER_HOSTS
)
def patch_cluster_host(
    cluster_id, host_id, user=None,
    session=None, **kwargs
):
    """Patch a clusterhost addressed by (cluster id, host id)."""
    logging.info("kwargs are %s", kwargs)
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    return _update_clusterhost(
        clusterhost, session=session, user=user, **kwargs
    )
+
+
# Rename the roles kwarg to patched_roles so the db layer merges roles
# instead of replacing them.
@utils.replace_filters(
    roles='patched_roles'
)
@utils.supported_filters(
    optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTER_HOSTS
)
def patch_clusterhost(
    clusterhost_id, user=None, session=None,
    **kwargs
):
    """Patch a clusterhost addressed by clusterhost id."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    patched = _update_clusterhost(
        clusterhost, session=session, user=user, **kwargs
    )
    return patched
+
+
@user_api.check_user_permission(
    permission.PERMISSION_DEL_CLUSTER_HOST
)
@utils.wrap_to_dict(
    RESP_CLUSTERHOST_FIELDS + ['status', 'host'],
    host=RESP_CLUSTERHOST_FIELDS
)
def _del_cluster_host(
    clusterhost,
    force=False, from_database_only=False,
    delete_underlying_host=False, user=None,
    session=None, **kwargs
):
    """delete clusterhost.

    If force, the cluster host will be deleted anyway.
    If from_database_only, the cluster host record will only be
    deleted from database. Otherwise a celery task is sent to do
    clean deletion.
    If delete_underlying_host, the underlying host will also be deleted.
    The backend will call _del_cluster_host again when the clusterhost is
    deleted from os installer/package installer with from_database_only
    set.

    Returns either the deleted db object (database-only path) or a
    status dict when the deletion was delegated to celery.
    """
    # force set clusterhost state to ERROR when we want to delete the
    # clusterhost anyway even the clusterhost is in installing or already
    # installed. It let the api know the deleting is in doing when backend
    # is doing the real deleting. In future we may import a new state like
    # INDELETE to indicate the deleting is processing.
    # We need discuss about if we can delete a clusterhost when it is already
    # installed by api.
    if clusterhost.state.state != 'UNINITIALIZED' and force:
        clusterhost.state.state = 'ERROR'
    if not force:
        check_cluster_editable(
            clusterhost.cluster, user=user,
            check_in_installing=True
        )
    # delete underlying host if delete_underlying_host is set.
    if delete_underlying_host:
        host = clusterhost.host
        if host.state.state != 'UNINITIALIZED' and force:
            host.state.state = 'ERROR'
        # only delete the host when user have the permission to delete it.
        import compass.db.api.host as host_api
        if host_api.is_host_editable(
            host, user=user,
            check_in_installing=True
        ):
            # if there is no need to do the deletion by backend or
            # from_database_only is set, we only delete the record
            # in database.
            if host.state.state == 'UNINITIALIZED' or from_database_only:
                utils.del_db_object(
                    session, host
                )

    # if there is no need to do the deletion by backend or
    # from_database_only is set, we only delete the record in database.
    if clusterhost.state.state == 'UNINITIALIZED' or from_database_only:
        return utils.del_db_object(
            session, clusterhost
        )
    else:
        logging.info(
            'send del cluster %s host %s task to celery',
            clusterhost.cluster_id, clusterhost.host_id
        )
        from compass.tasks import client as celery_client
        # Per-user queue/exchange/routing key so deletions are
        # serialized per user in the backend.
        celery_client.celery.send_task(
            'compass.tasks.delete_cluster_host',
            (
                user.email, clusterhost.cluster_id, clusterhost.host_id,
                delete_underlying_host
            ),
            queue=user.email,
            exchange=user.email,
            routing_key=user.email
        )
        return {
            'status': 'delete action sent',
            'host': clusterhost,
        }
+
+
@utils.supported_filters([])
@database.run_in_session()
def del_cluster_host(
    cluster_id, host_id,
    force=False, from_database_only=False,
    delete_underlying_host=False, user=None,
    session=None, **kwargs
):
    """Delete the clusterhost addressed by (cluster id, host id).

    All deletion flags are forwarded to _del_cluster_host.
    """
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    result = _del_cluster_host(
        clusterhost, force=force, from_database_only=from_database_only,
        delete_underlying_host=delete_underlying_host, user=user,
        session=session, **kwargs
    )
    return result
+
+
@utils.supported_filters([])
@database.run_in_session()
def del_clusterhost(
    clusterhost_id,
    force=False, from_database_only=False,
    delete_underlying_host=False, user=None,
    session=None, **kwargs
):
    """Delete the clusterhost addressed by clusterhost id.

    All deletion flags are forwarded to _del_cluster_host.
    """
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    result = _del_cluster_host(
        clusterhost, force=force, from_database_only=from_database_only,
        delete_underlying_host=delete_underlying_host, user=user,
        session=session, **kwargs
    )
    return result
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
def get_cluster_host_config(
    cluster_id, host_id, user=None,
    session=None, **kwargs
):
    """Return config fields of the clusterhost (cluster id, host id)."""
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    return clusterhost
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
def get_cluster_host_deployed_config(
    cluster_id, host_id, user=None, session=None, **kwargs
):
    """Return deployed-config fields of (cluster id, host id)."""
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    return clusterhost
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
def get_clusterhost_config(clusterhost_id, user=None, session=None, **kwargs):
    """Return config fields of the clusterhost with the given id."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    return clusterhost
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
def get_clusterhost_deployed_config(
    clusterhost_id, user=None,
    session=None, **kwargs
):
    """Return deployed-config fields of the clusterhost with given id."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    return clusterhost
+
+
def _clusterhost_os_config_validates(
    config, clusterhost, session=None, user=None, **kwargs
):
    """Validate an os config for the clusterhost's underlying host.

    Requires the host to be editable by *user*.
    """
    from compass.db.api import host as host_api
    host = clusterhost.host
    host_api.check_host_editable(host, user=user)
    os_id = host.os_id
    metadata_api.validate_os_config(config, os_id)
+
+
def _clusterhost_package_config_validates(
    config, clusterhost, session=None, user=None, **kwargs
):
    """Validate a package config against the owning cluster's flavor.

    Requires the cluster to be editable by *user*.
    """
    cluster = clusterhost.cluster
    check_cluster_editable(cluster, user=user)
    flavor_id = cluster.flavor_id
    metadata_api.validate_flavor_config(config, flavor_id)
+
+
def _filter_clusterhost_host_editable(
    config, clusterhost, session=None, user=None, **kwargs
):
    """Tell input_filters whether the underlying host may be edited.

    Returns True when the host is editable by *user*; fields gated by
    this filter are dropped otherwise.
    """
    from compass.db.api import host as host_api
    underlying_host = clusterhost.host
    return host_api.is_host_editable(underlying_host, user=user)
+
+
@utils.input_filters(
    put_os_config=_filter_clusterhost_host_editable,
    patched_os_config=_filter_clusterhost_host_editable
)
@utils.input_validates_with_args(
    put_os_config=_clusterhost_os_config_validates,
    put_package_config=_clusterhost_package_config_validates
)
@utils.output_validates_with_args(
    os_config=_clusterhost_os_config_validates,
    package_config=_clusterhost_package_config_validates
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
def _update_clusterhost_config(clusterhost, session=None, user=None, **kwargs):
    """Write config fields of a clusterhost.

    Editability and metadata validation are enforced by the filter and
    validator decorators above.
    """
    updated = utils.update_db_object(session, clusterhost, **kwargs)
    return updated
+
+
def _clusterhost_host_validated(
    config, clusterhost, session=None, user=None, **kwargs
):
    """Require the underlying host to be editable and validated."""
    from compass.db.api import host as host_api
    underlying_host = clusterhost.host
    host_api.check_host_editable(underlying_host, user=user)
    host_api.check_host_validated(underlying_host)
+
+
def _clusterhost_cluster_validated(
    config, clusterhost, session=None, user=None, **kwargs
):
    """Require an editable cluster and a validated clusterhost."""
    owning_cluster = clusterhost.cluster
    check_cluster_editable(owning_cluster, user=user)
    check_clusterhost_validated(clusterhost)
+
+
@utils.input_filters(
    deployed_os_config=_filter_clusterhost_host_editable,
)
@utils.input_validates_with_args(
    deployed_os_config=_clusterhost_host_validated,
    deployed_package_config=_clusterhost_cluster_validated
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
def _update_clusterhost_deployed_config(
    clusterhost, session=None, user=None, **kwargs
):
    """Write deployed config fields of a clusterhost.

    Validator decorators ensure host/cluster were validated first.
    """
    updated = utils.update_db_object(session, clusterhost, **kwargs)
    return updated
+
+
# Rename os_config/package_config kwargs to put_* so the db layer
# treats them as full replacements rather than patches.
@utils.replace_filters(
    os_config='put_os_config',
    package_config='put_package_config'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
)
def update_cluster_host_config(
    cluster_id, host_id, user=None, session=None, **kwargs
):
    """Replace config of the clusterhost (cluster id, host id)."""
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    return _update_clusterhost_config(
        clusterhost, user=user, session=session, **kwargs
    )
+
+
# Rename os_config/package_config kwargs to their deployed_*
# counterparts before they reach the db layer.
@utils.replace_filters(
    os_config='deployed_os_config',
    package_config='deployed_package_config'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
)
def update_cluster_host_deployed_config(
    cluster_id, host_id, user=None, session=None, **kwargs
):
    """Store deployed config of the clusterhost (cluster id, host id)."""
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    return _update_clusterhost_deployed_config(
        clusterhost, session=session, user=user, **kwargs
    )
+
+
# Rename os_config/package_config kwargs to put_* so the db layer
# treats them as full replacements rather than patches.
@utils.replace_filters(
    os_config='put_os_config',
    package_config='put_package_config'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
)
def update_clusterhost_config(
    clusterhost_id, user=None, session=None, **kwargs
):
    """Replace config of the clusterhost with the given id."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    return _update_clusterhost_config(
        clusterhost, session=session, user=user, **kwargs
    )
+
+
# Rename os_config/package_config kwargs to their deployed_*
# counterparts before they reach the db layer.
@utils.replace_filters(
    os_config='deployed_os_config',
    package_config='deployed_package_config'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
)
def update_clusterhost_deployed_config(
    clusterhost_id, user=None, session=None, **kwargs
):
    """Store deployed config of the clusterhost with the given id."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    return _update_clusterhost_deployed_config(
        clusterhost, session=session, user=user, **kwargs
    )
+
+
# Rename os_config/package_config kwargs to patched_* so the db layer
# merges them into the existing config instead of replacing it.
@utils.replace_filters(
    os_config='patched_os_config',
    package_config='patched_package_config'
)
@utils.supported_filters(
    optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
)
def patch_cluster_host_config(
    cluster_id, host_id, user=None, session=None, **kwargs
):
    """Merge config into the clusterhost (cluster id, host id)."""
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    return _update_clusterhost_config(
        clusterhost, session=session, user=user, **kwargs
    )
+
+
# Rename os_config/package_config kwargs to patched_* so the db layer
# merges them into the existing config instead of replacing it.
@utils.replace_filters(
    os_config='patched_os_config',
    package_config='patched_package_config'
)
@utils.supported_filters(
    optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
)
def patch_clusterhost_config(
    clusterhost_id, user=None, session=None, **kwargs
):
    """Merge config into the clusterhost with the given id."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    return _update_clusterhost_config(
        clusterhost, session=session, user=user, **kwargs
    )
+
+
def _clusterhost_host_editable(
    config, clusterhost, session=None, user=None, **kwargs
):
    """Require the clusterhost's underlying host to be editable."""
    from compass.db.api import host as host_api
    underlying_host = clusterhost.host
    host_api.check_host_editable(underlying_host, user=user)
+
+
def _clusterhost_cluster_editable(
    config, clusterhost, session=None, user=None, **kwargs
):
    """Require the clusterhost's owning cluster to be editable."""
    owning_cluster = clusterhost.cluster
    check_cluster_editable(owning_cluster, user=user)
+
+
@utils.supported_filters(
    optional_support_keys=['os_config', 'package_config']
)
@utils.input_filters(
    os_config=_filter_clusterhost_host_editable,
)
@utils.output_validates_with_args(
    package_config=_clusterhost_cluster_editable
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
def _delete_clusterhost_config(
    clusterhost, session=None, user=None, **kwargs
):
    """Clear clusterhost config fields and the validated flag.

    The os_config reset is dropped by the input filter when the
    underlying host is not editable.
    """
    cleared = utils.update_db_object(
        session, clusterhost, config_validated=False,
        **kwargs
    )
    return cleared
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
)
def delete_cluster_host_config(
    cluster_id, host_id, user=None, session=None
):
    """Clear config of the clusterhost (cluster id, host id)."""
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    cleared = _delete_clusterhost_config(
        clusterhost, session=session, user=user,
        os_config={}, package_config={}
    )
    return cleared
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
)
# NOTE(review): _delete_clusterhost_config already applies
# wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS); this outer decorator
# appears redundant (the sibling delete_cluster_host_config omits it)
# — confirm before removing.
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
def delete_clusterhost_config(clusterhost_id, user=None, session=None):
    """Delete a clusterhost config by clusterhost id."""
    clusterhost = _get_clusterhost(
        clusterhost_id, session=session
    )
    return _delete_clusterhost_config(
        clusterhost, session=session, user=user,
        os_config={}, package_config={}
    )
+
+
@utils.supported_filters(
    optional_support_keys=['add_hosts', 'remove_hosts', 'set_hosts']
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTER_HOSTS
)
@utils.wrap_to_dict(
    ['hosts'],
    hosts=RESP_CLUSTERHOST_FIELDS
)
def update_cluster_hosts(
    cluster_id, add_hosts=None, set_hosts=None,
    remove_hosts=None, user=None, session=None
):
    """Add, remove and/or set cluster hosts in one call.

    Args:
        add_hosts: kwargs dict forwarded to _add_clusterhosts.
        remove_hosts: kwargs dict forwarded to _remove_clusterhosts.
        set_hosts: kwargs dict forwarded to _set_clusterhosts; an empty
            dict still triggers a (no-op) replace, matching the
            original `is not None` semantics.

    Returns:
        dict with the cluster's resulting host list under 'hosts'.

    The mutable-default `{}` arguments were replaced with None
    sentinels (same truthiness, so behavior is unchanged) to avoid the
    shared-mutable-default pitfall.
    """
    cluster = _get_cluster(cluster_id, session=session)
    if remove_hosts:
        _remove_clusterhosts(
            cluster, session=session, user=user, **remove_hosts
        )
    if add_hosts:
        _add_clusterhosts(
            cluster, session=session, user=user, **add_hosts
        )
    if set_hosts is not None:
        _set_clusterhosts(
            cluster, session=session, user=user, **set_hosts
        )

    return {
        'hosts': list_cluster_hosts(cluster_id, session=session)
    }
+
+
def validate_clusterhost(clusterhost, session=None):
    """Require a clusterhost in a flavored cluster to have roles.

    Raises:
        InvalidParameter: when the owning cluster has a flavor but the
            clusterhost has no roles assigned.
    """
    if clusterhost.roles:
        return
    if clusterhost.cluster.flavor_name:
        raise exception.InvalidParameter(
            'empty roles for clusterhost %s' % clusterhost.name
        )
+
+
def validate_cluster(cluster, session=None):
    """Check a cluster is deployable.

    Raises:
        InvalidParameter: when the cluster has no hosts, when a
            non-optional flavor role is assigned to no host, or when one
            host interface sits on more than one subnet.
    """
    if not cluster.clusterhosts:
        raise exception.InvalidParameter(
            'cluster %s does not have any hosts' % cluster.name
        )
    flavor_roles = cluster.flavor['roles'] if cluster.flavor_name else []
    required_roles = set(
        role['name'] for role in flavor_roles if not role.get('optional')
    )
    # Collect every assigned role name and, per interface, the set of
    # subnets its host networks belong to.
    assigned_roles = set()
    subnets_by_interface = {}
    for clusterhost in cluster.clusterhosts:
        for role in clusterhost.roles:
            assigned_roles.add(role['name'])
        for host_network in clusterhost.host.host_networks:
            subnets_by_interface.setdefault(
                host_network.interface, set([])
            ).add(host_network.subnet.subnet)
    missing_roles = required_roles - assigned_roles
    if missing_roles:
        raise exception.InvalidParameter(
            'cluster %s have some roles %s not assigned to any host' % (
                cluster.name, list(missing_roles)
            )
        )
    for interface, subnets in subnets_by_interface.items():
        if len(subnets) > 1:
            raise exception.InvalidParameter(
                'cluster %s multi subnets %s in interface %s' % (
                    cluster.name, list(subnets), interface
                )
            )
+
+
@utils.supported_filters(optional_support_keys=['review'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_REVIEW_CLUSTER
)
@utils.wrap_to_dict(
    RESP_REVIEW_FIELDS,
    cluster=RESP_CONFIG_FIELDS,
    hosts=RESP_CLUSTERHOST_CONFIG_FIELDS
)
def review_cluster(cluster_id, review={}, user=None, session=None, **kwargs):
    """review cluster.

    Autofills and validates the cluster's os config and flavor
    (package) config plus those of the selected clusterhosts, then
    persists them with config_validated=True.

    Args:
        cluster_id: the cluster id.
        review: dict contains hosts to be reviewed. either contains key
            hosts or clusterhosts. where hosts is a list of host id,
            clusterhosts is a list of clusterhost id.

    Returns:
        dict with keys 'cluster' and 'hosts' (the reviewed clusterhosts).

    Raises:
        InvalidParameter when any config fails validation.
    """
    from compass.db.api import host as host_api
    cluster = _get_cluster(cluster_id, session=session)
    check_cluster_editable(cluster, user=user)
    host_ids = review.get('hosts', [])
    clusterhost_ids = review.get('clusterhosts', [])
    clusterhosts = []
    # Get clusterhosts need to be reviewed.
    for clusterhost in cluster.clusterhosts:
        if (
            clusterhost.clusterhost_id in clusterhost_ids or
            clusterhost.host_id in host_ids
        ):
            clusterhosts.append(clusterhost)

    # Validate cluster-level os config first; per-host configs are
    # merged on top of it below.
    os_config = copy.deepcopy(cluster.os_config)
    os_config = metadata_api.autofill_os_config(
        os_config, cluster.os_id, cluster=cluster
    )
    metadata_api.validate_os_config(
        os_config, cluster.os_id, True
    )
    for clusterhost in clusterhosts:
        host = clusterhost.host
        # ignore underlying host os config validation
        # since the host is not editable
        if not host_api.is_host_editable(
            host, user=user, check_in_installing=False
        ):
            logging.info(
                'ignore update host %s config '
                'since it is not editable' % host.name
            )
            continue
        host_os_config = copy.deepcopy(host.os_config)
        host_os_config = metadata_api.autofill_os_config(
            host_os_config, host.os_id,
            host=host
        )
        # Validate the effective (cluster + host override) os config,
        # but persist only the host-level portion.
        deployed_os_config = util.merge_dict(
            os_config, host_os_config
        )
        metadata_api.validate_os_config(
            deployed_os_config, host.os_id, True
        )
        host_api.validate_host(host)
        utils.update_db_object(
            session, host, os_config=host_os_config, config_validated=True
        )

    # Flavor (package) configs only apply when the cluster has a flavor.
    package_config = copy.deepcopy(cluster.package_config)
    if cluster.flavor_name:
        package_config = metadata_api.autofill_flavor_config(
            package_config, cluster.flavor_id,
            cluster=cluster
        )
        metadata_api.validate_flavor_config(
            package_config, cluster.flavor_id, True
        )
        for clusterhost in clusterhosts:
            clusterhost_package_config = copy.deepcopy(
                clusterhost.package_config
            )
            clusterhost_package_config = (
                metadata_api.autofill_flavor_config(
                    clusterhost_package_config,
                    cluster.flavor_id,
                    clusterhost=clusterhost
                )
            )
            # As with os config: validate the merged view, persist the
            # clusterhost-level portion only.
            deployed_package_config = util.merge_dict(
                package_config, clusterhost_package_config
            )
            metadata_api.validate_flavor_config(
                deployed_package_config,
                cluster.flavor_id, True
            )
            validate_clusterhost(clusterhost, session=session)
            utils.update_db_object(
                session, clusterhost,
                package_config=clusterhost_package_config,
                config_validated=True
            )

    validate_cluster(cluster, session=session)
    utils.update_db_object(
        session, cluster, os_config=os_config, package_config=package_config,
        config_validated=True
    )
    return {
        'cluster': cluster,
        'hosts': clusterhosts
    }
+
+
@utils.supported_filters(optional_support_keys=['deploy'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_CLUSTER
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    cluster=RESP_CONFIG_FIELDS,
    hosts=RESP_CLUSTERHOST_FIELDS
)
def deploy_cluster(
    cluster_id, deploy={}, user=None, session=None, **kwargs
):
    """deploy cluster.

    Marks the cluster and the selected (validated) hosts INITIALIZED
    and sends an asynchronous deploy task to celery.

    Args:
        cluster_id: cluster id.
        deploy: dict contains key either hosts or clusterhosts.
            deploy['hosts'] is a list of host id,
            deploy['clusterhosts'] is a list of clusterhost id.

    Returns:
        dict with keys 'status', 'cluster' and 'hosts'.
    """
    from compass.db.api import host as host_api
    from compass.tasks import client as celery_client
    cluster = _get_cluster(cluster_id, session=session)
    host_ids = deploy.get('hosts', [])
    clusterhost_ids = deploy.get('clusterhosts', [])
    clusterhosts = []
    # get clusterhost to deploy.
    for clusterhost in cluster.clusterhosts:
        if (
            clusterhost.clusterhost_id in clusterhost_ids or
            clusterhost.host_id in host_ids
        ):
            clusterhosts.append(clusterhost)
    check_cluster_editable(cluster, user=user)
    check_cluster_validated(cluster)
    utils.update_db_object(session, cluster.state, state='INITIALIZED')
    for clusterhost in clusterhosts:
        host = clusterhost.host
        # ignore checking if underlying host is validated if
        # the host is not editable.
        if host_api.is_host_editable(host, user=user):
            host_api.check_host_validated(host)
        utils.update_db_object(session, host.state, state='INITIALIZED')
        # clusterhost-level validation only applies with a flavor.
        if cluster.flavor_name:
            check_clusterhost_validated(clusterhost)
            utils.update_db_object(
                session, clusterhost.state, state='INITIALIZED'
            )

    # The task is routed through the user's own queue/exchange.
    celery_client.celery.send_task(
        'compass.tasks.deploy_cluster',
        (
            user.email, cluster_id,
            [clusterhost.host_id for clusterhost in clusterhosts]
        ),
        queue=user.email,
        exchange=user.email,
        routing_key=user.email
    )
    return {
        'status': 'deploy action sent',
        'cluster': cluster,
        'hosts': clusterhosts
    }
+
+
@utils.supported_filters(optional_support_keys=['redeploy'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_CLUSTER
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    cluster=RESP_CONFIG_FIELDS,
    hosts=RESP_CLUSTERHOST_FIELDS
)
def redeploy_cluster(
    cluster_id, deploy={}, user=None, session=None, **kwargs
):
    """redeploy cluster.

    Resets cluster/host/clusterhost states back to INITIALIZED with
    zero progress, then sends an asynchronous redeploy task to celery.
    Unlike deploy_cluster, all hosts of the cluster are redeployed;
    the `deploy` argument is accepted but unused.

    Args:
        cluster_id: cluster id.

    Returns:
        dict with keys 'status' and 'cluster'.
    """
    from compass.db.api import host as host_api
    from compass.tasks import client as celery_client
    cluster = _get_cluster(cluster_id, session=session)

    check_cluster_editable(cluster, user=user)
    check_cluster_validated(cluster)
    utils.update_db_object(
        session, cluster.state,
        state='INITIALIZED',
        percentage=0,
        ready=False
    )
    for clusterhost in cluster.clusterhosts:
        host = clusterhost.host
        # ignore checking if underlying host is validated if
        # the host is not editable.
        # NOTE(review): despite the comment above, no is_host_editable()
        # guard is applied here (deploy_cluster has one), so validation
        # is checked for every host -- confirm this is intentional.
        host_api.check_host_validated(host)
        utils.update_db_object(
            session, host.state,
            state='INITIALIZED',
            percentage=0,
            ready=False
        )
        if cluster.flavor_name:
            check_clusterhost_validated(clusterhost)
            utils.update_db_object(
                session,
                clusterhost.state,
                state='INITIALIZED',
                percentage=0,
                ready=False
            )

    celery_client.celery.send_task(
        'compass.tasks.redeploy_cluster',
        (
            user.email, cluster_id
        ),
        queue=user.email,
        exchange=user.email,
        routing_key=user.email
    )
    return {
        'status': 'redeploy action sent',
        'cluster': cluster
    }
+
+
@utils.supported_filters(optional_support_keys=['apply_patch'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_CLUSTER
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    cluster=RESP_CONFIG_FIELDS,
    hosts=RESP_CLUSTERHOST_FIELDS
)
def patch_cluster(cluster_id, user=None, session=None, **kwargs):
    """Send an asynchronous patch task for the cluster to celery.

    Args:
        cluster_id: cluster id.

    Returns:
        dict with keys 'status' and 'cluster'.

    NOTE(review): unlike deploy/redeploy, no editable/validated checks
    are performed before the task is sent -- confirm this is intended.
    """

    from compass.tasks import client as celery_client

    cluster = _get_cluster(cluster_id, session=session)
    celery_client.celery.send_task(
        'compass.tasks.patch_cluster',
        (
            user.email, cluster_id,
        ),
        queue=user.email,
        exchange=user.email,
        routing_key=user.email
    )
    return {
        'status': 'patch action sent',
        'cluster': cluster
    }
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_GET_CLUSTER_STATE
)
@utils.wrap_to_dict(RESP_STATE_FIELDS)
def get_cluster_state(cluster_id, user=None, session=None, **kwargs):
    """Get cluster state info."""
    cluster = _get_cluster(cluster_id, session=session)
    return cluster.state_dict()
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_GET_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
def get_cluster_host_state(
    cluster_id, host_id, user=None, session=None, **kwargs
):
    """Get clusterhost state merged with underlying host state."""
    clusterhost = _get_cluster_host(cluster_id, host_id, session=session)
    return clusterhost.state_dict()
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_GET_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
def get_cluster_host_self_state(
    cluster_id, host_id, user=None, session=None, **kwargs
):
    """Get clusterhost itself state (not merged with host state)."""
    clusterhost = _get_cluster_host(cluster_id, host_id, session=session)
    return clusterhost.state
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_GET_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
def get_clusterhost_state(
    clusterhost_id, user=None, session=None, **kwargs
):
    """Get clusterhost state merged with underlying host state."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    return clusterhost.state_dict()
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_GET_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
def get_clusterhost_self_state(
    clusterhost_id, user=None, session=None, **kwargs
):
    """Get clusterhost itself state (not merged with host state)."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    return clusterhost.state
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
def update_cluster_host_state(
    cluster_id, host_id, user=None, session=None, **kwargs
):
    """Update a clusterhost's own state, mirroring fields to its cluster."""
    clusterhost = _get_cluster_host(cluster_id, host_id, session=session)
    # Modify(harry): without progress_update.py to update cluster state
    # update cluster state here
    cluster = _get_cluster(clusterhost.cluster_id, session=session)
    for state_obj in (clusterhost.state, cluster.state):
        utils.update_db_object(session, state_obj, **kwargs)
    return clusterhost.state_dict()
+
+
def _update_clusterhost_state(
    clusterhost, from_database_only=False,
    session=None, user=None, **kwargs
):
    """Update clusterhost state.

    If from_database_only, the state will only be updated in database.
    Otherwise a task sent to celery and os installer/package installer
    will also update its state if needed.
    """
    # A 'ready' transition from False to True is what triggers the
    # installer-side notification below.
    if 'ready' in kwargs and kwargs['ready'] and not clusterhost.state.ready:
        ready_triggered = True
    else:
        ready_triggered = False
    cluster_ready = False
    host = clusterhost.host
    cluster = clusterhost.cluster
    # NOTE(review): host_ready is the *negation* of the host's ready
    # flag -- presumably "the host still needs to be marked ready",
    # mirroring the clusterhost_ready mapping built in
    # update_cluster_state_internal; confirm against the
    # compass.tasks.package_installed consumer.
    host_ready = not host.state.ready
    if ready_triggered:
        # Cluster is ready only if every *other* clusterhost is ready
        # (this one is about to become ready).
        cluster_ready = True
        for clusterhost_in_cluster in cluster.clusterhosts:
            if (
                clusterhost_in_cluster.clusterhost_id
                == clusterhost.clusterhost_id
            ):
                continue
            if not clusterhost_in_cluster.state.ready:
                cluster_ready = False

    logging.info(
        'clusterhost %s ready: %s',
        clusterhost.name, ready_triggered
    )
    logging.info('cluster ready: %s', cluster_ready)
    logging.info('host ready: %s', host_ready)
    if not ready_triggered or from_database_only:
        # Database-only path: apply the update and clear the
        # cluster-level ready flag if this clusterhost is not ready.
        logging.info('%s state is set to %s', clusterhost.name, kwargs)
        utils.update_db_object(session, clusterhost.state, **kwargs)
        if not clusterhost.state.ready:
            logging.info('%s state ready is set to False', cluster.name)
            utils.update_db_object(session, cluster.state, ready=False)
        status = '%s state is updated' % clusterhost.name
    else:
        # Notify installers asynchronously; fall back to the cluster
        # creator's email when no acting user was supplied.
        if not user:
            user_id = cluster.creator_id
            user_dict = user_api.get_user(user_id, session=session)
            user_email = user_dict['email']
        else:
            user_email = user.email
        from compass.tasks import client as celery_client
        celery_client.celery.send_task(
            'compass.tasks.package_installed',
            (
                clusterhost.cluster_id, clusterhost.host_id,
                cluster_ready, host_ready
            ),
            queue=user_email,
            exchange=user_email,
            routing_key=user_email
        )
        status = '%s: cluster ready %s host ready %s' % (
            clusterhost.name, cluster_ready, host_ready
        )
    logging.info('action status: %s', status)
    return {
        'status': status,
        'clusterhost': clusterhost.state_dict()
    }
+
+
@util.deprecated
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(['status', 'clusterhost'])
def update_cluster_host_state_internal(
    cluster_id, host_id, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Update a clusterhost state by installation process.

    Args:
        cluster_id: cluster id.
        host_id: host id.
        from_database_only: when True, only update the database;
            otherwise a celery task may also be sent.

    Returns:
        dict with keys 'status' and 'clusterhost'.
    """
    # TODO(xicheng): it should be merged into update_cluster_host_state
    clusterhost = _get_cluster_host(
        cluster_id, host_id, session=session
    )
    # Bug fix: the keyword was previously misspelled as `users=user`,
    # which dropped the acting user and leaked a bogus `users` key into
    # the state-update kwargs (cf. update_clusterhost_state_internal,
    # which passes `user=user` correctly).
    return _update_clusterhost_state(
        clusterhost, from_database_only=from_database_only,
        session=session, user=user, **kwargs
    )
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
def update_clusterhost_state(
    clusterhost_id, user=None, session=None, **kwargs
):
    """Update a clusterhost's own state, mirroring fields to its cluster."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    # Modify(harry): without progress_update.py to update cluster state
    # update cluster state here
    cluster = _get_cluster(clusterhost.cluster_id, session=session)
    for state_obj in (clusterhost.state, cluster.state):
        utils.update_db_object(session, state_obj, **kwargs)
    return clusterhost.state_dict()
+
+
@util.deprecated
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(['status', 'clusterhost'])
def update_clusterhost_state_internal(
    clusterhost_id, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Update a clusterhost state by installation process."""
    # TODO(xicheng): it should be merged into update_clusterhost_state
    return _update_clusterhost_state(
        _get_clusterhost(clusterhost_id, session=session),
        from_database_only=from_database_only,
        session=session, user=user, **kwargs
    )
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTER_STATE_FIELDS,
    ignore_support_keys=(IGNORE_FIELDS + IGNORE_UPDATED_CLUSTER_STATE_FIELDS)
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTER_STATE
)
@utils.wrap_to_dict(RESP_STATE_FIELDS)
def update_cluster_state(
    cluster_id, user=None, session=None, **kwargs
):
    """Update a cluster state and return the refreshed state dict."""
    cluster = _get_cluster(cluster_id, session=session)
    utils.update_db_object(session, cluster.state, **kwargs)
    return cluster.state_dict()
+
+
@util.deprecated
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTER_STATE_INTERNAL_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_CLUSTER_STATE
)
@utils.wrap_to_dict(['status', 'cluster'])
def update_cluster_state_internal(
    cluster_id, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Update a cluster state by installation process.

    If from_database_only, the state will only be updated in database.
    Otherwise a task sent to do state update in os installer and
    package installer.
    """
    # TODO(xicheng): it should be merged into update_cluster_state
    cluster = _get_cluster(cluster_id, session=session)
    # A 'ready' transition from False to True triggers the installer
    # notification below.
    if 'ready' in kwargs and kwargs['ready'] and not cluster.state.ready:
        ready_triggered = True
    else:
        ready_triggered = False
    clusterhost_ready = {}
    if ready_triggered:
        # Map host id -> "clusterhost still needs to be marked ready".
        for clusterhost in cluster.clusterhosts:
            clusterhost_ready[clusterhost.host_id] = (
                not clusterhost.state.ready
            )

    logging.info('cluster %s ready: %s', cluster_id, ready_triggered)
    logging.info('clusterhost ready: %s', clusterhost_ready)

    if not ready_triggered or from_database_only:
        # Database-only path: a not-ready cluster clears every
        # clusterhost's ready flag.
        logging.info('%s state is set to %s', cluster.name, kwargs)
        utils.update_db_object(session, cluster.state, **kwargs)
        if not cluster.state.ready:
            for clusterhost in cluster.clusterhosts:
                logging.info('%s state ready is to False', clusterhost.name)
                utils.update_db_object(
                    session, clusterhost.state, ready=False
                )
        status = '%s state is updated' % cluster.name
    else:
        if not user:
            user_id = cluster.creator_id
            user_dict = user_api.get_user(user_id, session=session)
            user_email = user_dict['email']
        else:
            user_email = user.email
        from compass.tasks import client as celery_client
        # Bug fix: this previously sent `clusterhost.cluster_id`,
        # relying on the loop variable leaked from the block above;
        # that raised NameError for a cluster without hosts and was in
        # any case just this cluster's own id.
        celery_client.celery.send_task(
            'compass.tasks.cluster_installed',
            (cluster.id, clusterhost_ready),
            queue=user_email,
            exchange=user_email,
            routing_key=user_email
        )
        status = '%s installed action set clusterhost ready %s' % (
            cluster.name, clusterhost_ready
        )
    logging.info('action status: %s', status)
    return {
        'status': status,
        'cluster': cluster.state_dict()
    }
+
+
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
def get_cluster_host_log_histories(
    cluster_id, host_id, user=None, session=None, **kwargs
):
    """Get clusterhost log history by cluster id and host id."""
    clusterhost = _get_cluster_host(cluster_id, host_id, session=session)
    return clusterhost.log_histories
+
+
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
def get_clusterhost_log_histories(
    clusterhost_id, user=None,
    session=None, **kwargs
):
    """Get clusterhost log history by clusterhost id."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    return clusterhost.log_histories
+
+
def _get_cluster_host_log_history(
    cluster_id, host_id, filename, session=None, **kwargs
):
    """Get clusterhost log history by cluster id, host id and filename."""
    clusterhost = _get_cluster_host(cluster_id, host_id, session=session)
    owner_id = clusterhost.clusterhost_id
    return utils.get_db_object(
        session, models.ClusterHostLogHistory,
        clusterhost_id=owner_id, filename=filename, **kwargs
    )
+
+
def _get_clusterhost_log_history(
    clusterhost_id, filename, session=None, **kwargs
):
    """Get clusterhost log history by clusterhost id and filename."""
    clusterhost = _get_clusterhost(clusterhost_id, session=session)
    owner_id = clusterhost.clusterhost_id
    return utils.get_db_object(
        session, models.ClusterHostLogHistory,
        clusterhost_id=owner_id, filename=filename, **kwargs
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
def get_cluster_host_log_history(
    cluster_id, host_id, filename, user=None, session=None, **kwargs
):
    """Get clusterhost log history by cluster id, host id and filename."""
    log_history = _get_cluster_host_log_history(
        cluster_id, host_id, filename, session=session
    )
    return log_history
+
+
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
def get_clusterhost_log_history(
    clusterhost_id, filename, user=None, session=None, **kwargs
):
    """Get host log history by clusterhost id and filename."""
    log_history = _get_clusterhost_log_history(
        clusterhost_id, filename, session=session
    )
    return log_history
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
def update_cluster_host_log_history(
    cluster_id, host_id, filename, user=None, session=None, **kwargs
):
    """Update a host log history by cluster id, host id and filename."""
    log_history = _get_cluster_host_log_history(
        cluster_id, host_id, filename, session=session
    )
    return utils.update_db_object(session, log_history, **kwargs)
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
def update_clusterhost_log_history(
    clusterhost_id, filename, user=None, session=None, **kwargs
):
    """Update a host log history by clusterhost id and filename."""
    log_history = _get_clusterhost_log_history(
        clusterhost_id, filename, session=session
    )
    return utils.update_db_object(
        session, log_history, **kwargs
    )
+
+
@utils.supported_filters(
    ADDED_CLUSTERHOST_LOG_FIELDS,
    optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
def add_clusterhost_log_history(
    clusterhost_id, exception_when_existing=False,
    filename=None, user=None, session=None, **kwargs
):
    """add a host log history by clusterhost id and filename."""
    owner = _get_clusterhost(clusterhost_id, session=session)
    return utils.add_db_object(
        session, models.ClusterHostLogHistory,
        exception_when_existing,
        owner.clusterhost_id, filename, **kwargs
    )
+
+
@utils.supported_filters(
    ADDED_CLUSTERHOST_LOG_FIELDS,
    optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
def add_cluster_host_log_history(
    cluster_id, host_id, exception_when_existing=False,
    filename=None, user=None, session=None, **kwargs
):
    """add a host log history by cluster id, host id and filename."""
    owner = _get_cluster_host(cluster_id, host_id, session=session)
    return utils.add_db_object(
        session, models.ClusterHostLogHistory,
        exception_when_existing,
        owner.clusterhost_id, filename, **kwargs
    )
diff --git a/compass-deck/db/api/database.py b/compass-deck/db/api/database.py
new file mode 100644
index 0000000..49769d7
--- /dev/null
+++ b/compass-deck/db/api/database.py
@@ -0,0 +1,264 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provider interface to manipulate database."""
+import functools
+import logging
+import netaddr
+
+from contextlib import contextmanager
+from sqlalchemy import create_engine
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.exc import OperationalError
+from sqlalchemy.orm import scoped_session
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import NullPool
+from sqlalchemy.pool import QueuePool
+from sqlalchemy.pool import SingletonThreadPool
+from sqlalchemy.pool import StaticPool
+from threading import local
+
+from compass.db import exception
+from compass.db import models
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
# Module-level database state; populated by init().
ENGINE = None  # sqlalchemy engine bound to the configured database url.
SESSION = sessionmaker(autocommit=False, autoflush=False)
SCOPED_SESSION = None  # thread-scoped session factory created in init().
SESSION_HOLDER = local()  # per-thread holder of the active session.

# Maps setting.SQLALCHEMY_DATABASE_POOL_TYPE to a sqlalchemy pool class.
POOL_MAPPING = {
    'instant': NullPool,
    'static': StaticPool,
    'queued': QueuePool,
    'thread_single': SingletonThreadPool
}
+
+
def init(database_url=None):
    """Initialize database.

    Adjust sqlalchemy logging if necessary.

    :param database_url: string, database url. Falls back to
        setting.SQLALCHEMY_DATABASE_URI when not given.
    """
    global ENGINE
    global SCOPED_SESSION
    if not database_url:
        database_url = setting.SQLALCHEMY_DATABASE_URI
    logging.info('init database %s', database_url)
    root_logger = logging.getLogger()
    # Enable progressively more verbose sqlalchemy logging when the
    # root logger is set to the custom 'fine'/'finest' levels.
    fine_debug = root_logger.isEnabledFor(logsetting.LOGLEVEL_MAPPING['fine'])
    if fine_debug:
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
    finest_debug = root_logger.isEnabledFor(
        logsetting.LOGLEVEL_MAPPING['finest']
    )
    if finest_debug:
        logging.getLogger('sqlalchemy.dialects').setLevel(logging.INFO)
        logging.getLogger('sqlalchemy.pool').setLevel(logging.INFO)
        logging.getLogger('sqlalchemy.orm').setLevel(logging.INFO)
    # Pool class comes from settings; KeyError here means a bad
    # SQLALCHEMY_DATABASE_POOL_TYPE value.
    poolclass = POOL_MAPPING[setting.SQLALCHEMY_DATABASE_POOL_TYPE]
    ENGINE = create_engine(
        database_url, convert_unicode=True,
        poolclass=poolclass
    )
    SESSION.configure(bind=ENGINE)
    SCOPED_SESSION = scoped_session(SESSION)
    # Expose Model.query on all declarative models.
    models.BASE.query = SCOPED_SESSION.query_property()
+
+
def in_session():
    """Check if the caller is inside a database session scope.

    :return: bool, True when a session is active on this thread.
    """
    # Bug fix: the result was previously computed but never returned,
    # so the function always returned None (falsy).
    return hasattr(SESSION_HOLDER, 'session')
+
+
@contextmanager
def session(exception_when_in_session=True):
    """database session scope.

    To operate database, it should be called in database session.
    If not exception_when_in_session, the with session statement support
    nested session and only the out most session commit/rollback the
    transaction.
    """
    # Lazily initialize the engine on first use.
    if not ENGINE:
        init()

    nested_session = False
    if hasattr(SESSION_HOLDER, 'session'):
        if exception_when_in_session:
            logging.error('we are already in session')
            raise exception.DatabaseException('session already exist')
        else:
            # Reuse the outer session; commit/rollback/close are left
            # to the outermost scope.
            new_session = SESSION_HOLDER.session
            nested_session = True
            logging.log(
                logsetting.getLevelByName('fine'),
                'reuse session %s', nested_session
            )
    else:
        new_session = SCOPED_SESSION()
        setattr(SESSION_HOLDER, 'session', new_session)
        logging.log(
            logsetting.getLevelByName('fine'),
            'enter session %s', new_session
        )
    try:
        yield new_session
        if not nested_session:
            new_session.commit()
    except Exception as error:
        if not nested_session:
            new_session.rollback()
            logging.error('failed to commit session')
        logging.exception(error)
        if isinstance(error, IntegrityError):
            # Heuristic: pick the first lowercase token of the failed
            # statement as the table name for the error message.
            # NOTE(review): `object` shadows the builtin and stays
            # bound to the builtin class if no token is lowercase --
            # the message would then show "<class 'object'>"; confirm
            # whether that edge case matters.
            for item in error.statement.split():
                if item.islower():
                    object = item
                    break
            raise exception.DuplicatedRecord(
                '%s in %s' % (error.orig, object)
            )
        elif isinstance(error, OperationalError):
            raise exception.DatabaseException(
                'operation error in database'
            )
        elif isinstance(error, exception.DatabaseException):
            raise error
        else:
            # Wrap anything else so callers only see DatabaseException.
            raise exception.DatabaseException(str(error))
    finally:
        if not nested_session:
            new_session.close()
            SCOPED_SESSION.remove()
            delattr(SESSION_HOLDER, 'session')
            logging.log(
                logsetting.getLevelByName('fine'),
                'exit session %s', new_session
            )
+
+
def current_session():
    """Get the current session scope when it is called.

    :return: database session.
    :raises: DatabaseException when it is not in session.
    """
    try:
        return SESSION_HOLDER.session
    except Exception as error:
        logging.error('It is not in the session scope')
        logging.exception(error)
        if isinstance(error, exception.DatabaseException):
            raise error
        raise exception.DatabaseException(str(error))
+
+
def run_in_session(exception_when_in_session=True):
    """Decorator to make sure the decorated function run in session.

    When not exception_when_in_session, the run_in_session can be
    decorated several times.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                if kwargs.get('session') is not None:
                    # Caller already supplied a session; just run.
                    return func(*args, **kwargs)
                with session(
                    exception_when_in_session=exception_when_in_session
                ) as new_session:
                    kwargs['session'] = new_session
                    return func(*args, **kwargs)
            except Exception as error:
                logging.error(
                    'got exception with func %s args %s kwargs %s',
                    func, args, kwargs
                )
                logging.exception(error)
                raise error
        return wrapper
    return decorator
+
+
def _setup_user_table(db_session):
    """Seed the user table with the built-in admin account from settings."""
    logging.info('setup user table')
    from compass.db.api import user
    admin_fields = {
        'email': setting.COMPASS_ADMIN_EMAIL,
        'password': setting.COMPASS_ADMIN_PASSWORD,
        'is_admin': True,
    }
    user.add_user(session=db_session, **admin_fields)
+
+
def _setup_permission_table(db_session):
    """Populate the permission table with the built-in permissions."""
    logging.info('setup permission table.')
    from compass.db.api import permission
    permission.add_permissions_internal(session=db_session)
+
+
def _setup_switch_table(db_session):
    """Register the default switch with an allow-all port filter."""
    # TODO(xicheng): deprecate setup default switch.
    logging.info('setup switch table')
    from compass.db.api import switch
    filters = ['allow ports all']
    switch.add_switch(
        True, setting.DEFAULT_SWITCH_IP,
        session=db_session,
        machine_filters=filters
    )
+
+
def _update_others(db_session):
    """Run update_db_objects over the cluster/host related tables."""
    logging.info('update other tables')
    from compass.db.api import utils
    from compass.db import models
    for model in (models.Cluster, models.Host, models.ClusterHost):
        utils.update_db_objects(db_session, model)
+
+
@run_in_session()
def create_db(session=None):
    """Create all tables and seed them with the default records."""
    # Tables must exist before any of the seed helpers run.
    models.BASE.metadata.create_all(bind=ENGINE)
    for seed in (
        _setup_permission_table,
        _setup_user_table,
        _setup_switch_table,
        _update_others,
    ):
        seed(session)
+
+
def drop_db():
    """Drop database."""
    # Drops every table registered on the declarative base; irreversible.
    models.BASE.metadata.drop_all(bind=ENGINE)
diff --git a/compass-deck/db/api/health_check_report.py b/compass-deck/db/api/health_check_report.py
new file mode 100644
index 0000000..aaea7a7
--- /dev/null
+++ b/compass-deck/db/api/health_check_report.py
@@ -0,0 +1,190 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cluster health check report."""
+import logging
+
+from compass.db.api import cluster as cluster_api
+from compass.db.api import database
+from compass.db.api import host as host_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+
# Fields a caller must supply when inserting a report.
REQUIRED_INSERT_FIELDS = ['name']
# Optional fields accepted on insert.
OPTIONAL_INSERT_FIELDS = [
    'display_name', 'report', 'category', 'state', 'error_message'
]
# Fields that may be changed after a report is created.
UPDATE_FIELDS = ['report', 'state', 'error_message']
# Fields exposed in API responses for a report.
RESP_FIELDS = [
    'cluster_id', 'name', 'display_name', 'report',
    'category', 'state', 'error_message'
]
# Fields exposed by the start-health-check action response.
RESP_ACTION_FIELDS = ['cluster_id', 'status']
+
+
@utils.supported_filters(REQUIRED_INSERT_FIELDS, OPTIONAL_INSERT_FIELDS)
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def add_report_record(cluster_id, name=None, report=None,
                      state='verifying', session=None, **kwargs):
    """Create a health check report record.

    :param cluster_id: id of the cluster the report belongs to.
    :param name: report name; internal whitespace is collapsed to '-'.
    :param report: report payload dict (defaults to a new empty dict).
    :param state: initial report state.
    """
    # Fixed: `report={}` was a shared mutable default argument.
    if report is None:
        report = {}
    # Replace any white space into '-'
    words = name.split()
    name = '-'.join(words)
    cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
    return utils.add_db_object(
        session, models.HealthCheckReport, True, cluster.id, name,
        report=report, state=state, **kwargs
    )
+
+
def _get_report(cluster_id, name, session=None):
    """Fetch one health check report of a cluster by name."""
    cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
    return utils.get_db_object(
        session, models.HealthCheckReport,
        cluster_id=cluster.id, name=name
    )
+
+
@utils.supported_filters(UPDATE_FIELDS)
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def update_report(cluster_id, name, session=None, **kwargs):
    """Update health check report.

    A report in 'finished' state is immutable.
    """
    report = _get_report(cluster_id, name, session=session)
    if report.state == 'finished':
        raise exception.Forbidden(
            'Report cannot be updated if state is in "finished"'
        )
    return utils.update_db_object(session, report, **kwargs)
+
+
@utils.supported_filters(UPDATE_FIELDS)
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def update_multi_reports(cluster_id, session=None, **kwargs):
    """Bulk update reports.

    Delegates to set_error, which stamps every report of the cluster.
    """
    # TODO(grace): rename the fuction if needed to reflect the fact.
    return set_error(cluster_id, session=session, **kwargs)
+
+
def set_error(cluster_id, report=None, session=None,
              state='error', error_message=None):
    """Bulk-update every report of a cluster.

    :param report: report payload to store (defaults to empty dict).
    :param state: state to stamp on all reports (defaults to 'error').
    :param error_message: error message to record.

    Fixed: the ``report`` and ``state`` parameters were accepted but
    silently ignored — the updates hard-coded ``{}`` and ``'error'``.
    They are now honored, with the same defaults as before, so existing
    callers see identical behavior. Also removed the shared mutable
    default ``report={}``.
    """
    if report is None:
        report = {}
    cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
    logging.debug(
        "updates all reports as %s in cluster %s",
        state, cluster_id
    )
    return utils.update_db_objects(
        session, models.HealthCheckReport,
        updates={
            'report': report,
            'state': state,
            'error_message': error_message
        }, cluster_id=cluster.id
    )
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HEALTH_REPORT
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_health_reports(cluster_id, user=None, session=None):
    """List all reports in the specified cluster."""
    cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
    return utils.list_db_objects(
        session, models.HealthCheckReport,
        cluster_id=cluster.id
    )
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_GET_HEALTH_REPORT
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_health_report(cluster_id, name, user=None, session=None):
    """Get a single health check report of a cluster by name."""
    return _get_report(
        cluster_id, name, session=session
    )
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DELETE_REPORT
)
@utils.wrap_to_dict(RESP_FIELDS)
def delete_reports(cluster_id, name=None, user=None, session=None):
    """Delete one report (when name given) or all reports of a cluster."""
    # TODO(grace): better to separate this function into two.
    # One is to delete a report of a cluster, the other to delete all
    # reports under a cluster.
    if not name:
        cluster = cluster_api.get_cluster_internal(
            cluster_id, session=session
        )
        return utils.del_db_objects(
            session, models.HealthCheckReport, cluster_id=cluster.id
        )
    report = _get_report(cluster_id, name, session=session)
    return utils.del_db_object(session, report)
+
+
@utils.supported_filters(optional_support_keys=['check_health'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_CHECK_CLUSTER_HEALTH
)
@utils.wrap_to_dict(RESP_ACTION_FIELDS)
def start_check_cluster_health(cluster_id, send_report_url,
                               user=None, session=None, check_health=None):
    """Start to check cluster health.

    :param cluster_id: id of the cluster to check.
    :param send_report_url: url the backend posts the report back to.
    :param check_health: optional check options (currently unused here).
    :raises exception.Forbidden: if the cluster has not finished
        deployment or a health check is already in progress.
    """
    # Fixed: `check_health={}` was a shared mutable default argument.
    if check_health is None:
        check_health = {}
    cluster = cluster_api.get_cluster_internal(cluster_id, session=session)

    if cluster.state.state != 'SUCCESSFUL':
        logging.debug("state is %s" % cluster.state.state)
        err_msg = "Healthcheck starts only after cluster finished deployment!"
        raise exception.Forbidden(err_msg)

    reports = utils.list_db_objects(
        session, models.HealthCheckReport,
        cluster_id=cluster.id, state='verifying'
    )
    if reports:
        err_msg = 'Healthcheck in progress, please wait for it to complete!'
        raise exception.Forbidden(err_msg)

    # Clear all previous reports.
    # TODO(grace): the delete should be moved into celery task.
    # We should consider the case that celery task is down.
    utils.del_db_objects(
        session, models.HealthCheckReport, cluster_id=cluster.id
    )

    from compass.tasks import client as celery_client
    celery_client.celery.send_task(
        'compass.tasks.cluster_health',
        (cluster.id, send_report_url, user.email),
        queue=user.email,
        exchange=user.email,
        routing_key=user.email
    )
    return {
        "cluster_id": cluster.id,
        "status": "start to check cluster health."
    }
diff --git a/compass-deck/db/api/host.py b/compass-deck/db/api/host.py
new file mode 100644
index 0000000..15e0bb6
--- /dev/null
+++ b/compass-deck/db/api/host.py
@@ -0,0 +1,1120 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Host database operations."""
+import functools
+import logging
+import netaddr
+import re
+
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
# Filters accepted when listing hosts.
SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac', 'id']
# Filters accepted when listing machines-or-hosts.
SUPPORTED_MACHINE_HOST_FIELDS = [
    'mac', 'tag', 'location', 'os_name', 'os_id'
]
# NOTE(review): 'NETOWORK' typo is preserved — renaming the constant
# would break any external importer.
SUPPORTED_NETOWORK_FIELDS = [
    'interface', 'ip', 'is_mgmt', 'is_promiscuous'
]
# Fields exposed in host API responses.
RESP_FIELDS = [
    'id', 'name', 'hostname', 'os_name', 'owner', 'mac',
    'switch_ip', 'port', 'switches', 'os_installer', 'os_id', 'ip',
    'reinstall_os', 'os_installed', 'tag', 'location', 'networks',
    'created_at', 'updated_at'
]
# Fields exposed when listing the clusters a host belongs to.
RESP_CLUSTER_FIELDS = [
    'id', 'name', 'os_name', 'reinstall_distributed_system',
    'owner', 'adapter_name', 'flavor_name',
    'distributed_system_installed', 'created_at', 'updated_at'
]
# Fields exposed in host network responses.
RESP_NETWORK_FIELDS = [
    'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous',
    'created_at', 'updated_at'
]
# Fields exposed in host config responses.
# NOTE(review): 'config_setp' looks like a typo of 'config_step' but is
# preserved — it presumably matches the model attribute; verify there.
RESP_CONFIG_FIELDS = [
    'os_config',
    'config_setp',
    'config_validated',
    'networks',
    'created_at',
    'updated_at'
]
RESP_DEPLOYED_CONFIG_FIELDS = [
    'deployed_os_config'
]
# Fields of the async-deploy action response.
RESP_DEPLOY_FIELDS = [
    'status', 'host'
]
# Host fields that may be updated by callers.
UPDATED_FIELDS = ['name', 'reinstall_os']
UPDATED_CONFIG_FIELDS = [
    'put_os_config'
]
PATCHED_CONFIG_FIELDS = [
    'patched_os_config'
]
UPDATED_DEPLOYED_CONFIG_FIELDS = [
    'deployed_os_config'
]
# Required / optional fields when adding a host network.
ADDED_NETWORK_FIELDS = [
    'interface', 'ip', 'subnet_id'
]
OPTIONAL_ADDED_NETWORK_FIELDS = ['is_mgmt', 'is_promiscuous']
UPDATED_NETWORK_FIELDS = [
    'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
    'is_promiscuous'
]
# Keys silently dropped from incoming payloads.
IGNORE_FIELDS = [
    'id', 'created_at', 'updated_at'
]
# Host state response / update field sets.
RESP_STATE_FIELDS = [
    'id', 'state', 'percentage', 'message', 'severity', 'ready'
]
UPDATED_STATE_FIELDS = [
    'state', 'percentage', 'message', 'severity'
]
# 'ready' may only be toggled by internal (backend) callers.
UPDATED_STATE_INTERNAL_FIELDS = [
    'ready'
]
# Host installation log history field sets.
RESP_LOG_FIELDS = [
    'id', 'filename', 'position', 'partial_line', 'percentage',
    'message', 'severity', 'line_matcher_name'
]
ADDED_LOG_FIELDS = [
    'filename'
]
UPDATED_LOG_FIELDS = [
    'position', 'partial_line', 'percentage',
    'message', 'severity', 'line_matcher_name'
]
+
+
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_hosts(user=None, session=None, **filters):
    """Return all host records matching the given filters."""
    return utils.list_db_objects(session, models.Host, **filters)
+
+
@utils.supported_filters(
    optional_support_keys=SUPPORTED_MACHINE_HOST_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.output_filters(
    missing_ok=True,
    tag=utils.general_filter_callback,
    location=utils.general_filter_callback,
    os_name=utils.general_filter_callback,
    os_id=utils.general_filter_callback
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_machines_or_hosts(user=None, session=None, **filters):
    """List machines, substituting the host record where one exists."""
    machines = utils.list_db_objects(
        session, models.Machine, **filters
    )
    # A machine that has been claimed as a host is reported as that host.
    return [machine.host or machine for machine in machines]
+
+
def _get_host(host_id, session=None, **kwargs):
    """Get host by id, rejecting non-integer ids up front."""
    if not isinstance(host_id, (int, long)):
        raise exception.InvalidParameter(
            'host id %s type is not int compatible' % host_id
        )
    return utils.get_db_object(
        session, models.Host,
        id=host_id, **kwargs
    )
+
+
def get_host_internal(host_id, session=None, **kwargs):
    """Helper function to get host.

    Used by other files under db/api. Thin public wrapper over
    _get_host so siblings need not call a private name.
    """
    return _get_host(host_id, session=session, **kwargs)
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_host(
    host_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """get host info."""
    # Permission checking and response shaping are handled by decorators.
    return _get_host(
        host_id,
        session=session,
        exception_when_missing=exception_when_missing
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOSTS
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_machine_or_host(
    host_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Get the host owning the machine, or the machine itself.

    :param host_id: machine id to look up.
    :param exception_when_missing: when False, a missing machine yields
        None instead of raising.
    """
    from compass.db.api import machine as machine_api
    machine = machine_api.get_machine_internal(
        host_id,
        exception_when_missing=exception_when_missing,
        session=session
    )
    if machine is None:
        # Fixed: with exception_when_missing=False the lookup may return
        # None, and the original code crashed on `machine.host`.
        return None
    return machine.host or machine
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_CLUSTERS
)
@utils.wrap_to_dict(RESP_CLUSTER_FIELDS)
def get_host_clusters(host_id, user=None, session=None, **kwargs):
    """get host clusters."""
    clusterhosts = _get_host(host_id, session=session).clusterhosts
    return [clusterhost.cluster for clusterhost in clusterhosts]
+
+
def check_host_validated(host):
    """Raise Forbidden unless the host config has been validated."""
    if host.config_validated:
        return
    raise exception.Forbidden(
        'host %s is not validated' % host.name
    )
+
+
def check_host_editable(
    host, user=None,
    check_in_installing=False
):
    """Check host is editable; raise exception.Forbidden if not.

    If we try to set reinstall_os or check the host is not in installing
    state, we should set check_in_installing to True.
    Otherwise we will check the host is not in installing or installed.
    We also make sure the user is admin or the owner of the host to avoid
    unauthorized user to update host attributes.

    :param host: host model object to check.
    :param user: caller; when provided, must be admin or the creator.
    :param check_in_installing: True to reject only an actively
        installing host; False to also reject a host not flagged for
        reinstallation.
    """
    if check_in_installing:
        # Only an in-flight installation blocks the edit in this mode.
        if host.state.state == 'INSTALLING':
            raise exception.Forbidden(
                'host %s is not editable '
                'when state is in installing' % host.name
            )
    elif not host.reinstall_os:
        # Installed (or not-reinstalling) hosts are immutable here.
        raise exception.Forbidden(
            'host %s is not editable '
            'when not to be reinstalled' % host.name
        )
    if user and not user.is_admin and host.creator_id != user.id:
        raise exception.Forbidden(
            'host %s is not editable '
            'when user is not admin or the owner of the host' % host.name
        )
+
+
def is_host_editable(
    host, user=None,
    check_in_installing=False
):
    """Boolean form of check_host_editable."""
    try:
        check_host_editable(
            host, user=user,
            check_in_installing=check_in_installing
        )
    except exception.Forbidden:
        return False
    return True
+
+
def validate_host(host):
    """Validate host.

    Ensures the hostname is set, the host has at least one network,
    exactly one network is flagged as mgmt, and the mgmt network is
    not in promiscuous mode.

    :raises exception.InvalidParameter: on any violation.
    """
    if not host.hostname:
        # Fixed: was exception.Invalidparameter (wrong case), which
        # raised AttributeError instead of the intended exception.
        raise exception.InvalidParameter(
            'host %s does not set hostname' % host.name
        )
    if not host.host_networks:
        raise exception.InvalidParameter(
            'host %s does not have any network' % host.name
        )
    mgmt_interface_set = False
    for host_network in host.host_networks:
        if not host_network.is_mgmt:
            continue
        if mgmt_interface_set:
            raise exception.InvalidParameter(
                'host %s multi interfaces set mgmt ' % host.name
            )
        if host_network.is_promiscuous:
            raise exception.InvalidParameter(
                'host %s interface %s is mgmt but promiscuous' % (
                    host.name, host_network.interface
                )
            )
        mgmt_interface_set = True
    if not mgmt_interface_set:
        raise exception.InvalidParameter(
            'host %s has no mgmt interface' % host.name
        )
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(name=utils.check_name)
@utils.wrap_to_dict(RESP_FIELDS)
def _update_host(host_id, session=None, user=None, **kwargs):
    """Update a host internal."""
    host = _get_host(host_id, session=session)
    # A successfully installed host is immutable unless flagged for
    # reinstallation; the update is silently skipped.
    if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
        logging.info("ignoring successful host: %s", host_id)
        return {}
    reinstall_requested = kwargs.get('reinstall_os', False)
    check_host_editable(
        host, user=user,
        check_in_installing=reinstall_requested
    )
    return utils.update_db_object(session, host, **kwargs)
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST
)
def update_host(host_id, user=None, session=None, **kwargs):
    """Update a host.

    Thin permission-checked wrapper over _update_host.
    """
    return _update_host(host_id, session=session, user=user, **kwargs)
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST
)
def update_hosts(data=[], user=None, session=None):
    """Update hosts in one session."""
    # TODO(xicheng): this batch function is not similar as others.
    # try to make it similar output as others and batch update should
    # tolerate partial failure.
    return [
        _update_host(session=session, user=user, **host_data)
        for host_data in data
    ]
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_HOST
)
@utils.wrap_to_dict(
    RESP_FIELDS + ['status', 'host'],
    host=RESP_FIELDS
)
def del_host(
    host_id, force=False, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Delete a host.

    If force, we delete the host anyway.
    If from_database_only, we only delete the host record in database.
    Otherwise we send the del host task to celery to delete the host
    record in os installer and package installer, clean installation logs
    and at last clean database record.
    The backend will call this function again after it deletes the record
    in os installer and package installer with from_database_only set.
    """
    from compass.db.api import cluster as cluster_api
    host = _get_host(host_id, session=session)
    # force set host state to ERROR when we want to delete the
    # host anyway even the host is in installing or already
    # installed. It let the api know the deleting is in doing when backend
    # is doing the real deleting. In future we may import a new state like
    # INDELETE to indicate the deleting is processing.
    # We need discuss about if we can delete a host when it is already
    # installed by api.
    if host.state.state != 'UNINITIALIZED' and force:
        host.state.state = 'ERROR'
    check_host_editable(
        host, user=user,
        check_in_installing=True
    )
    cluster_ids = []
    for clusterhost in host.clusterhosts:
        if clusterhost.state.state != 'UNINITIALIZED' and force:
            clusterhost.state.state = 'ERROR'
        # TODO(grace): here we check all clusters which use this host editable.
        # Because in backend we do not have functions to delete host without
        # reference its cluster. After deleting pure host supported in backend,
        # we should change code here to is_cluster_editable.
        # Here delete a host may fail even we set force flag.
        cluster_api.check_cluster_editable(
            clusterhost.cluster, user=user,
            check_in_installing=True
        )
        cluster_ids.append(clusterhost.cluster_id)

    # Delete host record directly if there is no need to delete it
    # in backend or from_database_only is set.
    if host.state.state == 'UNINITIALIZED' or from_database_only:
        return utils.del_db_object(session, host)
    else:
        logging.info(
            'send del host %s task to celery', host_id
        )
        if not user:
            user_id = host.creator_id
            user_dict = user_api.get_user(user_id, session=session)
            user_email = user_dict['email']
        else:
            user_email = user.email
        from compass.tasks import client as celery_client
        celery_client.celery.send_task(
            'compass.tasks.delete_host',
            (
                # Fixed: this used user.email, which raised
                # AttributeError when user is None; user_email was
                # computed above to cover exactly that case.
                user_email, host.id, cluster_ids
            ),
            queue=user_email,
            exchange=user_email,
            routing_key=user_email
        )
        return {
            'status': 'delete action sent',
            'host': host,
        }
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def get_host_config(host_id, user=None, session=None, **kwargs):
    """Get host config.

    Returns the host; wrap_to_dict trims it to the config fields.
    """
    return _get_host(host_id, session=session)
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
def get_host_deployed_config(host_id, user=None, session=None, **kwargs):
    """Get host deployed config.

    Returns the host; wrap_to_dict trims it to deployed_os_config.
    """
    return _get_host(host_id, session=session)
+
+
# replace os_config to deployed_os_config in kwargs.
@utils.replace_filters(
    os_config='deployed_os_config'
)
@utils.supported_filters(
    UPDATED_DEPLOYED_CONFIG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def update_host_deployed_config(host_id, user=None, session=None, **kwargs):
    """Update host deployed config."""
    target = _get_host(host_id, session=session)
    # Both editability and prior validation are required before the
    # deployed config may change.
    check_host_editable(target, user=user)
    check_host_validated(target)
    return utils.update_db_object(session, target, **kwargs)
+
+
def _host_os_config_validates(
    config, host, session=None, user=None, **kwargs
):
    """Check host os config's validation.

    Validates the given config against the metadata of the host's os.
    Raises from metadata_api on failure; returns nothing on success.
    """
    metadata_api.validate_os_config(
        config, host.os_id
    )
+
+
@utils.input_validates_with_args(
    put_os_config=_host_os_config_validates
)
@utils.output_validates_with_args(
    os_config=_host_os_config_validates
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def _update_host_config(host, session=None, user=None, **kwargs):
    """Update host config.

    The decorators validate both the incoming put_os_config and the
    resulting os_config against the host's os metadata.
    """
    check_host_editable(host, user=user)
    return utils.update_db_object(session, host, **kwargs)
+
+
# replace os_config to put_os_config in kwargs.
# It tells db the os_config will be updated not patched.
@utils.replace_filters(
    os_config='put_os_config'
)
@utils.supported_filters(
    UPDATED_CONFIG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_CONFIG
)
def update_host_config(host_id, user=None, session=None, **kwargs):
    """Update host config (full replacement)."""
    return _update_host_config(
        _get_host(host_id, session=session),
        session=session, user=user, **kwargs
    )
+
+
# replace os_config to patched_os_config in kwargs.
# It tells db os_config will be patched not be updated.
@utils.replace_filters(
    os_config='patched_os_config'
)
@utils.supported_filters(
    PATCHED_CONFIG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_CONFIG
)
def patch_host_config(host_id, user=None, session=None, **kwargs):
    """Patch host config (merge into existing)."""
    return _update_host_config(
        _get_host(host_id, session=session),
        session=session, user=user, **kwargs
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_HOST_CONFIG
)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
def del_host_config(host_id, user=None, session=None):
    """delete a host config."""
    host = _get_host(host_id, session=session)
    check_host_editable(host, user=user)
    # Clearing the config also resets the validation flag.
    return utils.update_db_object(
        session, host, config_validated=False, os_config={}
    )
+
+
@utils.supported_filters(
    optional_support_keys=SUPPORTED_NETOWORK_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def list_host_networks(host_id, user=None, session=None, **filters):
    """Get host networks for a host."""
    owner = _get_host(host_id, session=session)
    return utils.list_db_objects(
        session, models.HostNetwork, host_id=owner.id, **filters
    )
+
+
@utils.supported_filters(
    optional_support_keys=SUPPORTED_NETOWORK_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def list_hostnetworks(user=None, session=None, **filters):
    """Get host networks across all hosts."""
    return utils.list_db_objects(session, models.HostNetwork, **filters)
+
+
def _get_hostnetwork(host_network_id, session=None, **kwargs):
    """Get hostnetwork by hostnetwork id, rejecting non-integer ids."""
    if not isinstance(host_network_id, (int, long)):
        raise exception.InvalidParameter(
            'host network id %s type is not int compatible' % host_network_id
        )
    return utils.get_db_object(
        session, models.HostNetwork,
        id=host_network_id, **kwargs
    )
+
+
def _get_host_network(host_id, host_network_id, session=None, **kwargs):
    """Get hostnetwork by host id and hostnetwork id."""
    owner = _get_host(host_id, session=session)
    network = _get_hostnetwork(host_network_id, session=session, **kwargs)
    # The network must actually belong to the addressed host.
    if network.host_id != owner.id:
        raise exception.RecordNotExists(
            'host %s does not own host network %s' % (
                owner.id, network.id
            )
        )
    return network
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def get_host_network(
    host_id, host_network_id,
    user=None, session=None, **kwargs
):
    """Get host network by host id and host network id."""
    return _get_host_network(
        host_id, host_network_id, session=session
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_HOST_NETWORKS
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def get_hostnetwork(host_network_id, user=None, session=None, **kwargs):
    """Get host network by host network id only."""
    return _get_hostnetwork(host_network_id, session=session)
+
+
@utils.supported_filters(
    ADDED_NETWORK_FIELDS,
    optional_support_keys=OPTIONAL_ADDED_NETWORK_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
    ip=utils.check_ip
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def _add_host_network(
    host_id, exception_when_existing=True,
    session=None, user=None, interface=None, ip=None, **kwargs
):
    """Add hostnetwork to a host."""
    owner = _get_host(host_id, session=session)
    check_host_editable(owner, user=user)
    return utils.add_db_object(
        session, models.HostNetwork,
        exception_when_existing,
        owner.id, interface, user.id, ip=ip, **kwargs
    )
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_NETWORK
)
def add_host_network(
    host_id, exception_when_existing=True,
    interface=None, user=None, session=None, **kwargs
):
    """Create a hostnetwork to a host."""
    return _add_host_network(
        host_id, exception_when_existing,
        session=session, user=user, interface=interface, **kwargs
    )
+
+
def _get_hostnetwork_by_ip(
    ip, session=None, **kwargs
):
    """Get the host network holding the given ip.

    The ip string is normalized to its integer form for lookup, so
    equivalent textual representations match the same record.
    """
    ip_int = long(netaddr.IPAddress(ip))
    return utils.get_db_object(
        session, models.HostNetwork,
        ip_int=ip_int, **kwargs
    )
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_NETWORK
)
def add_host_networks(
    exception_when_existing=False,
    data=[], user=None, session=None
):
    """Create host networks in bulk.

    :param data: list of dicts like
        {'host_id': <id>, 'networks': [<network dict>, ...]}.
    :returns: dict with 'hosts' (successfully added networks per host)
        and 'failed_hosts' (networks rejected due to ip conflicts).
    """
    hosts = []
    failed_hosts = []
    for host_data in data:
        host_id = host_data['host_id']
        host = _get_host(host_id, session=session)
        networks = host_data['networks']
        host_networks = []
        failed_host_networks = []
        for network in networks:
            # Reject an ip already claimed by a *different* host/interface;
            # re-adding the same (host, interface) pair is allowed.
            host_network = _get_hostnetwork_by_ip(
                network['ip'], session=session,
                exception_when_missing=False
            )
            if (
                host_network and not (
                    host_network.host_id == host.id and
                    host_network.interface == network['interface']
                )
            ):
                logging.error('ip %s exists in host network %s' % (
                    network['ip'], host_network.id
                ))
                failed_host_networks.append(network)
            else:
                host_networks.append(_add_host_network(
                    host.id, exception_when_existing,
                    session=session, user=user, **network
                ))
        if host_networks:
            hosts.append({'host_id': host.id, 'networks': host_networks})
        if failed_host_networks:
            failed_hosts.append({
                'host_id': host.id, 'networks': failed_host_networks
            })
    return {
        'hosts': hosts,
        'failed_hosts': failed_hosts
    }
+
+
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def _update_host_network(
    host_network, session=None, user=None, **kwargs
):
    """Update host network after checking its host is editable."""
    check_host_editable(host_network.host, user=user)
    return utils.update_db_object(session, host_network, **kwargs)
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_NETWORK_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
    ip=utils.check_ip
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_NETWORK
)
def update_host_network(
    host_id, host_network_id, user=None, session=None, **kwargs
):
    """Update a host network by host id and host network id."""
    owner = _get_host(host_id, session=session)
    # Successfully installed hosts are immutable unless reinstalling.
    if owner.state.state == "SUCCESSFUL" and not owner.reinstall_os:
        logging.info("ignoring updating request for successful hosts")
        return {}
    return _update_host_network(
        _get_host_network(host_id, host_network_id, session=session),
        session=session, user=user, **kwargs
    )
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_NETWORK_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
    ip=utils.check_ip
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_HOST_NETWORK
)
def update_hostnetwork(host_network_id, user=None, session=None, **kwargs):
    """Update a host network by host network id."""
    return _update_host_network(
        _get_hostnetwork(host_network_id, session=session),
        session=session, user=user, **kwargs
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_HOST_NETWORK
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def del_host_network(
    host_id, host_network_id, user=None,
    session=None, **kwargs
):
    """Delete a host network by host id and host network id."""
    network = _get_host_network(
        host_id, host_network_id, session=session
    )
    check_host_editable(network.host, user=user)
    return utils.del_db_object(session, network)
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_HOST_NETWORK
)
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
def del_hostnetwork(host_network_id, user=None, session=None, **kwargs):
    """Delete a host network by host network id."""
    network = _get_hostnetwork(host_network_id, session=session)
    check_host_editable(network.host, user=user)
    return utils.del_db_object(session, network)
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_GET_HOST_STATE
)
@utils.wrap_to_dict(RESP_STATE_FIELDS)
def get_host_state(host_id, user=None, session=None, **kwargs):
    """Get host state info (state, percentage, message, ...)."""
    return _get_host(host_id, session=session).state
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_STATE_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST_STATE
)
@utils.wrap_to_dict(RESP_STATE_FIELDS)
def update_host_state(host_id, user=None, session=None, **kwargs):
    """Update a host state and return the updated state."""
    state = _get_host(host_id, session=session).state
    utils.update_db_object(session, state, **kwargs)
    return state
+
+
@util.deprecated
@utils.supported_filters(
    optional_support_keys=UPDATED_STATE_INTERNAL_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_HOST_STATE
)
@utils.wrap_to_dict(['status', 'host'])
def update_host_state_internal(
    host_id, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Update a host state.

    This function is called when host os is installed.
    If from_database_only, the state is updated in database.
    Otherwise a celery task sent to os installer and package installer
    to do some future actions.

    :param from_database_only: when True, only write state to database;
        no celery task is dispatched even if 'ready' just became True.
    :param kwargs: may contain 'ready' (the only internal state field).
    """
    # TODO(xicheng): should be merged into update_host_state
    host = _get_host(host_id, session=session)
    logging.info("======host state: %s", host.state)
    # 'ready' transitioning False -> True is the trigger for the
    # os_installed notification.
    if 'ready' in kwargs and kwargs['ready'] and not host.state.ready:
        ready_triggered = True
    else:
        ready_triggered = False
    # Per-cluster readiness bookkeeping passed on to the celery task.
    clusterhosts_ready = {}
    clusters_os_ready = {}
    if ready_triggered:
        for clusterhost in host.clusterhosts:
            cluster = clusterhost.cluster
            # Clusters with a flavor still need package installation,
            # so their clusterhost is not considered ready yet.
            if cluster.flavor_name:
                clusterhosts_ready[cluster.id] = False
            else:
                clusterhosts_ready[cluster.id] = True
            # A cluster's os is ready when every *other* host in it has
            # a ready state (this host is about to become ready).
            all_os_ready = True
            for clusterhost_in_cluster in cluster.clusterhosts:
                host_in_cluster = clusterhost_in_cluster.host
                if host_in_cluster.id == host.id:
                    continue
                if not host_in_cluster.state.ready:
                    all_os_ready = False
            clusters_os_ready[cluster.id] = all_os_ready
    logging.debug('host %s ready: %s', host_id, ready_triggered)
    logging.debug("clusterhosts_ready is: %s", clusterhosts_ready)
    logging.debug("clusters_os_ready is %s", clusters_os_ready)

    if not ready_triggered or from_database_only:
        logging.debug('%s state is set to %s', host.name, kwargs)
        utils.update_db_object(session, host.state, **kwargs)
        # A host dropping out of ready invalidates the readiness of all
        # clusterhosts and clusters that include it.
        if not host.state.ready:
            for clusterhost in host.clusterhosts:
                utils.update_db_object(
                    session, clusterhost.state, ready=False
                )
                utils.update_db_object(
                    session, clusterhost.cluster.state, ready=False
                )
        status = '%s state is updated' % host.name
    else:
        # Fall back to the host creator's email when no caller user.
        if not user:
            user_id = host.creator_id
            user_dict = user_api.get_user(user_id, session=session)
            user_email = user_dict['email']
        else:
            user_email = user.email
        from compass.tasks import client as celery_client
        celery_client.celery.send_task(
            'compass.tasks.os_installed',
            (
                host.id, clusterhosts_ready,
                clusters_os_ready
            ),
            queue=user_email,
            exchange=user_email,
            routing_key=user_email
        )
        status = '%s: clusterhosts ready %s clusters os ready %s' % (
            host.name, clusterhosts_ready, clusters_os_ready
        )
    logging.info('action status: %s', status)
    return {
        'status': status,
        'host': host.state
    }
+
+
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def get_host_log_histories(host_id, user=None, session=None, **kwargs):
    """List every log history record attached to one host."""
    owner = _get_host(host_id, session=session)
    return utils.list_db_objects(
        session, models.HostLogHistory, id=owner.id, **kwargs
    )
+
+
def _get_host_log_history(host_id, filename, session=None, **kwargs):
    """Fetch a single log history record keyed by host id and filename."""
    owner = _get_host(host_id, session=session)
    return utils.get_db_object(
        session, models.HostLogHistory,
        id=owner.id, filename=filename, **kwargs
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def get_host_log_history(host_id, filename, user=None, session=None, **kwargs):
    """Return one host log history record."""
    record = _get_host_log_history(host_id, filename, session=session)
    return record
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_LOG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def update_host_log_history(
    host_id, filename, user=None,
    session=None, **kwargs
):
    """Update fields of an existing host log history record."""
    record = _get_host_log_history(host_id, filename, session=session)
    updated = utils.update_db_object(session, record, **kwargs)
    return updated
+
+
@utils.supported_filters(
    ADDED_LOG_FIELDS,
    optional_support_keys=UPDATED_LOG_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)
def add_host_log_history(
    host_id, exception_when_existing=False,
    filename=None, user=None, session=None, **kwargs
):
    """Create a log history record for one host."""
    owner = _get_host(host_id, session=session)
    record = utils.add_db_object(
        session, models.HostLogHistory, exception_when_existing,
        owner.id, filename, **kwargs
    )
    return record
+
+
@utils.supported_filters(optional_support_keys=['poweron'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    host=RESP_CONFIG_FIELDS
)
def poweron_host(
    host_id, poweron=None, user=None, session=None, **kwargs
):
    """Send a celery task to power on a host.

    Args:
        host_id: id of the host to power on.
        poweron: optional dict from the API layer; its content is unused.
            (Default changed from a mutable ``{}`` to ``None`` — the value
            is never read, so behavior is unchanged.)

    Returns:
        dict with a 'status' message and the 'host' object.
    """
    from compass.tasks import client as celery_client
    host = _get_host(host_id, session=session)
    # Power actions are only allowed on validated hosts.
    check_host_validated(host)
    if not user:
        # No request user available; fall back to the host creator.
        user_id = host.creator_id
        user_dict = user_api.get_user(user_id, session=session)
        user_email = user_dict['email']
    else:
        user_email = user.email
    # Tasks are routed per-user so each user's worker consumes its own queue.
    celery_client.celery.send_task(
        'compass.tasks.poweron_host',
        (host.id,),
        queue=user_email,
        exchange=user_email,
        routing_key=user_email
    )
    return {
        'status': 'poweron %s action sent' % host.name,
        'host': host
    }
+
+
@utils.supported_filters(optional_support_keys=['poweroff'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    host=RESP_CONFIG_FIELDS
)
def poweroff_host(
    host_id, poweroff=None, user=None, session=None, **kwargs
):
    """Send a celery task to power off a host.

    Args:
        host_id: id of the host to power off.
        poweroff: optional dict from the API layer; its content is unused.
            (Default changed from a mutable ``{}`` to ``None`` — the value
            is never read, so behavior is unchanged.)

    Returns:
        dict with a 'status' message and the 'host' object.
    """
    from compass.tasks import client as celery_client
    host = _get_host(host_id, session=session)
    # Power actions are only allowed on validated hosts.
    check_host_validated(host)
    if not user:
        # No request user available; fall back to the host creator.
        user_id = host.creator_id
        user_dict = user_api.get_user(user_id, session=session)
        user_email = user_dict['email']
    else:
        user_email = user.email
    # Tasks are routed per-user so each user's worker consumes its own queue.
    celery_client.celery.send_task(
        'compass.tasks.poweroff_host',
        (host.id,),
        queue=user_email,
        exchange=user_email,
        routing_key=user_email
    )
    return {
        'status': 'poweroff %s action sent' % host.name,
        'host': host
    }
+
+
@utils.supported_filters(optional_support_keys=['reset'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    host=RESP_CONFIG_FIELDS
)
def reset_host(
    host_id, reset=None, user=None, session=None, **kwargs
):
    """Send a celery task to power-cycle (reset) a host.

    Args:
        host_id: id of the host to reset.
        reset: optional dict from the API layer; its content is unused.
            (Default changed from a mutable ``{}`` to ``None`` — the value
            is never read, so behavior is unchanged.)

    Returns:
        dict with a 'status' message and the 'host' object.
    """
    from compass.tasks import client as celery_client
    host = _get_host(host_id, session=session)
    # Power actions are only allowed on validated hosts.
    check_host_validated(host)
    if not user:
        # No request user available; fall back to the host creator.
        user_id = host.creator_id
        user_dict = user_api.get_user(user_id, session=session)
        user_email = user_dict['email']
    else:
        user_email = user.email
    # Tasks are routed per-user so each user's worker consumes its own queue.
    celery_client.celery.send_task(
        'compass.tasks.reset_host',
        (host.id,),
        queue=user_email,
        exchange=user_email,
        routing_key=user_email
    )
    return {
        'status': 'reset %s action sent' % host.name,
        'host': host
    }
diff --git a/compass-deck/db/api/machine.py b/compass-deck/db/api/machine.py
new file mode 100644
index 0000000..b7b16b2
--- /dev/null
+++ b/compass-deck/db/api/machine.py
@@ -0,0 +1,317 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""Machine database operations."""
+import logging
+import re
+
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+MACHINE_PRIMARY_FILEDS = ['mac', 'owner_id']
+SUPPORTED_FIELDS = [
+ 'mac', 'tag', 'location',
+ 'machine_attributes', 'owner_id']
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+UPDATED_FIELDS = [
+ 'ipmi_credentials', 'machine_attributes',
+ 'tag', 'location']
+PATCHED_FIELDS = [
+ 'patched_ipmi_credentials', 'patched_tag',
+ 'patched_location'
+]
+RESP_FIELDS = [
+ 'id', 'mac', 'ipmi_credentials', 'switches', 'switch_ip',
+ 'port', 'vlans', 'machine_attributes', 'owner_id',
+ 'tag', 'location', 'created_at', 'updated_at'
+]
+RESP_DEPLOY_FIELDS = [
+ 'status', 'machine'
+]
+
+
def _get_machine(machine_id, session=None, **kwargs):
    """Look up a machine record by its integer id.

    Raises InvalidParameter when the id is not an integer type.
    """
    if not isinstance(machine_id, (int, long)):
        raise exception.InvalidParameter(
            'machine id %s type is not int compatible' % machine_id
        )
    return utils.get_db_object(
        session, models.Machine, id=machine_id, **kwargs
    )
+
+
@utils.supported_filters(
    MACHINE_PRIMARY_FILEDS,
    optional_support_keys=SUPPORTED_FIELDS
)
@utils.input_validates(mac=utils.check_mac)
def _add_machine(mac, owner_id=None, session=None, **kwargs):
    """Create a machine record owned by owner_id.

    Raises InvalidParameter when owner_id is not an integer type.
    """
    if not isinstance(owner_id, (int, long)):
        raise exception.InvalidParameter(
            'owner id %s type is not int compatible' % owner_id
        )
    return utils.add_db_object(
        session, models.Machine, True, mac,
        owner_id=owner_id, **kwargs
    )
+
+
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def add_machine(
    mac, owner_id=None, session=None, user=None, **kwargs
):
    """Public entry point to create a machine record."""
    machine = _add_machine(
        mac, owner_id=owner_id, session=session, **kwargs
    )
    return machine
+
+
def get_machine_internal(machine_id, session=None, **kwargs):
    """Expose machine lookup to sibling modules under db/api."""
    machine = _get_machine(machine_id, session=session, **kwargs)
    return machine
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_MACHINES
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_machine(
    machine_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Return one machine record by id."""
    machine = _get_machine(
        machine_id, session=session,
        exception_when_missing=exception_when_missing
    )
    return machine
+
+
@utils.supported_filters(
    optional_support_keys=SUPPORTED_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_MACHINES
)
@utils.output_filters(
    tag=utils.general_filter_callback,
    location=utils.general_filter_callback
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_machines(user=None, session=None, **filters):
    """List machines visible to the user.

    Admins see every machine; other users only see machines they own.
    """
    machines = utils.list_db_objects(
        session, models.Machine, **filters
    )
    # Filtering an empty list is a no-op, so the original
    # `len(machines)` emptiness pre-check was redundant (and
    # non-idiomatic); drop it.
    if not user.is_admin:
        machines = [m for m in machines if m.owner_id == user.id]
    return machines
+
+
@utils.wrap_to_dict(RESP_FIELDS)
def _update_machine(machine_id, session=None, **kwargs):
    """Apply field updates to a machine record."""
    target = _get_machine(machine_id, session=session)
    return utils.update_db_object(session, target, **kwargs)
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(ipmi_credentials=utils.check_ipmi_credentials)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_MACHINE
)
def update_machine(machine_id, user=None, session=None, **kwargs):
    """Overwrite fields of a machine record."""
    updated = _update_machine(machine_id, session=session, **kwargs)
    return updated
+
+
# Rename [ipmi_credentials, tag, location] to their patched_* forms in
# kwargs, which tells the db layer to merge (patch) rather than replace
# these fields.
@utils.replace_filters(
    ipmi_credentials='patched_ipmi_credentials',
    tag='patched_tag',
    location='patched_location'
)
@utils.supported_filters(
    optional_support_keys=PATCHED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@utils.output_validates(ipmi_credentials=utils.check_ipmi_credentials)
@user_api.check_user_permission(
    permission.PERMISSION_ADD_MACHINE
)
def patch_machine(machine_id, user=None, session=None, **kwargs):
    """Merge the given fields into a machine record."""
    patched = _update_machine(machine_id, session=session, **kwargs)
    return patched
+
+
def _check_machine_deletable(machine):
    """Raise NotAcceptable when a host is still installed on the machine."""
    host = machine.host
    if host:
        raise exception.NotAcceptable(
            'machine %s has host %s on it' % (
                machine.mac, host.name
            )
        )
+
+
@utils.supported_filters()
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_MACHINE
)
@utils.wrap_to_dict(RESP_FIELDS)
def del_machine(machine_id, user=None, session=None, **kwargs):
    """Delete a machine after checking no host still lives on it."""
    target = _get_machine(machine_id, session=session)
    _check_machine_deletable(target)
    return utils.del_db_object(session, target)
+
+
@utils.supported_filters(optional_support_keys=['poweron'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    machine=RESP_FIELDS
)
def poweron_machine(
    machine_id, poweron=None, user=None, session=None, **kwargs
):
    """Send a celery task to power on a machine.

    Args:
        machine_id: id of the machine to power on.
        poweron: optional dict from the API layer; its content is unused.
            (Default changed from a mutable ``{}`` to ``None`` — the value
            is never read, so behavior is unchanged.)

    Returns:
        dict with a 'status' message and the 'machine' object.
    """
    from compass.tasks import client as celery_client
    machine = _get_machine(
        machine_id, session=session
    )
    if not user:
        # No request user available; fall back to the machine owner.
        user_id = machine.owner_id
        user_dict = user_api.get_user(user_id, session=session)
        user_email = user_dict['email']
    else:
        user_email = user.email
    # Tasks are routed per-user so each user's worker consumes its own queue.
    celery_client.celery.send_task(
        'compass.tasks.poweron_machine',
        (machine_id,),
        queue=user_email,
        exchange=user_email,
        routing_key=user_email
    )
    return {
        'status': 'poweron %s action sent' % machine.mac,
        'machine': machine
    }
+
+
@utils.supported_filters(optional_support_keys=['poweroff'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    machine=RESP_FIELDS
)
def poweroff_machine(
    machine_id, poweroff=None, user=None, session=None, **kwargs
):
    """Send a celery task to power off a machine.

    Args:
        machine_id: id of the machine to power off.
        poweroff: optional dict from the API layer; its content is unused.
            (Default changed from a mutable ``{}`` to ``None`` — the value
            is never read, so behavior is unchanged.)

    Returns:
        dict with a 'status' message and the 'machine' object.
    """
    from compass.tasks import client as celery_client
    machine = _get_machine(
        machine_id, session=session
    )
    if not user:
        # No request user available; fall back to the machine owner.
        user_id = machine.owner_id
        user_dict = user_api.get_user(user_id, session=session)
        user_email = user_dict['email']
    else:
        user_email = user.email
    # Tasks are routed per-user so each user's worker consumes its own queue.
    celery_client.celery.send_task(
        'compass.tasks.poweroff_machine',
        (machine_id,),
        queue=user_email,
        exchange=user_email,
        routing_key=user_email
    )
    return {
        'status': 'poweroff %s action sent' % machine.mac,
        'machine': machine
    }
+
+
@utils.supported_filters(optional_support_keys=['reset'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    machine=RESP_FIELDS
)
def reset_machine(
    machine_id, reset=None, user=None, session=None, **kwargs
):
    """Send a celery task to power-cycle (reset) a machine.

    Args:
        machine_id: id of the machine to reset.
        reset: optional dict from the API layer; its content is unused.
            (Default changed from a mutable ``{}`` to ``None`` — the value
            is never read, so behavior is unchanged.)

    Returns:
        dict with a 'status' message and the 'machine' object.
    """
    from compass.tasks import client as celery_client
    machine = _get_machine(
        machine_id, session=session
    )
    if not user:
        # No request user available; fall back to the machine owner.
        user_id = machine.owner_id
        user_dict = user_api.get_user(user_id, session=session)
        user_email = user_dict['email']
    else:
        user_email = user.email
    # Tasks are routed per-user so each user's worker consumes its own queue.
    celery_client.celery.send_task(
        'compass.tasks.reset_machine',
        (machine_id,),
        queue=user_email,
        exchange=user_email,
        routing_key=user_email
    )
    return {
        'status': 'reset %s action sent' % machine.mac,
        'machine': machine
    }
diff --git a/compass-deck/db/api/metadata.py b/compass-deck/db/api/metadata.py
new file mode 100644
index 0000000..16310c8
--- /dev/null
+++ b/compass-deck/db/api/metadata.py
@@ -0,0 +1,517 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metadata related database operations."""
+import copy
+import logging
+import string
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import callback as metadata_callback
+from compass.db import exception
+from compass.db import models
+from compass.db import validator as metadata_validator
+
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+OS_FIELDS = None
+PACKAGE_FIELDS = None
+FLAVOR_FIELDS = None
+OSES_METADATA = None
+PACKAGES_METADATA = None
+FLAVORS_METADATA = None
+OSES_METADATA_UI_CONVERTERS = None
+FLAVORS_METADATA_UI_CONVERTERS = None
+
+
def _get_field_from_configuration(configs):
    """Build a field-name -> field-descriptor mapping from field configs.

    Raises InvalidParameter when a config entry is not a dict.
    """
    fields = {}
    for config in configs:
        if not isinstance(config, dict):
            raise exception.InvalidParameter(
                'config %s is not dict' % config
            )
        name = config['NAME']
        fields[name] = {
            'name': name,
            'id': name,
            'field_type': config.get('FIELD_TYPE', basestring),
            'display_type': config.get('DISPLAY_TYPE', 'text'),
            'validator': config.get('VALIDATOR', None),
            'js_validator': config.get('JS_VALIDATOR', None),
            'description': config.get('DESCRIPTION', name)
        }
    return fields
+
+
def _get_os_fields_from_configuration():
    """Load os field descriptors from the os field config dir."""
    env = dict(metadata_validator.VALIDATOR_LOCALS)
    env.update(metadata_callback.CALLBACK_LOCALS)
    return _get_field_from_configuration(
        util.load_configs(setting.OS_FIELD_DIR, env_locals=env)
    )
+
+
def _get_package_fields_from_configuration():
    """Load package field descriptors from the package field config dir."""
    env = dict(metadata_validator.VALIDATOR_LOCALS)
    env.update(metadata_callback.CALLBACK_LOCALS)
    return _get_field_from_configuration(
        util.load_configs(setting.PACKAGE_FIELD_DIR, env_locals=env)
    )
+
+
def _get_flavor_fields_from_configuration():
    """Load flavor field descriptors from the flavor field config dir."""
    env = dict(metadata_validator.VALIDATOR_LOCALS)
    env.update(metadata_callback.CALLBACK_LOCALS)
    return _get_field_from_configuration(
        util.load_configs(setting.FLAVOR_FIELD_DIR, env_locals=env)
    )
+
+
def _get_metadata_from_configuration(
    path, name, config,
    fields, **kwargs
):
    """Recursively get metadata from configuration.

    Args:
        path: used to indicate the path to the root element.
            mainly for trouble shooting.
        name: the key of the metadata section.
        config: the value of the metadata section.
        fields: all fields defined in os fields or package fields dir.

    Returns:
        dict with a '_self' descriptor for this node plus one entry per
        (possibly extended) child key.

    Raises:
        InvalidParameter: if config is not a dict, a key extension is
            malformed, or more than one general ($-prefixed) key exists
            at the same level.
    """
    if not isinstance(config, dict):
        raise exception.InvalidParameter(
            '%s config %s is not dict' % (path, config)
        )
    metadata_self = config.get('_self', {})
    # A '_self' section may point to a shared field descriptor whose
    # attributes (type, validator, ...) act as defaults below.
    if 'field' in metadata_self:
        field_name = metadata_self['field']
        field = fields[field_name]
    else:
        field = {}
    # mapping_to may contain $ like $partition. Here we replace the
    # $partition to the key of the correspondent config. The backend then
    # can use this kind of feature to support multi partitions when we
    # only declare the partition metadata in one place.
    mapping_to_template = metadata_self.get('mapping_to', None)
    if mapping_to_template:
        mapping_to = string.Template(
            mapping_to_template
        ).safe_substitute(
            **kwargs
        )
    else:
        mapping_to = None
    self_metadata = {
        'name': name,
        'display_name': metadata_self.get('display_name', name),
        'field_type': field.get('field_type', dict),
        'display_type': field.get('display_type', None),
        'description': metadata_self.get(
            'description', field.get('description', None)
        ),
        'is_required': metadata_self.get('is_required', False),
        'required_in_whole_config': metadata_self.get(
            'required_in_whole_config', False),
        'mapping_to': mapping_to,
        'validator': metadata_self.get(
            'validator', field.get('validator', None)
        ),
        'js_validator': metadata_self.get(
            'js_validator', field.get('js_validator', None)
        ),
        'default_value': metadata_self.get('default_value', None),
        'default_callback': metadata_self.get('default_callback', None),
        'default_callback_params': metadata_self.get(
            'default_callback_params', {}),
        'options': metadata_self.get('options', None),
        'options_callback': metadata_self.get('options_callback', None),
        'options_callback_params': metadata_self.get(
            'options_callback_params', {}),
        'autofill_callback': metadata_self.get(
            'autofill_callback', None),
        'autofill_callback_params': metadata_self.get(
            'autofill_callback_params', {}),
        'required_in_options': metadata_self.get(
            'required_in_options', False)
    }
    self_metadata.update(kwargs)
    metadata = {'_self': self_metadata}
    # Key extension used to do two things:
    # one is to return the extended metadata that $<something>
    # will be replace to possible extensions.
    # The other is to record the $<something> to extended value
    # and used in future mapping_to substitution.
    # TODO(grace): select proper name instead of key_extensions if
    # you think it is better.
    # Suppose key_extension is {'$partition': ['/var', '/']} for $partition
    # the metadata for $partition will be mapped to {
    # '/var': ..., '/': ...} and kwargs={'partition': '/var'} and
    # kwargs={'partition': '/'} will be parsed to recursive metadata parsing
    # for sub metadata under '/var' and '/'. Then in the metadata parsing
    # for the sub metadata, this kwargs will be used to substitute mapping_to.
    key_extensions = metadata_self.get('key_extensions', {})
    general_keys = []
    for key, value in config.items():
        if key.startswith('_'):
            continue
        if key in key_extensions:
            if not key.startswith('$'):
                raise exception.InvalidParameter(
                    '%s subkey %s should start with $' % (
                        path, key
                    )
                )
            extended_keys = key_extensions[key]
            for extended_key in extended_keys:
                if extended_key.startswith('$'):
                    raise exception.InvalidParameter(
                        '%s extended key %s should not start with $' % (
                            path, extended_key
                        )
                    )
                sub_kwargs = dict(kwargs)
                sub_kwargs[key[1:]] = extended_key
                metadata[extended_key] = _get_metadata_from_configuration(
                    '%s/%s' % (path, extended_key), extended_key, value,
                    fields, **sub_kwargs
                )
        else:
            if key.startswith('$'):
                general_keys.append(key)
            metadata[key] = _get_metadata_from_configuration(
                '%s/%s' % (path, key), key, value,
                fields, **kwargs
            )
    if len(general_keys) > 1:
        # Fixed typo in the original error message ('foud' -> 'found').
        raise exception.InvalidParameter(
            'found multi general keys in %s: %s' % (
                path, general_keys
            )
        )
    return metadata
+
+
def _get_oses_metadata_from_configuration():
    """Load per-os metadata from the os metadata config dir."""
    env = dict(metadata_validator.VALIDATOR_LOCALS)
    env.update(metadata_callback.CALLBACK_LOCALS)
    oses_metadata = {}
    for config in util.load_configs(
        setting.OS_METADATA_DIR, env_locals=env
    ):
        metadata = oses_metadata.setdefault(config['OS'], {})
        for key, value in config['METADATA'].items():
            metadata[key] = _get_metadata_from_configuration(
                key, key, value, OS_FIELDS
            )

    # Merge each os with its ancestors so children inherit metadata.
    oses = adapter_api.OSES
    parents = {}
    for os_name, os_attrs in oses.items():
        parents[os_name] = os_attrs.get('parent', None)
    for os_name in oses:
        oses_metadata[os_name] = util.recursive_merge_dict(
            os_name, oses_metadata, parents
        )
    return oses_metadata
+
+
def _get_packages_metadata_from_configuration():
    """Load per-adapter package metadata from the package metadata dir."""
    env = dict(metadata_validator.VALIDATOR_LOCALS)
    env.update(metadata_callback.CALLBACK_LOCALS)
    packages_metadata = {}
    for config in util.load_configs(
        setting.PACKAGE_METADATA_DIR, env_locals=env
    ):
        metadata = packages_metadata.setdefault(config['ADAPTER'], {})
        for key, value in config['METADATA'].items():
            metadata[key] = _get_metadata_from_configuration(
                key, key, value, PACKAGE_FIELDS
            )
    # Merge each adapter with its ancestors so children inherit metadata.
    adapters = adapter_api.ADAPTERS
    parents = {}
    for adapter_name, adapter in adapters.items():
        parents[adapter_name] = adapter.get('parent', None)
    for adapter_name in adapters:
        packages_metadata[adapter_name] = util.recursive_merge_dict(
            adapter_name, packages_metadata, parents
        )
    return packages_metadata
+
+
def _get_flavors_metadata_from_configuration():
    """Load per-adapter, per-flavor metadata from the flavor metadata dir."""
    env = dict(metadata_validator.VALIDATOR_LOCALS)
    env.update(metadata_callback.CALLBACK_LOCALS)
    flavors_metadata = {}
    for config in util.load_configs(
        setting.FLAVOR_METADATA_DIR, env_locals=env
    ):
        metadata = flavors_metadata.setdefault(
            config['ADAPTER'], {}
        ).setdefault(config['FLAVOR'], {})
        for key, value in config['METADATA'].items():
            metadata[key] = _get_metadata_from_configuration(
                key, key, value, FLAVOR_FIELDS
            )

    # Every flavor also inherits its adapter's package metadata, without
    # overriding anything the flavor declared itself.
    packages_metadata = PACKAGES_METADATA
    for adapter_name, adapter_flavors in (
        adapter_api.ADAPTERS_FLAVORS.items()
    ):
        package_metadata = packages_metadata.get(adapter_name, {})
        for flavor_name in adapter_flavors:
            flavor_metadata = flavors_metadata.setdefault(
                adapter_name, {}
            ).setdefault(flavor_name, {})
            util.merge_dict(flavor_metadata, package_metadata, override=False)
    return flavors_metadata
+
+
def _filter_metadata(metadata, **kwargs):
    """Resolve default_value and options on every '_self' descriptor.

    Walks the metadata tree; when a descriptor has no static
    default_value or options, its default_callback / options_callback
    (if any) is invoked with kwargs merged with the descriptor's own
    callback params, and the result is written back on the descriptor.
    Non-dict leaves are returned unchanged.
    """
    if not isinstance(metadata, dict):
        return metadata
    filtered_metadata = {}
    for key, value in metadata.items():
        if key == '_self':
            default_value = value.get('default_value', None)
            if default_value is None:
                default_callback_params = value.get(
                    'default_callback_params', {}
                )
                # Caller kwargs are the base; descriptor params override.
                callback_params = dict(kwargs)
                if default_callback_params:
                    callback_params.update(default_callback_params)
                default_callback = value.get('default_callback', None)
                if default_callback:
                    default_value = default_callback(key, **callback_params)
            options = value.get('options', None)
            if options is None:
                options_callback_params = value.get(
                    'options_callback_params', {}
                )
                callback_params = dict(kwargs)
                if options_callback_params:
                    callback_params.update(options_callback_params)

                options_callback = value.get('options_callback', None)
                if options_callback:
                    options = options_callback(key, **callback_params)
            # NOTE(review): `value` is not copied, so the writes below
            # mutate the shared module-level metadata dict in place —
            # presumably intended as caching of the computed
            # default/options; confirm before changing.
            filtered_metadata[key] = value
            if default_value is not None:
                filtered_metadata[key]['default_value'] = default_value
            if options is not None:
                filtered_metadata[key]['options'] = options
        else:
            filtered_metadata[key] = _filter_metadata(value, **kwargs)
    return filtered_metadata
+
+
def _load_metadata(force_reload=False):
    """Load metadata information into memory.

    If force_reload, the metadata information will be reloaded
    even if the metadata is already loaded.

    Populates the module-level caches (OS_FIELDS, PACKAGE_FIELDS,
    FLAVOR_FIELDS, *_METADATA, *_METADATA_UI_CONVERTERS). Order matters:
    adapters must be loaded first, fields before metadata, and package
    metadata before flavor metadata, since each later loader reads the
    earlier caches.
    """
    adapter_api.load_adapters_internal(force_reload=force_reload)
    global OS_FIELDS
    if force_reload or OS_FIELDS is None:
        OS_FIELDS = _get_os_fields_from_configuration()
    global PACKAGE_FIELDS
    if force_reload or PACKAGE_FIELDS is None:
        PACKAGE_FIELDS = _get_package_fields_from_configuration()
    global FLAVOR_FIELDS
    if force_reload or FLAVOR_FIELDS is None:
        FLAVOR_FIELDS = _get_flavor_fields_from_configuration()
    global OSES_METADATA
    if force_reload or OSES_METADATA is None:
        OSES_METADATA = _get_oses_metadata_from_configuration()
    global PACKAGES_METADATA
    if force_reload or PACKAGES_METADATA is None:
        PACKAGES_METADATA = _get_packages_metadata_from_configuration()
    global FLAVORS_METADATA
    if force_reload or FLAVORS_METADATA is None:
        FLAVORS_METADATA = _get_flavors_metadata_from_configuration()
    global OSES_METADATA_UI_CONVERTERS
    if force_reload or OSES_METADATA_UI_CONVERTERS is None:
        OSES_METADATA_UI_CONVERTERS = (
            _get_oses_metadata_ui_converters_from_configuration()
        )
    global FLAVORS_METADATA_UI_CONVERTERS
    if force_reload or FLAVORS_METADATA_UI_CONVERTERS is None:
        FLAVORS_METADATA_UI_CONVERTERS = (
            _get_flavors_metadata_ui_converters_from_configuration()
        )
+
+
def _get_oses_metadata_ui_converters_from_configuration():
    """Get os metadata ui converters from os metadata mapping config dir.

    os metadata ui converter is used to convert os metadata to
    the format UI can understand and show.
    """
    converters = {}
    for config in util.load_configs(setting.OS_MAPPING_DIR):
        converters[config['OS']] = config.get('CONFIG_MAPPING', {})

    # Merge each os with its ancestors so children inherit converters.
    oses = adapter_api.OSES
    parents = {}
    for os_name, os_attrs in oses.items():
        parents[os_name] = os_attrs.get('parent', None)
    for os_name in oses:
        converters[os_name] = util.recursive_merge_dict(
            os_name, converters, parents
        )
    return converters
+
+
def _get_flavors_metadata_ui_converters_from_configuration():
    """Get flavor metadata ui converters from flavor mapping config dir."""
    converters = {}
    for config in util.load_configs(setting.FLAVOR_MAPPING_DIR):
        converters.setdefault(
            config['ADAPTER'], {}
        )[config['FLAVOR']] = config.get('CONFIG_MAPPING', {})

    # Merge each adapter with its ancestors so children inherit converters.
    adapters = adapter_api.ADAPTERS
    parents = {}
    for adapter_name, adapter in adapters.items():
        parents[adapter_name] = adapter.get('parent', None)
    for adapter_name in adapters:
        converters[adapter_name] = util.recursive_merge_dict(
            adapter_name, converters, parents
        )
    return converters
+
+
def get_packages_metadata_internal(force_reload=False):
    """Get deployable package metadata."""
    _load_metadata(force_reload=force_reload)
    metadata_mapping = {}
    for adapter_name, adapter in adapter_api.ADAPTERS.items():
        if not adapter.get('deployable'):
            logging.info(
                'ignore metadata since its adapter %s is not deployable',
                adapter_name
            )
            continue
        metadata_mapping[adapter_name] = _filter_metadata(
            PACKAGES_METADATA.get(adapter_name, {})
        )
    return metadata_mapping
+
+
def get_flavors_metadata_internal(force_reload=False):
    """Get deployable flavor metadata, keyed by adapter then flavor."""
    _load_metadata(force_reload=force_reload)
    metadata_mapping = {}
    for adapter_name, adapter_flavors in (
        adapter_api.ADAPTERS_FLAVORS.items()
    ):
        if not adapter_api.ADAPTERS[adapter_name].get('deployable'):
            logging.info(
                'ignore metadata since its adapter %s is not deployable',
                adapter_name
            )
            continue
        for flavor_name in adapter_flavors:
            raw_metadata = FLAVORS_METADATA.get(
                adapter_name, {}
            ).get(flavor_name, {})
            metadata_mapping.setdefault(
                adapter_name, {}
            )[flavor_name] = _filter_metadata(raw_metadata)
    return metadata_mapping
+
+
def get_flavors_metadata_ui_converters_internal(force_reload=False):
    """Get usable flavor metadata ui converters.

    Ensures all metadata caches are loaded, then returns the
    module-level converter mapping (adapter name -> flavor name ->
    CONFIG_MAPPING dict).
    """
    _load_metadata(force_reload=force_reload)
    return FLAVORS_METADATA_UI_CONVERTERS
+
+
def get_oses_metadata_internal(force_reload=False):
    """Get deployable os metadata."""
    _load_metadata(force_reload=force_reload)
    metadata_mapping = {}
    for os_name, os_attrs in adapter_api.OSES.items():
        if not os_attrs.get('deployable'):
            logging.info(
                'ignore metadata since its os %s is not deployable',
                os_name
            )
            continue
        metadata_mapping[os_name] = _filter_metadata(
            OSES_METADATA.get(os_name, {})
        )
    return metadata_mapping
+
+
def get_oses_metadata_ui_converters_internal(force_reload=False):
    """Get usable os metadata ui converters.

    Ensures all metadata caches are loaded, then returns the
    module-level converter mapping (os name -> CONFIG_MAPPING dict).
    """
    _load_metadata(force_reload=force_reload)
    return OSES_METADATA_UI_CONVERTERS
diff --git a/compass-deck/db/api/metadata_holder.py b/compass-deck/db/api/metadata_holder.py
new file mode 100644
index 0000000..24afc67
--- /dev/null
+++ b/compass-deck/db/api/metadata_holder.py
@@ -0,0 +1,731 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metadata related object holder."""
+import logging
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import adapter_holder as adapter_holder_api
+from compass.db.api import database
+from compass.db.api import metadata as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
# Field whitelists applied by utils.wrap_to_dict on api responses.
RESP_METADATA_FIELDS = [
    'os_config', 'package_config'
]
RESP_UI_METADATA_FIELDS = [
    'os_global_config', 'flavor_config'
]
+
+
def load_metadatas(force_reload=False):
    """Populate every in-memory metadata cache of this module.

    TODO(xicheng): metadata.py keeps metadata in memory in its original
    file format; this module translates it and caches the translated
    form here. api code must only touch the globals of this module.
    """
    for loader in (
        _load_os_metadatas,
        _load_package_metadatas,
        _load_flavor_metadatas,
        _load_os_metadata_ui_converters,
        _load_flavor_metadata_ui_converters,
    ):
        loader(force_reload=force_reload)
+
+
def _load_os_metadata_ui_converters(force_reload=False):
    """Cache os metadata ui converters in this module (lazy)."""
    global OS_METADATA_UI_CONVERTERS
    if OS_METADATA_UI_CONVERTERS is None or force_reload:
        logging.info('load os metadatas ui converters into memory')
        OS_METADATA_UI_CONVERTERS = (
            metadata_api.get_oses_metadata_ui_converters_internal(
                force_reload=force_reload
            )
        )
+
+
def _load_os_metadatas(force_reload=False):
    """Cache os metadata, keyed by os id, in this module (lazy)."""
    global OS_METADATA_MAPPING
    if OS_METADATA_MAPPING is None or force_reload:
        logging.info('load os metadatas into memory')
        OS_METADATA_MAPPING = metadata_api.get_oses_metadata_internal(
            force_reload=force_reload
        )
+
+
def _load_flavor_metadata_ui_converters(force_reload=False):
    """Cache flavor metadata ui converters in this module (lazy).

    Entries are keyed by '<adapter_name>:<flavor_name>'.
    """
    global FLAVOR_METADATA_UI_CONVERTERS
    if not (FLAVOR_METADATA_UI_CONVERTERS is None or force_reload):
        return
    logging.info('load flavor metadata ui converters into memory')
    FLAVOR_METADATA_UI_CONVERTERS = {}
    per_adapter = metadata_api.get_flavors_metadata_ui_converters_internal(
        force_reload=force_reload
    )
    for adapter_name, flavors in per_adapter.items():
        for flavor_name, converter in flavors.items():
            key = '%s:%s' % (adapter_name, flavor_name)
            FLAVOR_METADATA_UI_CONVERTERS[key] = converter
+
+
@util.deprecated
def _load_package_metadatas(force_reload=False):
    """Cache deployable package metadata in this module (lazy).

    Deprecated: flavor metadata has replaced package metadata.
    """
    global PACKAGE_METADATA_MAPPING
    if PACKAGE_METADATA_MAPPING is None or force_reload:
        logging.info('load package metadatas into memory')
        PACKAGE_METADATA_MAPPING = (
            metadata_api.get_packages_metadata_internal(
                force_reload=force_reload
            )
        )
+
+
def _load_flavor_metadatas(force_reload=False):
    """Cache flavor metadata in this module (lazy).

    Entries are keyed by '<adapter_name>:<flavor_name>'.
    """
    global FLAVOR_METADATA_MAPPING
    if not (FLAVOR_METADATA_MAPPING is None or force_reload):
        return
    logging.info('load flavor metadatas into memory')
    FLAVOR_METADATA_MAPPING = {}
    per_adapter = metadata_api.get_flavors_metadata_internal(
        force_reload=force_reload
    )
    for adapter_name, flavors_metadata in per_adapter.items():
        for flavor_name, flavor_metadata in flavors_metadata.items():
            key = '%s:%s' % (adapter_name, flavor_name)
            FLAVOR_METADATA_MAPPING[key] = flavor_metadata
+
+
# In-memory caches populated lazily by load_metadatas(); None means
# "not loaded yet".  Flavor caches are keyed by '<adapter>:<flavor>'.
OS_METADATA_MAPPING = None
PACKAGE_METADATA_MAPPING = None
FLAVOR_METADATA_MAPPING = None
OS_METADATA_UI_CONVERTERS = None
FLAVOR_METADATA_UI_CONVERTERS = None
+
+
def validate_os_config(
    config, os_id, whole_check=False, **kwargs
):
    """Validate an os config dict against the os metadata.

    Raises:
        InvalidParameter: if os_id is unknown or config is invalid.
    """
    load_metadatas()
    try:
        os_metadata = OS_METADATA_MAPPING[os_id]
    except KeyError:
        raise exception.InvalidParameter(
            'os %s is not found in os metadata mapping' % os_id
        )
    _validate_config('', config, os_metadata, whole_check, **kwargs)
+
+
@util.deprecated
def validate_package_config(
    config, adapter_id, whole_check=False, **kwargs
):
    """Validate a package config dict against the adapter metadata.

    Deprecated: use validate_flavor_config instead.

    Raises:
        InvalidParameter: if adapter_id is unknown or config is invalid.
    """
    load_metadatas()
    if adapter_id not in PACKAGE_METADATA_MAPPING:
        # fixed 'metedata' typo in the raised message.
        raise exception.InvalidParameter(
            'adapter %s is not found in package metadata mapping' % adapter_id
        )
    _validate_config(
        '', config, PACKAGE_METADATA_MAPPING[adapter_id],
        whole_check, **kwargs
    )
+
+
def validate_flavor_config(
    config, flavor_id, whole_check=False, **kwargs
):
    """Validate a flavor config dict against the flavor metadata.

    Validation is skipped entirely when flavor_id is falsy.

    Raises:
        InvalidParameter: if flavor_id is unknown or config is invalid.
    """
    load_metadatas()
    if not flavor_id:
        logging.info('There is no flavor, skipping flavor validation...')
    elif flavor_id not in FLAVOR_METADATA_MAPPING:
        # fixed 'metedata' typo in the raised message.
        raise exception.InvalidParameter(
            'flavor %s is not found in flavor metadata mapping' % flavor_id
        )
    else:
        _validate_config(
            '', config, FLAVOR_METADATA_MAPPING[flavor_id],
            whole_check, **kwargs
        )
+
+
+def _filter_metadata(metadata, **kwargs):
+ """Filter metadata before return it to api.
+
+ Some metadata fields are not json compatible or
+ only used in db/api internally.
+ We should strip these fields out before return to api.
+ """
+ if not isinstance(metadata, dict):
+ return metadata
+ filtered_metadata = {}
+ for key, value in metadata.items():
+ if key == '_self':
+ filtered_metadata[key] = {
+ 'name': value['name'],
+ 'description': value.get('description', None),
+ 'default_value': value.get('default_value', None),
+ 'is_required': value.get('is_required', False),
+ 'required_in_whole_config': value.get(
+ 'required_in_whole_config', False),
+ 'js_validator': value.get('js_validator', None),
+ 'options': value.get('options', None),
+ 'required_in_options': value.get(
+ 'required_in_options', False),
+ 'field_type': value.get(
+ 'field_type_data', 'str'),
+ 'display_type': value.get('display_type', None),
+ 'mapping_to': value.get('mapping_to', None)
+ }
+ else:
+ filtered_metadata[key] = _filter_metadata(value, **kwargs)
+ return filtered_metadata
+
+
@util.deprecated
def _get_package_metadata(adapter_id):
    """Get filtered package metadata for an adapter.

    Deprecated: use _get_flavor_metadata instead.

    Raises:
        RecordNotExists: if the adapter is unknown.
    """
    load_metadatas()
    if adapter_id not in PACKAGE_METADATA_MAPPING:
        # fixed 'adpater' typo in the raised message.
        raise exception.RecordNotExists(
            'adapter %s does not exist' % adapter_id
        )
    return _filter_metadata(
        PACKAGE_METADATA_MAPPING[adapter_id]
    )
+
+
@util.deprecated
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_package_metadata(adapter_id, user=None, session=None, **kwargs):
    """Get package metadata from adapter.

    Deprecated: flavor based metadata (get_flavor_metadata) has
    replaced package metadata.  Requires list-metadatas permission.
    """
    return {
        'package_config': _get_package_metadata(adapter_id)
    }
+
+
def _get_flavor_metadata(flavor_id):
    """Get filtered flavor metadata, or None when no flavor is given.

    Raises:
        RecordNotExists: if the flavor id is unknown.
    """
    load_metadatas()
    if not flavor_id:
        # a missing flavor is tolerated; callers get no package config.
        logging.info('There is no flavor id, skipping...')
        return None
    if flavor_id not in FLAVOR_METADATA_MAPPING:
        raise exception.RecordNotExists(
            'flavor %s does not exist' % flavor_id
        )
    return _filter_metadata(FLAVOR_METADATA_MAPPING[flavor_id])
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_flavor_metadata(flavor_id, user=None, session=None, **kwargs):
    """Get flavor metadata by flavor.

    Requires list-metadatas permission; the filtered flavor metadata is
    returned under the 'package_config' key.
    """
    return {
        'package_config': _get_flavor_metadata(flavor_id)
    }
+
+
def _get_os_metadata(os_id):
    """Get filtered os metadata by os id.

    Raises:
        RecordNotExists: if the os id is unknown.
    """
    load_metadatas()
    try:
        os_metadata = OS_METADATA_MAPPING[os_id]
    except KeyError:
        raise exception.RecordNotExists(
            'os %s does not exist' % os_id
        )
    return _filter_metadata(os_metadata)
+
+
def _get_os_metadata_ui_converter(os_id):
    """Get the os metadata ui converter by os id.

    Raises:
        RecordNotExists: if the os id is unknown.
    """
    load_metadatas()
    try:
        return OS_METADATA_UI_CONVERTERS[os_id]
    except KeyError:
        raise exception.RecordNotExists(
            'os %s does not exist' % os_id
        )
+
+
def _get_flavor_metadata_ui_converter(flavor_id):
    """Get the flavor metadata ui converter by flavor id.

    Raises:
        RecordNotExists: if the flavor id is unknown.
    """
    load_metadatas()
    try:
        return FLAVOR_METADATA_UI_CONVERTERS[flavor_id]
    except KeyError:
        raise exception.RecordNotExists(
            'flavor %s does not exist' % flavor_id
        )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_os_metadata(os_id, user=None, session=None, **kwargs):
    """Get os metadata by os id.

    Requires list-metadatas permission; the filtered os metadata is
    returned under the 'os_config' key.
    """
    return {'os_config': _get_os_metadata(os_id)}
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
def get_os_ui_metadata(os_id, user=None, session=None, **kwargs):
    """Get ui-ready os metadata by os id.

    The raw os metadata is translated through its ui converter before
    being returned.  Requires list-metadatas permission.
    """
    return _get_ui_metadata(
        _get_os_metadata(os_id),
        _get_os_metadata_ui_converter(os_id)
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
def get_flavor_ui_metadata(flavor_id, user=None, session=None, **kwargs):
    """Get ui-ready flavor metadata by flavor id.

    The raw flavor metadata is translated through its ui converter
    before being returned.  Requires list-metadatas permission.
    """
    return _get_ui_metadata(
        _get_flavor_metadata(flavor_id),
        _get_flavor_metadata_ui_converter(flavor_id)
    )
+
+
def _get_ui_metadata(metadata, metadata_ui_converter):
    """Convert metadata to ui understandable metadata.

    Args:
        metadata: metadata we defined in metadata files.
        metadata_ui_converter: metadata ui converter defined in metadata
                               mapping files; used to convert original
                               metadata to ui understandable metadata.

    Returns:
        ui understandable metadata: a dict with one key (the
        converter's 'mapped_name') holding one dict per mapped child.
    """
    mapped_name = metadata_ui_converter['mapped_name']
    converted = {mapped_name: []}
    for mapped_child in metadata_ui_converter['mapped_children']:
        data_dict = {}
        for ui_key, ui_value in mapped_child.items():
            for key, value in ui_value.items():
                if key == 'data':
                    # expand the 'data' section from the matching
                    # metadata subtree.
                    data_dict['data'] = _get_ui_metadata_data(
                        metadata[ui_key], value, []
                    )
                else:
                    data_dict[key] = value
        converted[mapped_name].append(data_dict)
    return converted
+
+
def _get_ui_metadata_data(metadata, config, result_data):
    """Flatten one ui-converter subtree into ui field dicts.

    Walks `config` (a ui converter subtree) against `metadata`,
    appending one dict per discovered field group to `result_data`.

    Returns:
        result_data (which is also mutated in place).
    """
    data_dict = {}
    for key, config_value in config.items():
        if isinstance(config_value, dict) and key != 'content_data':
            # nested converter section: descend into the matching
            # metadata subtree when there is one, else keep the current
            # metadata level.
            if key in metadata.keys():
                _get_ui_metadata_data(metadata[key], config_value, result_data)
            else:
                _get_ui_metadata_data(metadata, config_value, result_data)
        elif isinstance(config_value, list):
            # a list mixes literal option dicts with names of '_self'
            # attributes to copy out of the metadata.
            option_list = []
            for item in config_value:
                if isinstance(item, dict):
                    option_list.append(item)
                    data_dict[key] = option_list
                else:
                    # booleans become 'true'/'false' strings for the ui.
                    if isinstance(metadata['_self'][item], bool):
                        data_dict[item] = str(metadata['_self'][item]).lower()
                    else:
                        data_dict[item] = metadata['_self'][item]
        else:
            data_dict[key] = config_value
    if data_dict:
        result_data.append(data_dict)
    return result_data
+
+
@util.deprecated
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_package_os_metadata(
    adapter_id, os_id,
    user=None, session=None, **kwargs
):
    """Get both os and package metadata for an adapter/os pair.

    Deprecated: use get_flavor_os_metadata instead.

    Raises:
        InvalidParameter: if os_id is not supported by the adapter.
    """
    adapter = adapter_holder_api.get_adapter(
        adapter_id, user=user, session=session
    )
    supported_os_ids = set(
        supported_os['id'] for supported_os in adapter['supported_oses']
    )
    if os_id not in supported_os_ids:
        raise exception.InvalidParameter(
            'os %s is not in the supported os list of adapter %s' % (
                os_id, adapter_id
            )
        )
    return {
        'os_config': _get_os_metadata(os_id),
        'package_config': _get_package_metadata(adapter_id)
    }
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_flavor_os_metadata(
    flavor_id, os_id,
    user=None, session=None, **kwargs
):
    """Get os and flavor metadata by flavor and os.

    Raises:
        InvalidParameter: if os_id is not supported by the flavor's
            adapter.
    """
    flavor = adapter_holder_api.get_flavor(
        flavor_id, user=user, session=session
    )
    adapter_id = flavor['adapter_id']
    adapter = adapter_holder_api.get_adapter(
        adapter_id, user=user, session=session
    )
    os_ids = [os['id'] for os in adapter['supported_oses']]
    if os_id not in os_ids:
        raise exception.InvalidParameter(
            'os %s is not in the supported os list of adapter %s' % (
                os_id, adapter_id
            )
        )
    metadatas = {}
    # BUG FIX: _get_os_metadata and _get_flavor_metadata take a single
    # id argument; the session object was previously passed as the id,
    # which broke both lookups.
    metadatas['os_config'] = _get_os_metadata(os_id)
    metadatas['package_config'] = _get_flavor_metadata(flavor_id)
    return metadatas
+
+
def _validate_self(
    config_path, config_key, config,
    metadata, whole_check,
    **kwargs
):
    """Validate one config value against its metadata '_self' section.

    Args:
        config_path: slash-joined path of the value (for messages).
        config_key: key of the value in its parent dict.
        config: the value being validated.
        metadata: metadata subtree describing the value.
        whole_check: whether required_in_whole_config is enforced.

    Raises:
        InvalidParameter: when the value violates its metadata.
    """
    logging.debug('validate config self %s', config_path)
    if '_self' not in metadata:
        # no schema at this level: recurse into child keys if possible.
        if isinstance(config, dict):
            _validate_config(
                config_path, config, metadata, whole_check, **kwargs
            )
        return
    # NOTE: basestring means this module targets python 2.
    field_type = metadata['_self'].get('field_type', basestring)
    if not isinstance(config, field_type):
        raise exception.InvalidParameter(
            '%s config type is not %s: %s' % (config_path, field_type, config)
        )
    is_required = metadata['_self'].get(
        'is_required', False
    )
    required_in_whole_config = metadata['_self'].get(
        'required_in_whole_config', False
    )
    if isinstance(config, basestring):
        if config == '' and not is_required and not required_in_whole_config:
            # ignore empty config when it is optional
            return
    required_in_options = metadata['_self'].get(
        'required_in_options', False
    )
    options = metadata['_self'].get('options', None)
    if required_in_options:
        # membership check depends on the declared field type:
        # scalars must be a member, sequences a subset, dict keys a
        # subset of the allowed options.
        if field_type in [int, basestring, float, bool]:
            if options and config not in options:
                raise exception.InvalidParameter(
                    '%s config is not in %s: %s' % (
                        config_path, options, config
                    )
                )
        elif field_type in [list, tuple]:
            if options and not set(config).issubset(set(options)):
                raise exception.InvalidParameter(
                    '%s config is not in %s: %s' % (
                        config_path, options, config
                    )
                )
        elif field_type == dict:
            if options and not set(config.keys()).issubset(set(options)):
                raise exception.InvalidParameter(
                    '%s config is not in %s: %s' % (
                        config_path, options, config
                    )
                )
    validator = metadata['_self'].get('validator', None)
    logging.debug('validate by validator %s', validator)
    if validator:
        if not validator(config_key, config, **kwargs):
            raise exception.InvalidParameter(
                '%s config is invalid' % config_path
            )
    if isinstance(config, dict):
        # dict values also validate their children against the schema.
        _validate_config(
            config_path, config, metadata, whole_check, **kwargs
        )
+
+
def _validate_config(
    config_path, config, metadata, whole_check,
    **kwargs
):
    """Validate a config dict against a metadata subtree.

    Metadata keys starting with '$' are wildcard schemas matching any
    config key; keys starting with '_' are schema bookkeeping; other
    keys must match config keys literally.

    Raises:
        InvalidParameter: on a missing required key, an unmatched
            config key, or an invalid value.
    """
    logging.debug('validate config %s', config_path)
    generals = {}
    specified = {}
    for key, value in metadata.items():
        if key.startswith('$'):
            generals[key] = value
        elif key.startswith('_'):
            pass
        else:
            specified[key] = value
    config_keys = set(config.keys())
    specified_keys = set(specified.keys())
    intersect_keys = config_keys & specified_keys
    # NOTE(review): these two names are swapped relative to their
    # meaning — not_found_keys are config keys with no literal schema,
    # redundant_keys are schema keys absent from config.  Behavior is
    # correct; only the naming is confusing.
    not_found_keys = config_keys - specified_keys
    redundant_keys = specified_keys - config_keys
    for key in redundant_keys:
        if '_self' not in specified[key]:
            continue
        # a key absent from config is an error when marked required.
        if specified[key]['_self'].get('is_required', False):
            raise exception.InvalidParameter(
                '%s/%s does not find but it is required' % (
                    config_path, key
                )
            )
        if (
            whole_check and
            specified[key]['_self'].get(
                'required_in_whole_config', False
            )
        ):
            raise exception.InvalidParameter(
                '%s/%s does not find but it is required in whole config' % (
                    config_path, key
                )
            )
    for key in intersect_keys:
        _validate_self(
            '%s/%s' % (config_path, key),
            key, config[key], specified[key], whole_check,
            **kwargs
        )
    for key in not_found_keys:
        # config keys without a literal schema must match a wildcard.
        if not generals:
            raise exception.InvalidParameter(
                'key %s missing in metadata %s' % (
                    key, config_path
                )
            )
        for general_key, general_value in generals.items():
            _validate_self(
                '%s/%s' % (config_path, key),
                key, config[key], general_value, whole_check,
                **kwargs
            )
+
+
def _autofill_self_config(
    config_path, config_key, config,
    metadata,
    **kwargs
):
    """Autofill one config value from its metadata '_self' section.

    Applies the schema's autofill_callback (if any), passing through
    default_value and options, then recurses into dict values.

    Returns:
        the possibly-replaced config value; may stay None when nothing
        was filled in.
    """
    if '_self' not in metadata:
        # no schema at this level: recurse into child keys if possible.
        if isinstance(config, dict):
            _autofill_config(
                config_path, config, metadata, **kwargs
            )
        return config
    logging.debug(
        'autofill %s by metadata %s', config_path, metadata['_self']
    )
    autofill_callback = metadata['_self'].get(
        'autofill_callback', None
    )
    autofill_callback_params = metadata['_self'].get(
        'autofill_callback_params', {}
    )
    # schema-declared params override the caller-supplied kwargs.
    callback_params = dict(kwargs)
    if autofill_callback_params:
        callback_params.update(autofill_callback_params)
    default_value = metadata['_self'].get(
        'default_value', None
    )
    if default_value is not None:
        callback_params['default_value'] = default_value
    options = metadata['_self'].get(
        'options', None
    )
    if options is not None:
        callback_params['options'] = options
    if autofill_callback:
        config = autofill_callback(
            config_key, config, **callback_params
        )
    # treat None as an empty dict so children can still be autofilled;
    # keep the fill only when something was actually produced.
    if config is None:
        new_config = {}
    else:
        new_config = config
    if isinstance(new_config, dict):
        _autofill_config(
            config_path, new_config, metadata, **kwargs
        )
        if new_config:
            config = new_config
    return config
+
+
def _autofill_config(
    config_path, config, metadata, **kwargs
):
    """Autofill a config dict, in place, from a metadata subtree.

    Metadata keys starting with '$' are wildcard schemas matching any
    config key; keys starting with '_' are bookkeeping; other keys are
    literal.  Returns the (mutated) config dict.
    """
    generals = {}
    specified = {}
    for key, value in metadata.items():
        if key.startswith('$'):
            generals[key] = value
        elif key.startswith('_'):
            pass
        else:
            specified[key] = value
    config_keys = set(config.keys())
    specified_keys = set(specified.keys())
    intersect_keys = config_keys & specified_keys
    # NOTE(review): as in _validate_config, these two names are
    # swapped relative to their meaning; behavior is correct.
    not_found_keys = config_keys - specified_keys
    redundant_keys = specified_keys - config_keys
    for key in redundant_keys:
        # schema keys missing from config may be fillable from defaults.
        self_config = _autofill_self_config(
            '%s/%s' % (config_path, key),
            key, None, specified[key], **kwargs
        )
        if self_config is not None:
            config[key] = self_config
    for key in intersect_keys:
        config[key] = _autofill_self_config(
            '%s/%s' % (config_path, key),
            key, config[key], specified[key],
            **kwargs
        )
    for key in not_found_keys:
        # config keys without a literal schema fall back to wildcards.
        for general_key, general_value in generals.items():
            config[key] = _autofill_self_config(
                '%s/%s' % (config_path, key),
                key, config[key], general_value,
                **kwargs
            )
    return config
+
+
def autofill_os_config(
    config, os_id, **kwargs
):
    """Autofill an os config with defaults from the os metadata.

    Raises:
        InvalidParameter: if os_id is unknown.
    """
    load_metadatas()
    try:
        os_metadata = OS_METADATA_MAPPING[os_id]
    except KeyError:
        raise exception.InvalidParameter(
            'os %s is not found in os metadata mapping' % os_id
        )
    return _autofill_config('', config, os_metadata, **kwargs)
+
+
def autofill_package_config(
    config, adapter_id, **kwargs
):
    """Autofill a package config with defaults from adapter metadata.

    Raises:
        InvalidParameter: if adapter_id is unknown.
    """
    load_metadatas()
    try:
        package_metadata = PACKAGE_METADATA_MAPPING[adapter_id]
    except KeyError:
        raise exception.InvalidParameter(
            'adapter %s is not found in package metadata mapping' % adapter_id
        )
    return _autofill_config('', config, package_metadata, **kwargs)
+
+
def autofill_flavor_config(
    config, flavor_id, **kwargs
):
    """Autofill a flavor config with defaults from flavor metadata.

    Returns None (filling nothing) when flavor_id is falsy.

    Raises:
        InvalidParameter: if flavor_id is unknown.
    """
    load_metadatas()
    if not flavor_id:
        logging.info('There is no flavor, skipping...')
        return None
    if flavor_id not in FLAVOR_METADATA_MAPPING:
        raise exception.InvalidParameter(
            'flavor %s is not found in flavor metadata mapping' % flavor_id
        )
    return _autofill_config(
        '', config, FLAVOR_METADATA_MAPPING[flavor_id], **kwargs
    )
diff --git a/compass-deck/db/api/network.py b/compass-deck/db/api/network.py
new file mode 100644
index 0000000..e2bf7d3
--- /dev/null
+++ b/compass-deck/db/api/network.py
@@ -0,0 +1,160 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Network related database operations."""
+import logging
+import netaddr
+import re
+
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+
# Filterable fields accepted by list_subnets.
SUPPORTED_FIELDS = ['subnet', 'name']
# Fields returned to api callers (via utils.wrap_to_dict).
RESP_FIELDS = [
    'id', 'name', 'subnet', 'created_at', 'updated_at'
]
# Mandatory / optional fields when creating a subnet.
ADDED_FIELDS = ['subnet']
OPTIONAL_ADDED_FIELDS = ['name']
# Db-managed fields silently dropped from caller input.
IGNORE_FIELDS = [
    'id', 'created_at', 'updated_at'
]
# Fields callers may change through update_subnet.
UPDATED_FIELDS = ['subnet', 'name']
+
+
def _check_subnet(subnet):
    """Check subnet format is correct.

    Raises:
        InvalidParameter: if netaddr cannot parse the subnet string.
    """
    try:
        netaddr.IPNetwork(subnet)
    # broad except is deliberate: any parse failure maps to the same
    # api-level InvalidParameter, and the original error is logged.
    except Exception as error:
        logging.exception(error)
        raise exception.InvalidParameter(
            'subnet %s format unrecognized' % subnet)
+
+
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SUBNETS
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_subnets(user=None, session=None, **filters):
    """List subnets matching the given filters.

    Requires list-subnets permission; filters are limited to
    SUPPORTED_FIELDS by the supported_filters decorator.
    """
    return utils.list_db_objects(
        session, models.Subnet, **filters
    )
+
+
def _get_subnet(subnet_id, session=None, **kwargs):
    """Fetch a subnet record by its integer id.

    Raises:
        InvalidParameter: when subnet_id is not an integer.
    """
    # NOTE: `long` exists because this module targets python 2.
    if not isinstance(subnet_id, (int, long)):
        raise exception.InvalidParameter(
            'subnet id %s type is not int compatible' % subnet_id
        )
    return utils.get_db_object(
        session, models.Subnet,
        id=subnet_id, **kwargs
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SUBNETS
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_subnet(
    subnet_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Get subnet info by id.

    When exception_when_missing is False, a missing subnet yields None
    instead of raising.  Requires list-subnets permission.
    """
    return _get_subnet(
        subnet_id, session=session,
        exception_when_missing=exception_when_missing
    )
+
+
@utils.supported_filters(
    ADDED_FIELDS, optional_support_keys=OPTIONAL_ADDED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(subnet=_check_subnet)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SUBNET
)
@utils.wrap_to_dict(RESP_FIELDS)
def add_subnet(
    exception_when_existing=True, subnet=None,
    user=None, session=None, **kwargs
):
    """Create a subnet.

    The subnet string is format-checked by the input_validates
    decorator.  Requires add-subnet permission.
    """
    return utils.add_db_object(
        session, models.Subnet,
        exception_when_existing, subnet, **kwargs
    )
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(subnet=_check_subnet)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SUBNET
)
@utils.wrap_to_dict(RESP_FIELDS)
def update_subnet(subnet_id, user=None, session=None, **kwargs):
    """Update an existing subnet's fields.

    Requires add-subnet permission; the subnet string, when given, is
    format-checked by the input_validates decorator.
    """
    return utils.update_db_object(
        session,
        _get_subnet(subnet_id, session=session),
        **kwargs
    )
+
+
def _check_subnet_deletable(subnet):
    """Raise NotAcceptable when host networks still use the subnet."""
    if not subnet.host_networks:
        return
    in_use = [
        '%s:%s=%s' % (
            host_network.host.name, host_network.interface,
            host_network.ip
        )
        for host_network in subnet.host_networks
    ]
    raise exception.NotAcceptable(
        'subnet %s contains host networks %s' % (
            subnet.subnet, in_use
        )
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_SUBNET
)
@utils.wrap_to_dict(RESP_FIELDS)
def del_subnet(subnet_id, user=None, session=None, **kwargs):
    """Delete a subnet once nothing references it.

    Requires del-subnet permission; raises NotAcceptable when host
    networks still use the subnet.
    """
    subnet = _get_subnet(subnet_id, session=session)
    _check_subnet_deletable(subnet)
    return utils.del_db_object(session, subnet)
diff --git a/compass-deck/db/api/permission.py b/compass-deck/db/api/permission.py
new file mode 100644
index 0000000..f4d777a
--- /dev/null
+++ b/compass-deck/db/api/permission.py
@@ -0,0 +1,357 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Permission database operations."""
+import re
+
+from compass.db.api import database
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
# Filterable fields for permission listing / fields returned to api.
SUPPORTED_FIELDS = ['id', 'name', 'alias', 'description']
RESP_FIELDS = ['id', 'name', 'alias', 'description']
+
+
class PermissionWrapper(object):
    """Lightweight permission descriptor used to seed the database.

    Mirrors the name/alias/description columns of the Permission model.
    """

    def __init__(self, name, alias, description):
        self.name = name
        self.alias = alias
        self.description = description

    def to_dict(self):
        """Return the permission as a plain dict."""
        return dict(
            name=self.name,
            alias=self.alias,
            description=self.description
        )
+
+
# Permission constants seeded into the permission table.  Each entry
# pairs an internal name with a human readable alias and description.
PERMISSION_LIST_PERMISSIONS = PermissionWrapper(
    'list_permissions', 'list permissions', 'list all permissions'
)
PERMISSION_LIST_SWITCHES = PermissionWrapper(
    'list_switches', 'list switches', 'list all switches'
)
PERMISSION_LIST_SWITCH_FILTERS = PermissionWrapper(
    'list_switch_filters',
    'list switch filters',
    'list switch filters'
)
PERMISSION_ADD_SWITCH = PermissionWrapper(
    'add_switch', 'add switch', 'add switch'
)
PERMISSION_UPDATE_SWITCH_FILTERS = PermissionWrapper(
    'update_switch_filters',
    'update switch filters',
    'update switch filters'
)
PERMISSION_DEL_SWITCH = PermissionWrapper(
    'delete_switch', 'delete switch', 'delete switch'
)
PERMISSION_LIST_SWITCH_MACHINES = PermissionWrapper(
    'list_switch_machines', 'list switch machines', 'list switch machines'
)
PERMISSION_ADD_SWITCH_MACHINE = PermissionWrapper(
    'add_switch_machine', 'add switch machine', 'add switch machine'
)
PERMISSION_DEL_SWITCH_MACHINE = PermissionWrapper(
    'del_switch_machine', 'delete switch machine', 'del switch machine'
)
PERMISSION_UPDATE_SWITCH_MACHINES = PermissionWrapper(
    'update_switch_machines',
    'update switch machines',
    'update switch machines'
)
PERMISSION_LIST_MACHINES = PermissionWrapper(
    'list_machines', 'list machines', 'list machines'
)
PERMISSION_ADD_MACHINE = PermissionWrapper(
    'add_machine', 'add machine', 'add machine'
)
PERMISSION_DEL_MACHINE = PermissionWrapper(
    'delete_machine', 'delete machine', 'delete machine'
)
PERMISSION_LIST_ADAPTERS = PermissionWrapper(
    'list_adapters', 'list adapters', 'list adapters'
)
PERMISSION_LIST_METADATAS = PermissionWrapper(
    'list_metadatas', 'list metadatas', 'list metadatas'
)
PERMISSION_LIST_SUBNETS = PermissionWrapper(
    'list_subnets', 'list subnets', 'list subnets'
)
PERMISSION_ADD_SUBNET = PermissionWrapper(
    'add_subnet', 'add subnet', 'add subnet'
)
PERMISSION_DEL_SUBNET = PermissionWrapper(
    'del_subnet', 'del subnet', 'del subnet'
)
PERMISSION_LIST_CLUSTERS = PermissionWrapper(
    'list_clusters', 'list clusters', 'list clusters'
)
PERMISSION_ADD_CLUSTER = PermissionWrapper(
    'add_cluster', 'add cluster', 'add cluster'
)
PERMISSION_DEL_CLUSTER = PermissionWrapper(
    'del_cluster', 'del cluster', 'del cluster'
)
PERMISSION_LIST_CLUSTER_CONFIG = PermissionWrapper(
    'list_cluster_config', 'list cluster config', 'list cluster config'
)
PERMISSION_ADD_CLUSTER_CONFIG = PermissionWrapper(
    'add_cluster_config', 'add cluster config', 'add cluster config'
)
PERMISSION_DEL_CLUSTER_CONFIG = PermissionWrapper(
    'del_cluster_config', 'del cluster config', 'del cluster config'
)
PERMISSION_UPDATE_CLUSTER_HOSTS = PermissionWrapper(
    'update_cluster_hosts',
    'update cluster hosts',
    'update cluster hosts'
)
PERMISSION_DEL_CLUSTER_HOST = PermissionWrapper(
    'del_clusterhost', 'delete clusterhost', 'delete clusterhost'
)
PERMISSION_REVIEW_CLUSTER = PermissionWrapper(
    'review_cluster', 'review cluster', 'review cluster'
)
PERMISSION_DEPLOY_CLUSTER = PermissionWrapper(
    'deploy_cluster', 'deploy cluster', 'deploy cluster'
)
PERMISSION_DEPLOY_HOST = PermissionWrapper(
    'deploy_host', 'deploy host', 'deploy host'
)
PERMISSION_GET_CLUSTER_STATE = PermissionWrapper(
    'get_cluster_state', 'get cluster state', 'get cluster state'
)
PERMISSION_UPDATE_CLUSTER_STATE = PermissionWrapper(
    'update_cluster_state', 'update cluster state',
    'update cluster state'
)
PERMISSION_LIST_HOSTS = PermissionWrapper(
    'list_hosts', 'list hosts', 'list hosts'
)
PERMISSION_LIST_HOST_CLUSTERS = PermissionWrapper(
    'list_host_clusters',
    'list host clusters',
    'list host clusters'
)
PERMISSION_UPDATE_HOST = PermissionWrapper(
    'update_host', 'update host', 'update host'
)
PERMISSION_DEL_HOST = PermissionWrapper(
    'del_host', 'del host', 'del host'
)
PERMISSION_LIST_HOST_CONFIG = PermissionWrapper(
    'list_host_config', 'list host config', 'list host config'
)
PERMISSION_ADD_HOST_CONFIG = PermissionWrapper(
    'add_host_config', 'add host config', 'add host config'
)
PERMISSION_DEL_HOST_CONFIG = PermissionWrapper(
    'del_host_config', 'del host config', 'del host config'
)
PERMISSION_LIST_HOST_NETWORKS = PermissionWrapper(
    'list_host_networks',
    'list host networks',
    'list host networks'
)
PERMISSION_ADD_HOST_NETWORK = PermissionWrapper(
    'add_host_network', 'add host network', 'add host network'
)
PERMISSION_DEL_HOST_NETWORK = PermissionWrapper(
    'del_host_network', 'del host network', 'del host network'
)
PERMISSION_GET_HOST_STATE = PermissionWrapper(
    'get_host_state', 'get host state', 'get host state'
)
PERMISSION_UPDATE_HOST_STATE = PermissionWrapper(
    # fixed 'update host sate' typo in the user-visible alias.
    'update_host_state', 'update host state', 'update host state'
)
PERMISSION_LIST_CLUSTERHOSTS = PermissionWrapper(
    'list_clusterhosts', 'list cluster hosts', 'list cluster hosts'
)
PERMISSION_LIST_CLUSTERHOST_CONFIG = PermissionWrapper(
    'list_clusterhost_config',
    'list clusterhost config',
    'list clusterhost config'
)
PERMISSION_ADD_CLUSTERHOST_CONFIG = PermissionWrapper(
    'add_clusterhost_config',
    'add clusterhost config',
    'add clusterhost config'
)
PERMISSION_DEL_CLUSTERHOST_CONFIG = PermissionWrapper(
    'del_clusterhost_config',
    'del clusterhost config',
    'del clusterhost config'
)
PERMISSION_GET_CLUSTERHOST_STATE = PermissionWrapper(
    'get_clusterhost_state',
    'get clusterhost state',
    'get clusterhost state'
)
PERMISSION_UPDATE_CLUSTERHOST_STATE = PermissionWrapper(
    'update_clusterhost_state',
    'update clusterhost state',
    'update clusterhost state'
)
PERMISSION_LIST_HEALTH_REPORT = PermissionWrapper(
    'list_health_reports',
    'list health check report',
    'list health check report'
)
PERMISSION_GET_HEALTH_REPORT = PermissionWrapper(
    'get_health_report',
    'get health report',
    'get health report'
)
PERMISSION_CHECK_CLUSTER_HEALTH = PermissionWrapper(
    'start_check_cluster_health',
    'start check cluster health',
    'start check cluster health'
)
PERMISSION_SET_HEALTH_CHECK_ERROR = PermissionWrapper(
    'set_error_state',
    'set health check into error state',
    'set health check into error state'
)
PERMISSION_DELETE_REPORT = PermissionWrapper(
    'delete_reports',
    'delete health reports',
    'delete health reports'
)
+PERMISSIONS = [
+ PERMISSION_LIST_PERMISSIONS,
+ PERMISSION_LIST_SWITCHES,
+ PERMISSION_ADD_SWITCH,
+ PERMISSION_DEL_SWITCH,
+ PERMISSION_LIST_SWITCH_FILTERS,
+ PERMISSION_UPDATE_SWITCH_FILTERS,
+ PERMISSION_LIST_SWITCH_MACHINES,
+ PERMISSION_ADD_SWITCH_MACHINE,
+ PERMISSION_DEL_SWITCH_MACHINE,
+ PERMISSION_UPDATE_SWITCH_MACHINES,
+ PERMISSION_LIST_MACHINES,
+ PERMISSION_ADD_MACHINE,
+ PERMISSION_DEL_MACHINE,
+ PERMISSION_LIST_ADAPTERS,
+ PERMISSION_LIST_METADATAS,
+ PERMISSION_LIST_SUBNETS,
+ PERMISSION_ADD_SUBNET,
+ PERMISSION_DEL_SUBNET,
+ PERMISSION_LIST_CLUSTERS,
+ PERMISSION_ADD_CLUSTER,
+ PERMISSION_DEL_CLUSTER,
+ PERMISSION_LIST_CLUSTER_CONFIG,
+ PERMISSION_ADD_CLUSTER_CONFIG,
+ PERMISSION_DEL_CLUSTER_CONFIG,
+ PERMISSION_UPDATE_CLUSTER_HOSTS,
+ PERMISSION_DEL_CLUSTER_HOST,
+ PERMISSION_REVIEW_CLUSTER,
+ PERMISSION_DEPLOY_CLUSTER,
+ PERMISSION_GET_CLUSTER_STATE,
+ PERMISSION_UPDATE_CLUSTER_STATE,
+ PERMISSION_LIST_HOSTS,
+ PERMISSION_LIST_HOST_CLUSTERS,
+ PERMISSION_UPDATE_HOST,
+ PERMISSION_DEL_HOST,
+ PERMISSION_LIST_HOST_CONFIG,
+ PERMISSION_ADD_HOST_CONFIG,
+ PERMISSION_DEL_HOST_CONFIG,
+ PERMISSION_LIST_HOST_NETWORKS,
+ PERMISSION_ADD_HOST_NETWORK,
+ PERMISSION_DEL_HOST_NETWORK,
+ PERMISSION_GET_HOST_STATE,
+ PERMISSION_UPDATE_HOST_STATE,
+ PERMISSION_DEPLOY_HOST,
+ PERMISSION_LIST_CLUSTERHOSTS,
+ PERMISSION_LIST_CLUSTERHOST_CONFIG,
+ PERMISSION_ADD_CLUSTERHOST_CONFIG,
+ PERMISSION_DEL_CLUSTERHOST_CONFIG,
+ PERMISSION_GET_CLUSTERHOST_STATE,
+ PERMISSION_UPDATE_CLUSTERHOST_STATE,
+ PERMISSION_LIST_HEALTH_REPORT,
+ PERMISSION_GET_HEALTH_REPORT,
+ PERMISSION_CHECK_CLUSTER_HEALTH,
+ PERMISSION_SET_HEALTH_CHECK_ERROR,
+ PERMISSION_DELETE_REPORT
+]
+
+
@util.deprecated
def list_permissions_internal(session, **filters):
    """Internal helper used only by other db.api modules."""
    found = utils.list_db_objects(session, models.Permission, **filters)
    return found
+
+
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
@utils.wrap_to_dict(RESP_FIELDS)
def list_permissions(user=None, session=None, **filters):
    """List permissions matching the given filters."""
    found = utils.list_db_objects(
        session, models.Permission, **filters
    )
    return found
+
+
def _get_permission(permission_id, session=None, **kwargs):
    """Get permission object by the unique key of Permission table."""
    # Guard clause: only int-compatible ids are valid lookup keys.
    if not isinstance(permission_id, (int, long)):
        raise exception.InvalidParameter(
            'permission id %s type is not int compatible' % permission_id
        )
    return utils.get_db_object(
        session, models.Permission, id=permission_id, **kwargs)
+
+
def get_permission_internal(permission_id, session=None, **kwargs):
    """Get permission by id; for use by other db/api modules only."""
    return _get_permission(permission_id, session=session, **kwargs)
+
+
@utils.supported_filters()
@database.run_in_session()
@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
@utils.wrap_to_dict(RESP_FIELDS)
def get_permission(
    permission_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Get a single permission by id."""
    found = _get_permission(
        permission_id, session=session,
        exception_when_missing=exception_when_missing
    )
    return found
+
+
def add_permissions_internal(session=None):
    """Seed all PERMISSIONS into the database.

    Internal helper used by other db.api modules only; returns the
    created (or already existing) permission objects.
    """
    return [
        utils.add_db_object(
            session, models.Permission,
            True,
            permission.name,
            alias=permission.alias,
            description=permission.description
        )
        for permission in PERMISSIONS
    ]
diff --git a/compass-deck/db/api/switch.py b/compass-deck/db/api/switch.py
new file mode 100644
index 0000000..647eec0
--- /dev/null
+++ b/compass-deck/db/api/switch.py
@@ -0,0 +1,1213 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Switch database operations."""
+import logging
+import netaddr
+import re
+
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
# Filter keys accepted by the list_* endpoints.
SUPPORTED_FIELDS = ['ip_int', 'vendor', 'state']
SUPPORTED_FILTER_FIELDS = ['ip_int', 'vendor', 'state']
SUPPORTED_SWITCH_MACHINES_FIELDS = [
    'switch_ip_int', 'port', 'vlans', 'mac', 'tag', 'location',
    'owner_id'
]
SUPPORTED_MACHINES_FIELDS = [
    'port', 'vlans', 'mac', 'tag', 'location', 'owner_id'
]
SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS = [
    'switch_ip_int', 'port', 'vlans', 'mac',
    'tag', 'location', 'os_name'
]
SUPPORTED_MACHINES_HOSTS_FIELDS = [
    'port', 'vlans', 'mac', 'tag', 'location',
    'os_name'
]
# Keys silently dropped from any add/update payload.
IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
# Keys accepted when adding or updating switches. The 'put_*' /
# 'patched_*' prefixes tell the db layer whether a field is replaced
# wholesale (PUT) or merged (PATCH).
ADDED_FIELDS = ['ip']
OPTIONAL_ADDED_FIELDS = [
    'credentials', 'vendor', 'state', 'err_msg', 'machine_filters'
]
UPDATED_FIELDS = [
    'ip', 'credentials', 'vendor', 'state',
    'err_msg', 'put_machine_filters'
]
PATCHED_FIELDS = ['patched_credentials', 'patched_machine_filters']
UPDATED_FILTERS_FIELDS = ['put_machine_filters']
PATCHED_FILTERS_FIELDS = ['patched_machine_filters']
# Keys accepted when adding or updating machines and switch machines.
ADDED_MACHINES_FIELDS = ['mac']
OPTIONAL_ADDED_MACHINES_FIELDS = [
    'ipmi_credentials', 'tag', 'location', 'owner_id'
]
ADDED_SWITCH_MACHINES_FIELDS = ['port']
OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS = ['vlans']
UPDATED_MACHINES_FIELDS = [
    'ipmi_credentials',
    'tag', 'location'
]
UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans', 'owner_id']
PATCHED_MACHINES_FIELDS = [
    'patched_ipmi_credentials',
    'patched_tag', 'patched_location'
]
PATCHED_SWITCH_MACHINES_FIELDS = ['patched_vlans']
# Fields kept in API responses (see utils.wrap_to_dict usages below).
RESP_FIELDS = [
    'id', 'ip', 'credentials', 'vendor', 'state', 'err_msg',
    'filters', 'created_at', 'updated_at'
]
RESP_FILTERS_FIELDS = [
    'id', 'ip', 'filters', 'created_at', 'updated_at'
]
RESP_ACTION_FIELDS = [
    'status', 'details'
]
RESP_MACHINES_FIELDS = [
    'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
    'port', 'vlans', 'mac', 'owner_id',
    'ipmi_credentials', 'tag', 'location',
    'created_at', 'updated_at'
]
RESP_MACHINES_HOSTS_FIELDS = [
    'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
    'port', 'vlans', 'mac',
    'ipmi_credentials', 'tag', 'location', 'ip',
    'name', 'hostname', 'os_name', 'owner',
    'os_installer', 'reinstall_os', 'os_installed',
    'clusters', 'created_at', 'updated_at'
]
RESP_CLUSTER_FIELDS = [
    'name', 'id'
]
+
+
def _check_machine_filters(machine_filters):
    """Check if machine filters format is acceptable.

    Delegates to models.Switch.parse_filters, which presumably raises
    on a malformed filter expression — TODO confirm.
    """
    logging.debug('check machine filters: %s', machine_filters)
    models.Switch.parse_filters(machine_filters)
+
+
+def _check_vlans(vlans):
+ """Check vlans format is acceptable."""
+ for vlan in vlans:
+ if not isinstance(vlan, int):
+ raise exception.InvalidParameter(
+ 'vlan %s is not int' % vlan
+ )
+
+
@utils.supported_filters(
    ADDED_FIELDS,
    optional_support_keys=OPTIONAL_ADDED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
    ip=utils.check_ip,
    credentials=utils.check_switch_credentials,
    machine_filters=_check_machine_filters
)
@utils.wrap_to_dict(RESP_FIELDS)
def _add_switch(
    ip, exception_when_existing=True,
    machine_filters=setting.SWITCHES_DEFAULT_FILTERS,
    session=None, **kwargs
):
    """Add switch by switch ip.

    The ip is stored as its integer value (ip_int), the unique key of
    the Switch table. machine_filters default to the system-wide
    SWITCHES_DEFAULT_FILTERS setting.
    """
    # `long` conversion: this module targets Python 2.
    ip_int = long(netaddr.IPAddress(ip))
    return utils.add_db_object(
        session, models.Switch, exception_when_existing, ip_int,
        machine_filters=machine_filters, **kwargs
    )
+
+
def get_switch_internal(
    switch_id, session=None, **kwargs
):
    """Get switch by switch id.

    Should only be used by other files under db/api.
    Extra kwargs are forwarded to the underlying db lookup.
    """
    return _get_switch(switch_id, session=session, **kwargs)
+
+
def _get_switch(switch_id, session=None, **kwargs):
    """Get Switch object by switch id.

    Raises InvalidParameter when the id is not int compatible.
    """
    if not isinstance(switch_id, (int, long)):
        raise exception.InvalidParameter(
            'switch id %s type is not int compatible' % switch_id)
    return utils.get_db_object(
        session, models.Switch,
        id=switch_id, **kwargs
    )
+
+
def _get_switch_by_ip(switch_ip, session=None, **kwargs):
    """Get switch by switch ip.

    The ip is converted to its integer form (ip_int), which is the
    lookup key in the Switch table. `long` — Python 2 only.
    """
    switch_ip_int = long(netaddr.IPAddress(switch_ip))
    return utils.get_db_object(
        session, models.Switch,
        ip_int=switch_ip_int, **kwargs
    )
+
+
def _get_switch_machine(switch_id, machine_id, session=None, **kwargs):
    """Get switch machine by switch id and machine id."""
    switch = _get_switch(switch_id, session=session)
    # Imported locally rather than at module level — presumably to
    # avoid a circular import between switch and machine modules;
    # TODO confirm.
    from compass.db.api import machine as machine_api
    machine = machine_api.get_machine_internal(machine_id, session=session)
    return utils.get_db_object(
        session, models.SwitchMachine,
        switch_id=switch.id, machine_id=machine.id, **kwargs
    )
+
+
def _get_switchmachine(switch_machine_id, session=None, **kwargs):
    """Get switch machine by switch_machine_id.

    Raises InvalidParameter when the id is not int compatible.
    """
    if isinstance(switch_machine_id, (int, long)):
        return utils.get_db_object(
            session, models.SwitchMachine,
            switch_machine_id=switch_machine_id, **kwargs
        )
    raise exception.InvalidParameter(
        'switch machine id %s type is not int compatible' % (
            switch_machine_id
        )
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCHES
)
@utils.wrap_to_dict(RESP_FIELDS)
def get_switch(
    switch_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """get a switch by switch id.

    Raises when the switch is missing unless exception_when_missing
    is False, in which case None is passed through the wrappers.
    """
    return _get_switch(
        switch_id, session=session,
        exception_when_missing=exception_when_missing
    )
+
+
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCHES
)
@utils.wrap_to_dict(RESP_FIELDS)
def list_switches(user=None, session=None, **filters):
    """List switches.

    Unless the caller filtered by ip_int explicitly, the internal
    default switch (setting.DEFAULT_SWITCH_IP) is hidden from the
    result.
    """
    # TODO(xicheng): should discuss with weidong.
    # If we can deprecate the use of DEFAULT_SWITCH_IP,
    # The code will be simpler.
    # The UI should use /machines-hosts instead of
    # /switches-machines-hosts and can show multi switch ip/port
    # under one row of machine info.
    found = utils.list_db_objects(
        session, models.Switch, **filters
    )
    if 'ip_int' in filters:
        return found
    return [
        switch for switch in found
        if switch.ip != setting.DEFAULT_SWITCH_IP
    ]
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_SWITCH
)
@utils.wrap_to_dict(RESP_FIELDS)
def del_switch(switch_id, user=None, session=None, **kwargs):
    """Delete a switch.

    Machines connected only to the deleted switch are re-attached to
    the default switch (keeping their port) so every machine stays
    connected to at least one switch; machines reachable through
    another switch simply lose this connection.
    """
    # TODO(xicheng): Simplify the logic if the default switch feature
    # can be deprecated.
    switch = _get_switch(switch_id, session=session)
    default_switch = _get_switch_by_ip(
        setting.DEFAULT_SWITCH_IP, session=session
    )
    if switch.id != default_switch.id:
        for switch_machine in switch.switch_machines:
            machine = switch_machine.machine
            if len(machine.switch_machines) > 1:
                # Still reachable via another switch; nothing to do.
                continue
            utils.add_db_object(
                session, models.SwitchMachine,
                False,
                default_switch.id, machine.id,
                port=switch_machine.port
            )
    return utils.del_db_object(session, switch)
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH
)
def add_switch(
    exception_when_existing=True, ip=None,
    user=None, session=None, **kwargs
):
    """Create a switch.

    Extra kwargs (credentials, vendor, machine_filters, ...) are
    validated and stored by _add_switch.
    """
    return _add_switch(
        ip,
        exception_when_existing=exception_when_existing,
        session=session, **kwargs
    )
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH
)
def add_switches(
    exception_when_existing=False,
    data=None, user=None, session=None
):
    """Create switches in batch.

    Args:
        data: list of dicts, each with at least 'ip' plus any optional
            switch fields accepted by _add_switch.

    Returns:
        dict with the created switches under 'switches' and the input
        entries whose ip already existed under 'fail_switches'.
    """
    # TODO(xicheng): simplify the batch api.
    # Fix: use None instead of a shared mutable default list; callers
    # observe identical behavior.
    if data is None:
        data = []
    switches = []
    fail_switches = []
    for switch_data in data:
        switch_object = _get_switch_by_ip(
            switch_data['ip'], session=session,
            exception_when_missing=False
        )
        if switch_object:
            # Duplicate ip: record the input as failed instead of
            # overwriting the existing switch. Lazy %-args keep the
            # formatting off the hot path.
            logging.error(
                'ip %s exists in switch %s',
                switch_data['ip'], switch_object.id
            )
            fail_switches.append(switch_data)
        else:
            switches.append(
                _add_switch(
                    exception_when_existing=exception_when_existing,
                    session=session,
                    **switch_data
                )
            )
    return {
        'switches': switches,
        'fail_switches': fail_switches
    }
+
+
@utils.wrap_to_dict(RESP_FIELDS)
def _update_switch(switch_id, session=None, **kwargs):
    """Update fields of a switch and return the updated object."""
    return utils.update_db_object(
        session, _get_switch(switch_id, session=session), **kwargs
    )
+
+
# replace machine_filters in kwargs to put_machine_filters,
# which is used to tell db this is a put action for the field.
@utils.replace_filters(
    machine_filters='put_machine_filters'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
    credentials=utils.check_switch_credentials,
    put_machine_filters=_check_machine_filters
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH
)
def update_switch(switch_id, user=None, session=None, **kwargs):
    """Update fields of a switch (machine_filters use PUT semantics)."""
    return _update_switch(switch_id, session=session, **kwargs)
+
+
# replace credentials to patched_credentials,
# machine_filters to patched_machine_filters in kwargs.
# This is to tell db they are patch action to the above fields.
@utils.replace_filters(
    credentials='patched_credentials',
    machine_filters='patched_machine_filters'
)
@utils.supported_filters(
    optional_support_keys=PATCHED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(
    patched_machine_filters=_check_machine_filters
)
@database.run_in_session()
@utils.output_validates(
    credentials=utils.check_switch_credentials
)
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH
)
def patch_switch(switch_id, user=None, session=None, **kwargs):
    """Patch fields of a switch.

    Credentials are validated on the merged output (output_validates)
    rather than on the partial input, since a patch may be incomplete.
    """
    return _update_switch(switch_id, session=session, **kwargs)
+
+
@util.deprecated
@utils.supported_filters(optional_support_keys=SUPPORTED_FILTER_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCH_FILTERS
)
@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
def list_switch_filters(user=None, session=None, **filters):
    """List all switches' filters.

    Deprecated; the response is trimmed to RESP_FILTERS_FIELDS.
    """
    return utils.list_db_objects(
        session, models.Switch, **filters
    )
+
+
@util.deprecated
@utils.supported_filters()
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCH_FILTERS
)
@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
def get_switch_filters(
    switch_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """get filters of a switch.

    Deprecated; returns the switch trimmed to RESP_FILTERS_FIELDS.
    """
    return _get_switch(
        switch_id, session=session,
        exception_when_missing=exception_when_missing
    )
+
+
@util.deprecated
@utils.replace_filters(
    machine_filters='put_machine_filters'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_FILTERS_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(put_machine_filters=_check_machine_filters)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_SWITCH_FILTERS
)
@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
def update_switch_filters(switch_id, user=None, session=None, **kwargs):
    """Update filters of a switch.

    Deprecated; machine_filters are replaced wholesale (PUT).
    """
    switch = _get_switch(switch_id, session=session)
    return utils.update_db_object(session, switch, **kwargs)
+
+
@util.deprecated
@utils.replace_filters(
    machine_filters='patched_machine_filters'
)
@utils.supported_filters(
    optional_support_keys=PATCHED_FILTERS_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(patched_machine_filters=_check_machine_filters)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_SWITCH_FILTERS
)
@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
def patch_switch_filter(switch_id, user=None, session=None, **kwargs):
    """Patch filters to a switch.

    Deprecated; machine_filters are merged (PATCH semantics).
    """
    switch = _get_switch(switch_id, session=session)
    return utils.update_db_object(session, switch, **kwargs)
+
+
@util.deprecated
def get_switch_machines_internal(session, **filters):
    """List switch machines; deprecated internal helper."""
    return utils.list_db_objects(
        session, models.SwitchMachine, **filters
    )
+
+
+def _filter_port(port_filter, obj):
+ """filter switch machines by port.
+
+ supported port_filter keys: [
+ 'startswith', 'endswith', 'resp_lt',
+ 'resp_le', 'resp_gt', 'resp_ge', 'resp_range'
+ ]
+
+ port_filter examples:
+ {
+ 'startswitch': 'ae', 'endswith': '',
+ 'resp_ge': 20, 'resp_le': 30,
+ }
+ """
+ port_prefix = port_filter.get('startswith', '')
+ port_suffix = port_filter.get('endswith', '')
+ pattern = re.compile(r'%s(\d+)%s' % (port_prefix, port_suffix))
+ match = pattern.match(obj)
+ if not match:
+ return False
+ port_number = int(match.group(1))
+ if (
+ 'resp_lt' in port_filter and
+ port_number >= port_filter['resp_lt']
+ ):
+ return False
+ if (
+ 'resp_le' in port_filter and
+ port_number > port_filter['resp_le']
+ ):
+ return False
+ if (
+ 'resp_gt' in port_filter and
+ port_number <= port_filter['resp_gt']
+ ):
+ return False
+ if (
+ 'resp_ge' in port_filter and
+ port_number < port_filter['resp_ge']
+ ):
+ return False
+ if 'resp_range' in port_filter:
+ resp_range = port_filter['resp_range']
+ if not isinstance(resp_range, list):
+ resp_range = [resp_range]
+ in_range = False
+ for port_start, port_end in resp_range:
+ if port_start <= port_number <= port_end:
+ in_range = True
+ break
+ if not in_range:
+ return False
+ return True
+
+
+def _filter_vlans(vlan_filter, obj):
+ """Filter switch machines by vlan.
+
+ supported keys in vlan_filter:
+ ['resp_in']
+ """
+ vlans = set(obj)
+ if 'resp_in' in vlan_filter:
+ resp_vlans = set(vlan_filter['resp_in'])
+ if not (vlans & resp_vlans):
+ return False
+ return True
+
+
@utils.output_filters(
    port=_filter_port, vlans=_filter_vlans,
    tag=utils.general_filter_callback,
    location=utils.general_filter_callback
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def _filter_switch_machines(switch_machines):
    """Drop switch machines excluded by their switch's filters.

    Each switch defines its own machine filters; a switch machine
    whose `filtered` flag is set is removed from the result.
    """
    return [
        candidate
        for candidate in switch_machines
        if not candidate.filtered
    ]
+
+
@utils.output_filters(
    missing_ok=True,
    port=_filter_port, vlans=_filter_vlans,
    tag=utils.general_filter_callback,
    location=utils.general_filter_callback,
    os_name=utils.general_filter_callback,
)
@utils.wrap_to_dict(
    RESP_MACHINES_HOSTS_FIELDS,
    clusters=RESP_CLUSTER_FIELDS
)
def _filter_switch_machines_hosts(switch_machines):
    """Like _filter_switch_machines, but merge in host info.

    For each unfiltered switch machine, host fields (when a host
    exists) or bare machine fields are combined with the switch
    machine fields; switch machine fields win on key collisions.
    """
    results = []
    for switch_machine in switch_machines:
        if switch_machine.filtered:
            continue
        machine = switch_machine.machine
        host = machine.host
        merged = host.to_dict() if host else machine.to_dict()
        merged.update(switch_machine.to_dict())
        results.append(merged)
    return results
+
+
@utils.supported_filters(
    optional_support_keys=SUPPORTED_MACHINES_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCH_MACHINES
)
def list_switch_machines(
    switch_id, user=None, session=None, **filters
):
    """Get switch machines of a switch.

    Non-admin callers only see machines they own.
    """
    switch = _get_switch(switch_id, session=session)
    switch_machines = utils.list_db_objects(
        session, models.SwitchMachine, switch_id=switch.id, **filters
    )
    if not user.is_admin and switch_machines:
        switch_machines = [
            switch_machine for switch_machine in switch_machines
            if switch_machine.machine.owner_id == user.id
        ]
    return _filter_switch_machines(switch_machines)
+
+
# replace ip_int to switch_ip_int in kwargs
@utils.replace_filters(
    ip_int='switch_ip_int'
)
@utils.supported_filters(
    optional_support_keys=SUPPORTED_SWITCH_MACHINES_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCH_MACHINES
)
def list_switchmachines(user=None, session=None, **filters):
    """List switch machines."""
    # NOTE(review): unlike list_switch_machines and
    # list_switchmachines_hosts, results are not restricted to the
    # caller's own machines for non-admin users — confirm intended.
    switch_machines = utils.list_db_objects(
        session, models.SwitchMachine, **filters
    )
    return _filter_switch_machines(
        switch_machines
    )
+
+
@utils.supported_filters(
    optional_support_keys=SUPPORTED_MACHINES_HOSTS_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCH_MACHINES
)
def list_switch_machines_hosts(
    switch_id, user=None, session=None, **filters
):
    """Get switch machines and possible hosts of a switch."""
    # NOTE(review): no owner filtering for non-admin users here,
    # unlike list_switch_machines — confirm intended.
    switch = _get_switch(switch_id, session=session)
    switch_machines = utils.list_db_objects(
        session, models.SwitchMachine, switch_id=switch.id, **filters
    )
    return _filter_switch_machines_hosts(
        switch_machines
    )
+
+
# replace ip_int to switch_ip_int in kwargs
@utils.replace_filters(
    ip_int='switch_ip_int'
)
@utils.supported_filters(
    optional_support_keys=SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCH_MACHINES
)
def list_switchmachines_hosts(user=None, session=None, **filters):
    """List switch machines and possible hosts.

    Non-admin callers only see machines they own.
    """
    switch_machines = utils.list_db_objects(
        session, models.SwitchMachine, **filters
    )
    if not user.is_admin and switch_machines:
        switch_machines = [
            switch_machine for switch_machine in switch_machines
            if switch_machine.machine.owner_id == user.id
        ]
    return _filter_switch_machines_hosts(switch_machines)
+
+
@utils.supported_filters(
    ADDED_MACHINES_FIELDS,
    optional_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS,
    ignore_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS
)
@utils.input_validates(mac=utils.check_mac)
def _add_machine_if_not_exist(mac=None, session=None, **kwargs):
    """Add machine if the mac does not exist in any machine.

    The False positional flag (exception_when_existing) means an
    existing machine with the same mac is reused rather than raising.
    """
    return utils.add_db_object(
        session, models.Machine, False,
        mac, **kwargs)
+
+
@utils.supported_filters(
    ADDED_SWITCH_MACHINES_FIELDS,
    optional_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS,
    ignore_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS
)
@utils.input_validates(vlans=_check_vlans)
def _add_switch_machine_only(
    switch, machine, exception_when_existing=True,
    session=None, owner_id=None, port=None, **kwargs
):
    """add a switch machine.

    Links an already existing machine to an already existing switch on
    the given port; machine-level fields are filtered out by the
    decorator.
    """
    return utils.add_db_object(
        session, models.SwitchMachine,
        exception_when_existing,
        switch.id, machine.id, port=port,
        owner_id=owner_id,
        **kwargs
    )
+
+
@utils.supported_filters(
    ADDED_MACHINES_FIELDS + ADDED_SWITCH_MACHINES_FIELDS,
    optional_support_keys=(
        OPTIONAL_ADDED_MACHINES_FIELDS +
        OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS
    ),
    ignore_support_keys=IGNORE_FIELDS
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def _add_switch_machine(
    switch_id, exception_when_existing=True,
    mac=None, port=None, session=None, owner_id=None, **kwargs
):
    """Add switch machine.

    If underlying machine does not exist, also create the underlying
    machine.

    NOTE(review): owner_id is applied to the created machine here,
    while the batch path (add_switch_machines) applies it to the
    switch machine row instead — confirm which is intended.
    """
    switch = _get_switch(switch_id, session=session)
    machine = _add_machine_if_not_exist(
        mac=mac, session=session, owner_id=owner_id, **kwargs
    )
    return _add_switch_machine_only(
        switch, machine,
        exception_when_existing,
        port=port, session=session, **kwargs
    )
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH_MACHINE
)
def add_switch_machine(
    switch_id, exception_when_existing=True,
    mac=None, user=None, session=None,
    owner_id=None, **kwargs
):
    """Add switch machine to a switch.

    Creates the underlying machine first when it does not exist yet.
    """
    return _add_switch_machine(
        switch_id,
        exception_when_existing=exception_when_existing,
        mac=mac, session=session, owner_id=owner_id, **kwargs
    )
+
+
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH_MACHINE
)
@utils.wrap_to_dict(
    [
        'switches_machines',
        'duplicate_switches_machines',
        'fail_switches_machines'
    ],
    switches_machines=RESP_MACHINES_FIELDS,
    duplicate_switches_machines=RESP_MACHINES_FIELDS
)
def add_switch_machines(
    exception_when_existing=False,
    data=None, user=None, session=None, owner_id=None
):
    """Add switch machines in batch.

    Each item in data contains 'switch_ip', 'mac', 'port' and optional
    switch machine fields. Items whose switch does not exist or whose
    port conflicts with an existing entry go to
    'fail_switches_machines'; exact duplicates go to
    'duplicate_switches_machines'; new entries are created and
    returned under 'switches_machines'.

    NOTE: the dicts inside data are modified in place ('switch_ip'
    and 'mac' keys are removed while processing).
    """
    # Fix: None instead of a shared mutable default list; behavior for
    # callers is unchanged.
    if data is None:
        data = []
    switch_machines = []
    duplicate_switch_machines = []
    failed_switch_machines = []
    switches_mapping = {}
    switch_machines_mapping = {}
    switch_ips = []
    # Pass 1: resolve each item's switch and group items by switch id.
    for item_data in data:
        switch_ip = item_data['switch_ip']
        if switch_ip not in switches_mapping:
            switch_object = _get_switch_by_ip(
                switch_ip, session=session,
                exception_when_missing=False
            )
            if switch_object:
                switch_ips.append(switch_ip)
                switches_mapping[switch_ip] = switch_object
            else:
                # Unknown switch: record this item as failed. The miss
                # is deliberately not cached so every failing item is
                # reported individually.
                logging.error(
                    'switch %s does not exist', switch_ip
                )
                item_data.pop('switch_ip')
                failed_switch_machines.append(item_data)
        else:
            switch_object = switches_mapping[switch_ip]
        if switch_object:
            item_data.pop('switch_ip')
            switch_machines_mapping.setdefault(
                switch_object.id, []
            ).append(item_data)

    # Pass 2: create machines and switch machine links per switch.
    for switch_ip in switch_ips:
        switch_object = switches_mapping[switch_ip]
        switch_id = switch_object.id
        machines = switch_machines_mapping[switch_id]
        for machine in machines:
            mac = machine['mac']
            machine_object = _add_machine_if_not_exist(
                mac=mac, session=session
            )
            switch_machine_object = _get_switch_machine(
                switch_id, machine_object.id, session=session,
                exception_when_missing=False
            )
            if switch_machine_object:
                port = machine['port']
                switch_machine_id = switch_machine_object.switch_machine_id
                exist_port = switch_machine_object.port
                if exist_port != port:
                    # Same switch/machine pair on a different port is
                    # a conflict, not a duplicate.
                    logging.error(
                        'switch machine %s exist port %s is '
                        'different from added port %s',
                        switch_machine_id, exist_port, port
                    )
                    failed_switch_machines.append(machine)
                else:
                    # Fixed log typos ('iswitch', 'dulicate').
                    logging.error(
                        'switch machine %s is duplicate, '
                        'will not be overridden', switch_machine_id
                    )
                    duplicate_switch_machines.append(machine)
            else:
                del machine['mac']
                switch_machines.append(_add_switch_machine_only(
                    switch_object, machine_object,
                    exception_when_existing,
                    session=session, owner_id=owner_id, **machine
                ))
    return {
        'switches_machines': switch_machines,
        'duplicate_switches_machines': duplicate_switch_machines,
        'fail_switches_machines': failed_switch_machines
    }
+
+
@utils.supported_filters(optional_support_keys=['find_machines'])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_SWITCH_MACHINES
)
@utils.wrap_to_dict(RESP_ACTION_FIELDS)
def poll_switch(switch_id, user=None, session=None, **kwargs):
    """poll switch to get machines.

    Dispatches an asynchronous celery task (compass.tasks.pollswitch)
    on the calling user's own queue/exchange and returns immediately
    with an action status; the actual polling happens out of band.
    """
    from compass.tasks import client as celery_client
    switch = _get_switch(switch_id, session=session)
    celery_client.celery.send_task(
        'compass.tasks.pollswitch',
        (user.email, switch.ip, switch.credentials),
        queue=user.email,
        exchange=user.email,
        routing_key=user.email
    )
    return {
        'status': 'action %s sent' % kwargs,
        'details': {
        }
    }
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCH_MACHINES
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def get_switch_machine(
    switch_id, machine_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """get a switch machine by switch id and machine id.

    Raises when missing unless exception_when_missing is False.
    """
    return _get_switch_machine(
        switch_id, machine_id, session=session,
        exception_when_missing=exception_when_missing
    )
+
+
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_SWITCH_MACHINES
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def get_switchmachine(
    switch_machine_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """get a switch machine by switch_machine_id.

    Raises when missing unless exception_when_missing is False.
    """
    return _get_switchmachine(
        switch_machine_id, session=session,
        exception_when_missing=exception_when_missing
    )
+
+
@utils.supported_filters(
    optional_support_keys=(
        UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS
    ),
    ignore_support_keys=(
        UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
    )
)
def _update_machine_if_necessary(
    machine, session=None, **kwargs
):
    """Update the underlying machine if there is something to update.

    The decorator keeps only machine-level fields and drops
    switch-machine-level ones; returns None by design (the caller
    only needs the side effect).
    """
    utils.update_db_object(
        session, machine, **kwargs
    )
+
+
@utils.supported_filters(
    optional_support_keys=(
        UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
    ),
    ignore_support_keys=(
        UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS
    )
)
def _update_switch_machine_only(switch_machine, session=None, **kwargs):
    """Update switch machine.

    The decorator keeps only switch-machine-level fields (the mirror
    image of _update_machine_if_necessary); returns the updated
    switch machine object.
    """
    return utils.update_db_object(
        session, switch_machine, **kwargs
    )
+
+
def _update_switch_machine(
    switch_machine, session=None, **kwargs
):
    """Update a switch machine and, when needed, its machine.

    Machine-level fields in kwargs are applied to the underlying
    machine first; switch-machine-level fields are then applied to
    the switch machine itself, which is returned.
    """
    machine = switch_machine.machine
    _update_machine_if_necessary(machine, session=session, **kwargs)
    return _update_switch_machine_only(
        switch_machine, session=session, **kwargs
    )
+
+
@utils.supported_filters(
    optional_support_keys=(
        UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS
    ),
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(vlans=_check_vlans)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH_MACHINE
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def update_switch_machine(
    switch_id, machine_id, user=None,
    session=None, **kwargs
):
    """Update a switch machine identified by switch id and machine id."""
    return _update_switch_machine(
        _get_switch_machine(switch_id, machine_id, session=session),
        session=session, **kwargs
    )
+
+
@utils.supported_filters(
    optional_support_keys=(
        UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS
    ),
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(vlans=_check_vlans)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH_MACHINE
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def update_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
    """Update switch machine by switch_machine_id.

    Machine-level fields update the underlying machine; switch
    machine fields update the link itself.
    """
    switch_machine = _get_switchmachine(
        switch_machine_id, session=session
    )
    return _update_switch_machine(
        switch_machine,
        session=session, **kwargs
    )
+
+
# replace_filters renames [vlans, ipmi_credentials, tag, location] to
# their patched_* counterparts so the db layer treats the incoming
# values as patches to be merged instead of full replacements.
@utils.replace_filters(
    vlans='patched_vlans',
    ipmi_credentials='patched_ipmi_credentials',
    tag='patched_tag',
    location='patched_location'
)
@utils.supported_filters(
    optional_support_keys=(
        PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
    ),
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(patched_vlans=_check_vlans)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH_MACHINE
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def patch_switch_machine(
    switch_id, machine_id, user=None,
    session=None, **kwargs
):
    """Patch a switch machine addressed by switch id and machine id."""
    return _update_switch_machine(
        _get_switch_machine(switch_id, machine_id, session=session),
        session=session, **kwargs
    )
+
+
# replace_filters renames [vlans, ipmi_credentials, tag, location] to
# their patched_* counterparts so the db layer treats the incoming
# values as patches to be merged instead of full replacements.
@utils.replace_filters(
    vlans='patched_vlans',
    ipmi_credentials='patched_ipmi_credentials',
    tag='patched_tag',
    location='patched_location'
)
@utils.supported_filters(
    optional_support_keys=(
        PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
    ),
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(patched_vlans=_check_vlans)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_ADD_SWITCH_MACHINE
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def patch_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
    """Patch a switch machine addressed by its own id."""
    return _update_switch_machine(
        _get_switchmachine(switch_machine_id, session=session),
        session=session, **kwargs
    )
+
+
def _del_switch_machine(
    switch_machine, session=None
):
    """Delete a switch machine association.

    If this is the last switch machine associated to the underlying
    machine, a record is first added to the default switch so the
    machine stays discoverable.
    """
    # NOTE(review): fetched unconditionally before the branch below;
    # _get_switch_by_ip may create the default switch as a side effect
    # — confirm before moving this call into the if-block.
    default_switch = _get_switch_by_ip(
        setting.DEFAULT_SWITCH_IP, session=session
    )
    machine = switch_machine.machine
    if len(machine.switch_machines) <= 1:
        # False: do not raise if the association already exists.
        utils.add_db_object(
            session, models.SwitchMachine,
            False,
            default_switch.id, machine.id,
            port=switch_machine.port
        )
    return utils.del_db_object(session, switch_machine)
+
+
@utils.supported_filters()
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_SWITCH_MACHINE
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def del_switch_machine(
    switch_id, machine_id, user=None,
    session=None, **kwargs
):
    """Delete a switch machine addressed by switch id and machine id."""
    return _del_switch_machine(
        _get_switch_machine(switch_id, machine_id, session=session),
        session=session
    )
+
+
@utils.supported_filters()
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_DEL_SWITCH_MACHINE
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def del_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
    """Delete a switch machine addressed by its own id."""
    return _del_switch_machine(
        _get_switchmachine(switch_machine_id, session=session),
        session=session
    )
+
+
@utils.supported_filters(
    ['machine_id'],
    optional_support_keys=UPDATED_SWITCH_MACHINES_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
def _add_machine_to_switch(
    switch_id, machine_id, session=None, **kwargs
):
    """Associate an existing machine with a switch."""
    target_switch = _get_switch(switch_id, session=session)
    # Imported lazily to avoid a circular import with machine api.
    from compass.db.api import machine as machine_api
    target_machine = machine_api.get_machine_internal(
        machine_id, session=session
    )
    # False: do not raise if the association already exists.
    _add_switch_machine_only(
        target_switch, target_machine, False,
        owner_id=target_machine.owner_id, **kwargs
    )
+
+
def _add_machines(switch, machines, session=None):
    """Add machines to switch.

    Args:
        machines: list of dict which contains attributes to
                  add machine to switch.

    machines example:
        [{'machine_id': 1, 'port': 'ae20'}]
    """
    for machine in machines:
        _add_machine_to_switch(
            switch.id, session=session, **machine
        )
+
+
def _remove_machines(switch, machines, session=None):
    """Remove machines from switch.

    Args:
        machines: list of machine id.

    machines example:
        [1, 2]
    """
    # Passing the id list as machine_id yields an IN condition so all
    # matching associations are removed in one call.
    utils.del_db_objects(
        session, models.SwitchMachine,
        switch_id=switch.id, machine_id=machines
    )
+
+
def _set_machines(switch, machines, session=None):
    """Reset machines to a switch.

    All existing associations are dropped first, then the given ones
    are created from scratch.

    Args:
        machines: list of dict which contains attributes to
                  add machine to switch.

    machines example:
        [{'machine_id': 1, 'port': 'ae20'}]
    """
    utils.del_db_objects(
        session, models.SwitchMachine,
        switch_id=switch.id
    )
    for switch_machine in machines:
        _add_machine_to_switch(
            switch.id, session=session, **switch_machine
        )
+
+
@utils.supported_filters(
    optional_support_keys=[
        'add_machines', 'remove_machines', 'set_machines'
    ]
)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_UPDATE_SWITCH_MACHINES
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def update_switch_machines(
    switch_id, add_machines=None, remove_machines=None,
    set_machines=None, user=None, session=None, **kwargs
):
    """Update a switch's machine associations.

    Args:
        add_machines: list of dicts describing machines to associate.
        remove_machines: list of machine ids to dissociate.
        set_machines: when not None, replaces all associations.

    Note: the list defaults were mutable ([]); replaced with None to
    avoid the shared-mutable-default pitfall. Only truthiness of these
    arguments is checked below, so behavior is unchanged for callers.
    """
    switch = _get_switch(switch_id, session=session)
    # Removals run before additions so a port can be reassigned in one
    # call; set_machines runs last and wins when provided.
    if remove_machines:
        _remove_machines(
            switch, remove_machines, session=session
        )
    if add_machines:
        _add_machines(
            switch, add_machines, session=session
        )
    if set_machines is not None:
        _set_machines(
            switch, set_machines, session=session
        )
    return switch.switch_machines
diff --git a/compass-deck/db/api/user.py b/compass-deck/db/api/user.py
new file mode 100644
index 0000000..db039eb
--- /dev/null
+++ b/compass-deck/db/api/user.py
@@ -0,0 +1,553 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User database operations."""
+import datetime
+import functools
+import logging
+import re
+
+from flask.ext.login import UserMixin
+
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
# Filter keys accepted by list_users().
SUPPORTED_FIELDS = ['email', 'is_admin', 'active']
# Filter keys accepted when listing a user's permissions.
PERMISSION_SUPPORTED_FIELDS = ['name']
# Fields a user may change on their own account.
SELF_UPDATED_FIELDS = ['email', 'firstname', 'lastname', 'password']
# Fields only an admin may change on any account.
ADMIN_UPDATED_FIELDS = ['is_admin', 'active']
# Keys silently dropped from add/update payloads.
IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
# Union of all updatable fields (filtered again per-caller in update_user).
UPDATED_FIELDS = [
    'email', 'firstname', 'lastname', 'password', 'is_admin', 'active'
]
# Mandatory and optional fields for add_user().
ADDED_FIELDS = ['email', 'password']
OPTIONAL_ADDED_FIELDS = ['is_admin', 'active']
PERMISSION_ADDED_FIELDS = ['permission_id']
# Response whitelists consumed by utils.wrap_to_dict.
RESP_FIELDS = [
    'id', 'email', 'is_admin', 'active', 'firstname',
    'lastname', 'created_at', 'updated_at'
]
RESP_TOKEN_FIELDS = [
    'id', 'user_id', 'token', 'expire_timestamp'
]
PERMISSION_RESP_FIELDS = [
    'id', 'user_id', 'permission_id', 'name', 'alias', 'description',
    'created_at', 'updated_at'
]
+
+
+def _check_email(email):
+ """Check email is email format."""
+ if '@' not in email:
+ raise exception.InvalidParameter(
+ 'there is no @ in email address %s.' % email
+ )
+
+
def _check_user_permission(user, permission, session=None):
    """Raise Forbidden unless *user* holds *permission*."""
    if not user:
        # An empty user marks a trusted internal call; skip the check.
        logging.info('empty user means the call is from internal')
        return
    if user.is_admin:
        # Admins implicitly hold every permission.
        return
    granted = utils.get_db_object(
        session, models.UserPermission,
        False, user_id=user.id, name=permission.name
    )
    if granted:
        return
    raise exception.Forbidden(
        'user %s does not have permission %s' % (
            user.email, permission.name
        )
    )
+
+
def check_user_permission(permission):
    """Decorator ensuring the calling user holds *permission*.

    When no user kwarg is supplied the call is treated as internal and
    passes through unchecked; otherwise a session kwarg is mandatory.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            user = kwargs.get('user')
            if user is None:
                # Internal call without a user context.
                return func(*args, **kwargs)
            session = kwargs.get('session')
            if session is None:
                raise exception.DatabaseException(
                    'wrapper check_user_permission does not run in session'
                )
            _check_user_permission(user, permission, session=session)
            return func(*args, **kwargs)
        return wrapper
    return decorator
+
+
def check_user_admin():
    """Decorator rejecting calls from non-admin users.

    Calls without a user kwarg are treated as internal and allowed.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            user = kwargs.get('user')
            if user is None:
                # Internal call without a user context.
                return func(*args, **kwargs)
            if user.is_admin:
                return func(*args, **kwargs)
            raise exception.Forbidden(
                'User %s is not admin.' % (
                    user.email
                )
            )
        return wrapper
    return decorator
+
+
def check_user_admin_or_owner():
    """Decorator allowing only an admin or the owner of the resource.

    The wrapped function's first positional argument must be the id of
    the user being acted upon. Calls without a user kwarg are treated
    as internal and allowed.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(user_id, *args, **kwargs):
            user = kwargs.get('user')
            if user is None:
                # Internal call without a user context.
                return func(user_id, *args, **kwargs)
            session = kwargs.get('session')
            if session is None:
                raise exception.DatabaseException(
                    'wrapper check_user_admin_or_owner is '
                    'not called in session'
                )
            owner = _get_user(user_id, session=session)
            if not user.is_admin and user.id != owner.id:
                raise exception.Forbidden(
                    'User %s is not admin or the owner of user %s.' % (
                        user.email, owner.email
                    )
                )
            return func(user_id, *args, **kwargs)
        return wrapper
    return decorator
+
+
def _add_user_permissions(user, session=None, **permission_filters):
    """Grant to *user* every permission matching the filters."""
    from compass.db.api import permission as permission_api
    matched = permission_api.list_permissions(
        session=session, **permission_filters
    )
    for perm in matched:
        # False: do not raise when the user already has the permission.
        utils.add_db_object(
            session, models.UserPermission, False,
            user.id, perm['id']
        )
+
+
def _remove_user_permissions(user, session=None, **permission_filters):
    """Revoke from *user* every permission matching the filters."""
    from compass.db.api import permission as permission_api
    ids_to_remove = []
    for perm in permission_api.list_permissions(
        session=session, **permission_filters
    ):
        ids_to_remove.append(perm['id'])
    # The id list yields an IN condition: one bulk delete.
    utils.del_db_objects(
        session, models.UserPermission,
        user_id=user.id, permission_id=ids_to_remove
    )
+
+
def _set_user_permissions(user, session=None, **permission_filters):
    """Reset a user's permissions to exactly those matching the filters.

    All existing permissions are dropped first, then the matching
    permissions are granted.
    """
    utils.del_db_objects(
        session, models.UserPermission,
        user_id=user.id
    )
    # Bug fix: the call used to be _add_user_permissions(session, user, ...)
    # which bound the session object to the 'user' parameter and the user
    # to 'session', breaking set_permissions entirely.
    _add_user_permissions(user, session=session, **permission_filters)
+
+
class UserWrapper(UserMixin):
    """Wrapper class provided to flask-login.

    Adapts a models.User row (as a dict) to the flask-login user
    interface. Extra keys from the row dict are accepted via **kwargs
    and ignored.
    """

    def __init__(
        self, id, email, crypted_password,
        active=True, is_admin=False,
        expire_timestamp=None, token='', **kwargs
    ):
        self.id = id
        self.email = email
        self.password = crypted_password
        self.active = active
        self.is_admin = is_admin
        self.expire_timestamp = expire_timestamp
        # Derive a token from the email when none was supplied
        # (e.g. before a token has been recorded for this user).
        if not token:
            self.token = self.get_auth_token()
        else:
            self.token = token
        super(UserWrapper, self).__init__()

    def authenticate(self, password):
        """Raise Unauthorized unless *password* matches the stored hash.

        NOTE(review): presumably util.encrypt is crypt(3)-style —
        hashing the candidate with the stored hash as salt reproduces
        the stored hash on a match; confirm in compass.utils.util.
        """
        if not util.encrypt(password, self.password) == self.password:
            raise exception.Unauthorized('%s password mismatch' % self.email)

    def get_auth_token(self):
        # Token derived deterministically from the email.
        return util.encrypt(self.email)

    def is_active(self):
        # flask-login hook: inactive accounts may not log in.
        return self.active

    def get_id(self):
        # flask-login identifies the session by token, not numeric id.
        return self.token

    def is_authenticated(self):
        # Authenticated while the token has not expired; a falsy
        # expire_timestamp means the token never expires.
        current_time = datetime.datetime.now()
        return (
            not self.expire_timestamp or
            current_time < self.expire_timestamp
        )

    def __str__(self):
        return '%s[email:%s,password:%s]' % (
            self.__class__.__name__, self.email, self.password)
+
+
@database.run_in_session()
def get_user_object(email, session=None, **kwargs):
    """Look up a user by email and wrap it as a UserWrapper.

    Extra kwargs (e.g. token, expire_timestamp) are merged into the
    row dict before wrapping.
    """
    db_user = utils.get_db_object(
        session, models.User, False, email=email
    )
    if not db_user:
        raise exception.Unauthorized(
            '%s unauthorized' % email
        )
    info = db_user.to_dict()
    info.update(kwargs)
    return UserWrapper(**info)
+
+
@database.run_in_session(exception_when_in_session=False)
def get_user_object_from_token(token, session=None):
    """Resolve a login token to a UserWrapper.

    Only tokens whose expire_timestamp is in the future match; an
    unknown or expired token raises Unauthorized.

    ::note:
        get_user_object_from_token may be called in session.
    """
    not_expired = {
        'ge': datetime.datetime.now()
    }
    user_token = utils.get_db_object(
        session, models.UserToken, False,
        token=token, expire_timestamp=not_expired
    )
    if not user_token:
        raise exception.Unauthorized(
            'invalid user token: %s' % token
        )
    info = _get_user(
        user_token.user_id, session=session
    ).to_dict()
    info['token'] = token
    info['expire_timestamp'] = user_token.expire_timestamp
    return UserWrapper(**info)
+
+
@utils.supported_filters()
@database.run_in_session()
@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
def record_user_token(
    token, expire_timestamp, user=None, session=None
):
    """Persist a user token, extending its expiry when appropriate."""
    existing = utils.get_db_object(
        session, models.UserToken, False,
        user_id=user.id, token=token
    )
    if not existing:
        return utils.add_db_object(
            session, models.UserToken, True,
            token, user_id=user.id,
            expire_timestamp=expire_timestamp
        )
    if expire_timestamp > existing.expire_timestamp:
        return utils.update_db_object(
            session, existing, expire_timestamp=expire_timestamp
        )
    # Already recorded with an equal or later expiry: nothing to do.
    return existing
+
+
@utils.supported_filters()
@database.run_in_session()
@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
def clean_user_token(token, user=None, session=None):
    """Remove a recorded token for the given user (logout path)."""
    return utils.del_db_objects(
        session, models.UserToken,
        token=token, user_id=user.id
    )
+
+
def _get_user(user_id, session=None, **kwargs):
    """Fetch a user row by numeric id.

    Raises InvalidParameter for any non-integer id.
    """
    if not isinstance(user_id, (int, long)):
        raise exception.InvalidParameter(
            'user id %s type is not int compatible' % user_id
        )
    return utils.get_db_object(
        session, models.User, id=user_id, **kwargs
    )
+
+
@utils.supported_filters()
@database.run_in_session()
@check_user_admin_or_owner()
@utils.wrap_to_dict(RESP_FIELDS)
def get_user(
    user_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Get a user by id.

    Accessible to admins and to the user themselves (enforced by
    check_user_admin_or_owner).

    Args:
        exception_when_missing: raise instead of returning None when
            the user does not exist.
    """
    return _get_user(
        user_id, session=session,
        exception_when_missing=exception_when_missing
    )
+
+
@utils.supported_filters()
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def get_current_user(
    exception_when_missing=True, user=None,
    session=None, **kwargs
):
    """Get the calling user's own record.

    No permission decorator: any authenticated user may read their
    own account.
    """
    return _get_user(
        user.id, session=session,
        exception_when_missing=exception_when_missing
    )
+
+
@utils.supported_filters(
    optional_support_keys=SUPPORTED_FIELDS
)
@database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(RESP_FIELDS)
def list_users(user=None, session=None, **filters):
    """List all users. Admin only.

    Supported filters: email, is_admin, active.
    """
    return utils.list_db_objects(
        session, models.User, **filters
    )
+
+
@utils.input_validates(email=_check_email)
@utils.supported_filters(
    ADDED_FIELDS,
    optional_support_keys=OPTIONAL_ADDED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(RESP_FIELDS)
def add_user(
    exception_when_existing=True, user=None,
    session=None, email=None, **kwargs
):
    """Create a user and return the created user object. Admin only."""
    created = utils.add_db_object(
        session, models.User,
        exception_when_existing, email, **kwargs
    )
    # Every new account starts with the default permission set.
    _add_user_permissions(
        created,
        session=session,
        name=setting.COMPASS_DEFAULT_PERMISSIONS
    )
    return created
+
+
@utils.supported_filters()
@database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(RESP_FIELDS)
def del_user(user_id, user=None, session=None, **kwargs):
    """Delete a user and return the deleted user object. Admin only."""
    target = _get_user(user_id, session=session)
    return utils.del_db_object(session, target)
+
+
@utils.supported_filters(
    optional_support_keys=UPDATED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@utils.input_validates(email=_check_email)
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def update_user(user_id, user=None, session=None, **kwargs):
    """Update a user and return the updated user object.

    Admins may change admin-level fields on anyone; a user may change
    self-level fields on their own account. Any field outside the
    caller's allowance raises Forbidden.
    """
    update_user = _get_user(
        user_id, session=session,
    )
    allowed_fields = set()
    if user.is_admin:
        allowed_fields |= set(ADMIN_UPDATED_FIELDS)
    if user.id == update_user.id:
        allowed_fields |= set(SELF_UPDATED_FIELDS)
    unsupported_fields = set(kwargs) - allowed_fields
    if unsupported_fields:
        # The user is not allowed to update these fields.
        # Bug fix: the message used to interpolate user.email twice;
        # the second placeholder is the user being updated.
        raise exception.Forbidden(
            'User %s has no permission to update user %s fields %s.' % (
                user.email, update_user.email, unsupported_fields
            )
        )
    return utils.update_db_object(session, update_user, **kwargs)
+
+
@utils.supported_filters(optional_support_keys=PERMISSION_SUPPORTED_FIELDS)
@database.run_in_session()
@check_user_admin_or_owner()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def get_permissions(
    user_id, user=None, exception_when_missing=True,
    session=None, **kwargs
):
    """List the permissions granted to a user."""
    target = _get_user(
        user_id, session=session,
        exception_when_missing=exception_when_missing
    )
    return utils.list_db_objects(
        session, models.UserPermission, user_id=target.id, **kwargs
    )
+
+
def _get_permission(user_id, permission_id, session=None, **kwargs):
    """Fetch the UserPermission row linking a user and a permission."""
    owner = _get_user(user_id, session=session)
    from compass.db.api import permission as permission_api
    perm = permission_api.get_permission_internal(
        permission_id, session=session
    )
    return utils.get_db_object(
        session, models.UserPermission,
        user_id=owner.id, permission_id=perm.id,
        **kwargs
    )
+
+
@utils.supported_filters()
@database.run_in_session()
@check_user_admin_or_owner()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def get_permission(
    user_id, permission_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """Get one permission of a user.

    Args:
        exception_when_missing: forwarded to the db lookup; raise
            instead of returning None when the link does not exist.
    """
    return _get_permission(
        user_id, permission_id,
        exception_when_missing=exception_when_missing,
        session=session,
        **kwargs
    )
+
+
@utils.supported_filters()
@database.run_in_session()
@check_user_admin_or_owner()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def del_permission(user_id, permission_id, user=None, session=None, **kwargs):
    """Delete one permission from a user and return the deleted link."""
    user_permission = _get_permission(
        user_id, permission_id,
        session=session, **kwargs
    )
    return utils.del_db_object(session, user_permission)
+
+
@utils.supported_filters(
    PERMISSION_ADDED_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def add_permission(
    user_id, permission_id=None, exception_when_existing=True,
    user=None, session=None
):
    """Grant one permission to a user. Admin only."""
    target_user = _get_user(user_id, session=session)
    from compass.db.api import permission as permission_api
    target_permission = permission_api.get_permission_internal(
        permission_id, session=session
    )
    return utils.add_db_object(
        session, models.UserPermission, exception_when_existing,
        target_user.id, target_permission.id
    )
+
+
+def _get_permission_filters(permission_ids):
+ """Helper function to filter permissions."""
+ if permission_ids == 'all':
+ return {}
+ else:
+ return {'id': permission_ids}
+
+
@utils.supported_filters(
    optional_support_keys=[
        'add_permissions', 'remove_permissions', 'set_permissions'
    ]
)
@database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def update_permissions(
    user_id, add_permissions=None, remove_permissions=None,
    set_permissions=None, user=None, session=None, **kwargs
):
    """Update a user's permissions. Admin only.

    Args:
        add_permissions/remove_permissions: permission id list or 'all'.
        set_permissions: when not None, replaces all permissions.

    Note: the list defaults were mutable ([]); replaced with None to
    avoid the shared-mutable-default pitfall. Only truthiness of these
    arguments is checked below, so behavior is unchanged for callers.
    """
    update_user = _get_user(user_id, session=session)
    # Removals run before additions; set_permissions runs last and
    # wins when provided.
    if remove_permissions:
        _remove_user_permissions(
            update_user, session=session,
            **_get_permission_filters(remove_permissions)
        )
    if add_permissions:
        _add_user_permissions(
            update_user, session=session,
            **_get_permission_filters(add_permissions)
        )
    if set_permissions is not None:
        _set_user_permissions(
            update_user, session=session,
            **_get_permission_filters(set_permissions)
        )
    return update_user.user_permissions
diff --git a/compass-deck/db/api/user_log.py b/compass-deck/db/api/user_log.py
new file mode 100644
index 0000000..70de9db
--- /dev/null
+++ b/compass-deck/db/api/user_log.py
@@ -0,0 +1,82 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""UserLog database operations."""
+import logging
+
+from compass.db.api import database
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+
# Filter keys accepted by list_actions() (all users).
SUPPORTED_FIELDS = ['user_email', 'timestamp']
# Filter keys accepted by list_user_actions() (single user).
USER_SUPPORTED_FIELDS = ['timestamp']
# Response whitelist consumed by utils.wrap_to_dict.
RESP_FIELDS = ['user_id', 'action', 'timestamp']
+
+
@database.run_in_session()
def log_user_action(user_id, action, session=None):
    """Record one user action in the audit log."""
    utils.add_db_object(
        session, models.UserLog, True, user_id=user_id, action=action
    )
+
+
@utils.supported_filters(optional_support_keys=USER_SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_admin_or_owner()
@utils.wrap_to_dict(RESP_FIELDS)
def list_user_actions(user_id, user=None, session=None, **filters):
    """List the logged actions of one user, ordered by timestamp."""
    # get_user also validates that user_id exists.
    owner = user_api.get_user(user_id, user=user, session=session)
    return utils.list_db_objects(
        session, models.UserLog, order_by=['timestamp'],
        user_id=owner['id'], **filters
    )
+
+
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@user_api.check_user_admin()
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def list_actions(user=None, session=None, **filters):
    """List the logged actions of all users. Admin only.

    NOTE(review): check_user_admin is applied outside run_in_session
    here, unlike the other endpoints in this module; it works because
    the admin check needs no session, but the ordering is inconsistent
    — confirm intentional.
    """
    return utils.list_db_objects(
        session, models.UserLog, order_by=['timestamp'], **filters
    )
+
+
@utils.supported_filters()
@database.run_in_session()
@user_api.check_user_admin_or_owner()
@utils.wrap_to_dict(RESP_FIELDS)
def del_user_actions(user_id, user=None, session=None, **filters):
    """Delete the logged actions of one user."""
    # get_user also validates that user_id exists.
    owner = user_api.get_user(user_id, user=user, session=session)
    return utils.del_db_objects(
        session, models.UserLog, user_id=owner['id'], **filters
    )
+
+
@utils.supported_filters()
@database.run_in_session()
@user_api.check_user_admin()
@utils.wrap_to_dict(RESP_FIELDS)
def del_actions(user=None, session=None, **filters):
    """Delete the logged actions of all users. Admin only."""
    return utils.del_db_objects(
        session, models.UserLog, **filters
    )
diff --git a/compass-deck/db/api/utils.py b/compass-deck/db/api/utils.py
new file mode 100644
index 0000000..a44f26e
--- /dev/null
+++ b/compass-deck/db/api/utils.py
@@ -0,0 +1,1286 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utils for database usage."""
+
+import functools
+import inspect
+import logging
+import netaddr
+import re
+
+from inspect import isfunction
+from sqlalchemy import and_
+from sqlalchemy import or_
+
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
def model_query(session, model):
    """Return a sqlalchemy query object for *model*.

    Raises:
        DatabaseException: if model is not a subclass of models.BASE.
    """
    if not issubclass(model, models.BASE):
        # Typo fix: the message used to read "sublass".
        raise exception.DatabaseException("model should be subclass of BASE!")

    return session.query(model)
+
+
+def _default_list_condition_func(col_attr, value, condition_func):
+ """The default condition func for a list of data.
+
+ Given the condition func for single item of data, this function
+ wrap the condition_func and return another condition func using
+ or_ to merge the conditions of each single item to deal with a
+ list of data item.
+
+ Args:
+ col_attr: the colomn name
+ value: the column value need to be compared.
+ condition_func: the sqlalchemy condition object like ==
+
+ Examples:
+ col_attr is name, value is ['a', 'b', 'c'] and
+ condition_func is ==, the returned condition is
+ name == 'a' or name == 'b' or name == 'c'
+ """
+ conditions = []
+ for sub_value in value:
+ condition = condition_func(col_attr, sub_value)
+ if condition is not None:
+ conditions.append(condition)
+ if conditions:
+ return or_(*conditions)
+ else:
+ return None
+
+
+def _one_item_list_condition_func(col_attr, value, condition_func):
+ """The wrapper condition func to deal with one item data list.
+
+ For simplification, it is used to reduce generating too complex
+ sql conditions.
+ """
+ if value:
+ return condition_func(col_attr, value[0])
+ else:
+ return None
+
+
+def _model_condition_func(
+ col_attr, value,
+ item_condition_func,
+ list_condition_func=_default_list_condition_func
+):
+ """Return sql condition based on value type."""
+ if isinstance(value, list):
+ if not value:
+ return None
+ if len(value) == 1:
+ return item_condition_func(col_attr, value)
+ return list_condition_func(
+ col_attr, value, item_condition_func
+ )
+ else:
+ return item_condition_func(col_attr, value)
+
+
+def _between_condition(col_attr, value):
+ """Return sql range condition."""
+ if value[0] is not None and value[1] is not None:
+ return col_attr.between(value[0], value[1])
+ if value[0] is not None:
+ return col_attr >= value[0]
+ if value[1] is not None:
+ return col_attr <= value[1]
+ return None
+
+
def model_order_by(query, model, order_by):
    """Append order-by clauses to a sql query.

    Each entry of *order_by* is a column name, a column attribute, or
    a (key, is_desc) tuple. String keys that do not name a column on
    *model* are silently skipped.
    """
    if not order_by:
        return query
    clauses = []
    for key in order_by:
        is_desc = False
        if isinstance(key, tuple):
            key, is_desc = key
        if isinstance(key, basestring):
            if not hasattr(model, key):
                # Unknown column name: ignore it.
                continue
            col_attr = getattr(model, key)
        else:
            col_attr = key
        clauses.append(col_attr.desc() if is_desc else col_attr)
    return query.order_by(*clauses)
+
+
+def _model_condition(col_attr, value):
+ """Generate condition for one column.
+
+ Example for col_attr is name:
+ value is 'a': name == 'a'
+ value is ['a']: name == 'a'
+ value is ['a', 'b']: name == 'a' or name == 'b'
+ value is {'eq': 'a'}: name == 'a'
+ value is {'lt': 'a'}: name < 'a'
+ value is {'le': 'a'}: name <= 'a'
+ value is {'gt': 'a'}: name > 'a'
+ value is {'ge': 'a'}: name >= 'a'
+ value is {'ne': 'a'}: name != 'a'
+ value is {'in': ['a', 'b']}: name in ['a', 'b']
+ value is {'notin': ['a', 'b']}: name not in ['a', 'b']
+ value is {'startswith': 'abc'}: name like 'abc%'
+ value is {'endswith': 'abc'}: name like '%abc'
+ value is {'like': 'abc'}: name like '%abc%'
+ value is {'between': ('a', 'c')}: name >= 'a' and name <= 'c'
+ value is [{'lt': 'a'}]: name < 'a'
+ value is [{'lt': 'a'}, {'gt': c'}]: name < 'a' or name > 'c'
+ value is {'lt': 'c', 'gt': 'a'}: name > 'a' and name < 'c'
+
+ If value is a list, the condition is the or relationship among
+ conditions of each item.
+ If value is dict and there are multi keys in the dict, the relationship
+ is and conditions of each key.
+ Otherwise the condition is to compare the column with the value.
+ """
+ if isinstance(value, list):
+ basetype_values = []
+ composite_values = []
+ for item in value:
+ if isinstance(item, (list, dict)):
+ composite_values.append(item)
+ else:
+ basetype_values.append(item)
+ conditions = []
+ if basetype_values:
+ if len(basetype_values) == 1:
+ condition = (col_attr == basetype_values[0])
+ else:
+ condition = col_attr.in_(basetype_values)
+ conditions.append(condition)
+ for composite_value in composite_values:
+ condition = _model_condition(col_attr, composite_value)
+ if condition is not None:
+ conditions.append(condition)
+ if not conditions:
+ return None
+ if len(conditions) == 1:
+ return conditions[0]
+ return or_(*conditions)
+ elif isinstance(value, dict):
+ conditions = []
+ if 'eq' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['eq'],
+ lambda attr, data: attr == data,
+ lambda attr, data, item_condition_func: attr.in_(data)
+ ))
+ if 'lt' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['lt'],
+ lambda attr, data: attr < data,
+ _one_item_list_condition_func
+ ))
+ if 'gt' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['gt'],
+ lambda attr, data: attr > data,
+ _one_item_list_condition_func
+ ))
+ if 'le' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['le'],
+ lambda attr, data: attr <= data,
+ _one_item_list_condition_func
+ ))
+ if 'ge' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['ge'],
+ lambda attr, data: attr >= data,
+ _one_item_list_condition_func
+ ))
+ if 'ne' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['ne'],
+ lambda attr, data: attr != data,
+ lambda attr, data, item_condition_func: attr.notin_(data)
+ ))
+ if 'in' in value:
+ conditions.append(col_attr.in_(value['in']))
+ if 'notin' in value:
+ conditions.append(col_attr.notin_(value['notin']))
+ if 'startswith' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['startswith'],
+ lambda attr, data: attr.like('%s%%' % data)
+ ))
+ if 'endswith' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['endswith'],
+ lambda attr, data: attr.like('%%%s' % data)
+ ))
+ if 'like' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['like'],
+ lambda attr, data: attr.like('%%%s%%' % data)
+ ))
+ if 'between' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['between'],
+ _between_condition
+ ))
+ conditions = [
+ condition
+ for condition in conditions
+ if condition is not None
+ ]
+ if not conditions:
+ return None
+ if len(conditions) == 1:
+ return conditions[0]
+ return and_(conditions)
+ else:
+ condition = (col_attr == value)
+ return condition
+
+
def model_filter(query, model, **filters):
    """Attach a filter condition to query for every given column.

    String keys are looked up as attributes on model and silently
    skipped when the model does not define them; non-string keys are
    assumed to already be column attributes.
    """
    for key, value in filters.items():
        if not isinstance(key, basestring):
            col_attr = key
        elif hasattr(model, key):
            col_attr = getattr(model, key)
        else:
            continue
        condition = _model_condition(col_attr, value)
        if condition is not None:
            query = query.filter(condition)
    return query
+
+
def replace_output(**output_mapping):
    """Decorator that recursively renames keys of the returned data.

    The replacement rules are described in _replace_output.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            return _replace_output(result, **output_mapping)
        return wrapper
    return decorator
+
+
def _replace_output(data, **output_mapping):
    """Rename keys of data according to output_mapping.

    A string mapping value renames the key itself; a dict mapping
    value keeps the key and applies the mapping recursively to the
    nested value. Lists are processed element by element.

    Example:
        data = {'a': 'hello'}
        output_mapping = {'a': 'b'}
        returns: {'b': 'hello'}

        data = {'a': {'b': 'hello'}}
        output_mapping = {'a': {'b': 'c'}}
        returns: {'a': {'c': 'hello'}}

        data = [{'a': 'hello'}, {'a': 'hi'}]
        output_mapping = {'a': 'b'}
        returns: [{'b': 'hello'}, {'b': 'hi'}]
    """
    if isinstance(data, list):
        return [
            _replace_output(item, **output_mapping)
            for item in data
        ]
    if not isinstance(data, dict):
        raise exception.InvalidResponse(
            '%s type is not dict' % data
        )
    replaced = {}
    for key, value in data.items():
        if key not in output_mapping:
            replaced[key] = value
            continue
        mapped = output_mapping[key]
        if isinstance(mapped, basestring):
            replaced[mapped] = value
        else:
            replaced[key] = _replace_output(value, **mapped)
    return replaced
+
+
def get_wrapped_func(func):
    """Return the innermost function wrapped by decorators.

    Walks func's closure cells looking for a function object and
    recurses into the first one found; when the closure holds no
    function (or there is no closure), func itself is returned.

    Example:
        @dec1
        @dec2
        myfunc(*args, **kwargs)

        get_wrapped_func(myfunc) returns the original myfunc object
        (with __name__ == 'myfunc') instead of the partial wrapper.
    """
    # NOTE: func_closure is the Python 2 spelling of __closure__.
    closure_cells = func.func_closure or ()
    for cell in closure_cells:
        candidate = cell.cell_contents
        if isfunction(candidate):
            return get_wrapped_func(candidate)
    return func
+
+
def wrap_to_dict(support_keys=[], **filters):
    """Decorator converting the returned db object(s) to dict.

    The conversion and key filtering rules are described in
    _wrapper_dict.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            return _wrapper_dict(result, support_keys, **filters)
        return wrapper
    return decorator
+
+
def _wrapper_dict(data, support_keys, **filters):
    """Convert db object(s) to dict and keep only supported keys.

    Lists are converted element by element; model instances are turned
    into dicts first. Only keys in support_keys with a non-None value
    are kept. For each kept key that also appears in filters, the
    nested value is filtered recursively by filters[key].

    Example:
        data is models.Switch, it will be converted to
        {
            'id': 1, 'ip': '10.0.0.1', 'ip_int': 123456,
            'credentials': {'version': 2, 'password': 'abc'}
        }
        With support_keys ['id', 'ip', 'credentials'] it becomes {
            'id': 1, 'ip': '10.0.0.1',
            'credentials': {'version': 2, 'password': 'abc'}
        }
        With filters {'credentials': ['version']} it becomes {
            'id': 1, 'ip': '10.0.0.1',
            'credentials': {'version': 2}
        }
    """
    logging.debug(
        'wrap dict %s by support_keys=%s filters=%s',
        data, support_keys, filters
    )
    if isinstance(data, list):
        return [
            _wrapper_dict(item, support_keys, **filters)
            for item in data
        ]
    if isinstance(data, models.HelperMixin):
        data = data.to_dict()
    if not isinstance(data, dict):
        raise exception.InvalidResponse(
            'response %s type is not dict' % data
        )
    info = {}
    try:
        for key in support_keys:
            if key not in data or data[key] is None:
                continue
            if key not in filters:
                info[key] = data[key]
                continue
            filter_keys = filters[key]
            if isinstance(filter_keys, dict):
                # nested filter spec: keep its keys, recurse with it.
                info[key] = _wrapper_dict(
                    data[key], filter_keys.keys(), **filter_keys
                )
            else:
                info[key] = _wrapper_dict(data[key], filter_keys)
        return info
    except Exception as error:
        logging.exception(error)
        raise error
+
+
def replace_filters(**kwarg_mapping):
    """Decorator renaming caller kwargs before invoking func.

    Any keyword named in kwarg_mapping is forwarded under its mapped
    name; all other keywords pass through untouched. This lets callers
    use friendly filter names that models.py understands.

    Example:
        kwargs: {'a': 'b'}, kwarg_mapping: {'a': 'c'}
        the decorated func receives: {'c': 'b'}
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            translated = dict(
                (kwarg_mapping.get(key, key), value)
                for key, value in kwargs.items()
            )
            return func(*args, **translated)
        return wrapper
    return decorator
+
+
def supported_filters(
    support_keys=[],
    optional_support_keys=[],
    ignore_support_keys=[],
):
    """Decorator to check kwargs keys.

    keys in kwargs and in ignore_support_keys will be removed.
    If any unsupported keys found, a InvalidParameter
    exception raises.

    Args:
        support_keys: keys that must exist.
        optional_support_keys: keys that may exist.
        ignore_support_keys: keys should be ignored.

    Assumption: args without default value is supposed to exist.
    You can add them in support_keys or not but we will make sure
    it appears when we call the decorated function.
    We do best match on both args and kwargs to make sure if the
    key appears or not.

    Examples:
        decorated func: func(a, b, c=3, d=4, **kwargs)

        support_keys=['e'] and call func(e=5):
            raises: InvalidParameter: missing declared arg
        support_keys=['e'] and call func(1,2,3,4,5,e=6):
            raises: InvalidParameter: caller sending more args
        support_keys=['e'] and call func(1,2):
            raises: InvalidParameter: supported keys ['e'] missing
        support_keys=['d', 'e'] and call func(1,2,e=3):
            raises: InvalidParameter: supported keys ['d'] missing
        support_keys=['d', 'e'] and call func(1,2,d=4, e=3):
            passed
        support_keys=['d'], optional_support_keys=['e']
        and call func(1,2, d=3):
            passed
        support_keys=['d'], optional_support_keys=['e']
        and call func(1,2, d=3, e=4, f=5):
            raises: InvalidParameter: unsupported keys ['f']
        support_keys=['d'], optional_support_keys=['e'],
        ignore_support_keys=['f']
        and call func(1,2, d=3, e=4, f=5):
            passed to decorated keys: func(1,2, d=3, e=4)
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **filters):
            # Inspect the innermost wrapped function so the signature
            # check runs against the real function, not another
            # decorator's generic (*args, **kwargs) wrapper.
            wrapped_func = get_wrapped_func(func)
            # NOTE(review): inspect.getargspec is fine on the Python 2
            # runtime this file targets, but is removed in Python 3.11+
            # (getfullargspec would be needed on upgrade) — confirm
            # before porting.
            argspec = inspect.getargspec(wrapped_func)
            wrapped_args = argspec.args
            args_defaults = argspec.defaults
            # wrapped_must_args are positional args caller must pass in.
            if args_defaults:
                wrapped_must_args = wrapped_args[:-len(args_defaults)]
            else:
                wrapped_must_args = wrapped_args[:]
            # make sure any positional args without default value in
            # decorated function should appear in args or filters.
            if len(args) < len(wrapped_must_args):
                remain_args = wrapped_must_args[len(args):]
                for remain_arg in remain_args:
                    if remain_arg not in filters:
                        raise exception.InvalidParameter(
                            'function missing declared arg %s '
                            'while caller sends args %s' % (
                                remain_arg, args
                            )
                        )
            # make sure args should be no more than positional args
            # declared in decorated function.
            if len(args) > len(wrapped_args):
                raise exception.InvalidParameter(
                    'function definition args %s while the caller '
                    'sends args %s' % (
                        wrapped_args, args
                    )
                )
            # exist_args are positional args caller has given.
            exist_args = dict(zip(wrapped_args, args)).keys()
            must_support_keys = set(support_keys)
            all_support_keys = must_support_keys | set(optional_support_keys)
            wrapped_supported_keys = set(filters) | set(exist_args)
            unsupported_keys = (
                set(filters) - set(wrapped_args) -
                all_support_keys - set(ignore_support_keys)
            )
            # unsupported_keys are the keys that are not in support_keys,
            # optional_support_keys, ignore_support_keys and are not passed in
            # by positional args. It means the decorated function may
            # not understand these parameters.
            if unsupported_keys:
                raise exception.InvalidParameter(
                    'filter keys %s are not supported for %s' % (
                        list(unsupported_keys), wrapped_func
                    )
                )
            # missing_keys are the keys that must exist but missing in
            # both positional args or kwargs.
            missing_keys = must_support_keys - wrapped_supported_keys
            if missing_keys:
                raise exception.InvalidParameter(
                    'filter keys %s not found for %s' % (
                        list(missing_keys), wrapped_func
                    )
                )
            # We filter kwargs to eliminate ignore_support_keys in kwargs
            # passed to decorated function.
            filtered_filters = dict([
                (key, value)
                for key, value in filters.items()
                if key not in ignore_support_keys
            ])
            return func(*args, **filtered_filters)
        return wrapper
    return decorator
+
+
def input_filters(
    **filters
):
    """Decorator dropping kwargs that fail their filter callback.

    For each keyword that has an entry in filters, the callback is
    invoked as filters[key](value, *args, **kwargs) — mirroring the
    decorated function's own signature; a falsy result removes that
    keyword before the decorated function is called. Keywords without
    a filter are always forwarded.

    Example:
        With filters={'a': func} where func returns False, calling
        decorated_func(a=1, b=2) actually invokes the decorated
        function with b=2 only; a=1 is removed since it does not
        pass filtering.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            accepted = {}
            for key, value in kwargs.items():
                if key not in filters:
                    accepted[key] = value
                elif filters[key](value, *args, **kwargs):
                    accepted[key] = value
                else:
                    logging.debug(
                        'ignore filtered input key %s' % key
                    )
            return func(*args, **accepted)
        return wrapper
    return decorator
+
+
+def _obj_equal_or_subset(check, obj):
+ """Used by output filter to check if obj is in check."""
+ if check == obj:
+ return True
+ if not issubclass(obj.__class__, check.__class__):
+ return False
+ if isinstance(obj, dict):
+ return _dict_equal_or_subset(check, obj)
+ elif isinstance(obj, list):
+ return _list_equal_or_subset(check, obj)
+ else:
+ return False
+
+
+def _list_equal_or_subset(check_list, obj_list):
+ """Used by output filter to check if obj_list is in check_list"""
+ if not isinstance(check_list, list):
+ return False
+ return set(check_list).issubset(set(obj_list))
+
+
+def _dict_equal_or_subset(check_dict, obj_dict):
+ """Used by output filter to check if obj_dict in check_dict."""
+ if not isinstance(check_dict, dict):
+ return False
+ for key, value in check_dict.items():
+ if (
+ key not in obj_dict or
+ not _obj_equal_or_subset(check_dict[key], obj_dict[key])
+ ):
+ return False
+ return True
+
+
def general_filter_callback(general_filter, obj):
    """General filter function to filter output.

    Since some fields stored in database are json encoded and
    we want to do a deep match on the decoded value, output_filters
    and general_filter_callback handle that kind of filtering.

    Key 'resp_eq' gets special treatment: it checks whether obj is
    recursively a superset-match of general_filter['resp_eq'].

    Example:
        obj: 'b'
        general_filter: {}
        returns: True

        obj: 'b'
        general_filter: {'resp_in': ['a', 'b']}
        returns: True

        obj: 'b'
        general_filter: {'resp_in': ['a']}
        returns: False

        obj: 'b'
        general_filter: {'resp_eq': 'b'}
        returns: True

        obj: 'b'
        general_filter: {'resp_eq': 'a'}
        returns: False

        obj: 'b'
        general_filter: {'resp_range': ('a', 'c')}
        returns: True

        obj: 'd'
        general_filter: {'resp_range': ('a', 'c')}
        returns: False

    Multiple keys in one dict are combined with AND; a list of
    filters is combined with OR. Unknown filter shapes match
    everything.

    Supported general filters: [
        'resp_eq', 'resp_in', 'resp_lt',
        'resp_le', 'resp_gt', 'resp_ge',
        'resp_match', 'resp_range'
    ]
    """
    if isinstance(general_filter, list):
        # empty list matches everything; otherwise OR the sub-filters.
        if not general_filter:
            return True
        return any([
            general_filter_callback(item, obj)
            for item in general_filter
        ])
    elif isinstance(general_filter, dict):
        if 'resp_eq' in general_filter:
            if not _obj_equal_or_subset(
                general_filter['resp_eq'], obj
            ):
                return False
        if 'resp_in' in general_filter:
            in_filters = general_filter['resp_in']
            if not any([
                _obj_equal_or_subset(in_filter, obj)
                for in_filter in in_filters
            ]):
                return False
        if 'resp_lt' in general_filter:
            if obj >= general_filter['resp_lt']:
                return False
        if 'resp_le' in general_filter:
            if obj > general_filter['resp_le']:
                return False
        if 'resp_gt' in general_filter:
            if obj <= general_filter['resp_gt']:
                return False
        if 'resp_ge' in general_filter:
            # BUG FIX: this branch previously read
            # general_filter['resp_gt'], raising KeyError (or
            # misfiltering) whenever only 'resp_ge' was supplied.
            if obj < general_filter['resp_ge']:
                return False
        if 'resp_match' in general_filter:
            if not re.match(general_filter['resp_match'], obj):
                return False
        if 'resp_range' in general_filter:
            resp_range = general_filter['resp_range']
            if not isinstance(resp_range, list):
                resp_range = [resp_range]
            # obj must fall inside at least one (start, end) interval.
            in_range = False
            for range_start, range_end in resp_range:
                if range_start <= obj <= range_end:
                    in_range = True
            if not in_range:
                return False
        return True
    else:
        return True
+
+
def filter_output(filter_callbacks, kwargs, obj, missing_ok=False):
    """Return False when obj should be dropped from the output.

    For every callback key the caller supplied in kwargs, the callback
    decides whether obj[key] satisfies kwargs[key]. A callback key
    missing from obj raises InvalidResponse unless missing_ok is set.
    """
    for callback_key, callback_value in filter_callbacks.items():
        if callback_key not in kwargs:
            continue
        if callback_key not in obj:
            if missing_ok:
                continue
            raise exception.InvalidResponse(
                '%s is not in %s' % (callback_key, obj)
            )
        accepted = callback_value(
            kwargs[callback_key], obj[callback_key]
        )
        if not accepted:
            return False
    return True
+
+
def output_filters(missing_ok=False, **filter_callbacks):
    """Decorator filtering the returned list by filter callbacks.

    Each filter_callback should have a definition like:
        func({'resp_eq': 'a'}, 'a')
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return [
                obj
                for obj in func(*args, **kwargs)
                if filter_output(
                    filter_callbacks, kwargs, obj, missing_ok
                )
            ]
        return wrapper
    return decorator
+
+
+def _input_validates(args_validators, kwargs_validators, *args, **kwargs):
+ """Used by input_validators to validate inputs."""
+ for i, value in enumerate(args):
+ if i < len(args_validators) and args_validators[i]:
+ args_validators[i](value)
+ for key, value in kwargs.items():
+ if kwargs_validators.get(key):
+ kwargs_validators[key](value)
+
+
def input_validates(*args_validators, **kwargs_validators):
    """Decorator validating inputs before calling the function.

    Each validator takes a single value, e.g.
        func('00:01:02:03:04:05')
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*call_args, **call_kwargs):
            _input_validates(
                args_validators, kwargs_validators,
                *call_args, **call_kwargs
            )
            return func(*call_args, **call_kwargs)
        return wrapped
    return decorator
+
+
+def _input_validates_with_args(
+ args_validators, kwargs_validators, *args, **kwargs
+):
+ """Validate input with validators.
+
+ Each validator takes the arguments of the decorated function
+ as its arguments. The function definition is like:
+ func(value, *args, **kwargs) compared with the decorated
+ function func(*args, **kwargs).
+ """
+ for i, value in enumerate(args):
+ if i < len(args_validators) and args_validators[i]:
+ args_validators[i](value, *args, **kwargs)
+ for key, value in kwargs.items():
+ if kwargs_validators.get(key):
+ kwargs_validators[key](value, *args, **kwargs)
+
+
def input_validates_with_args(
    *args_validators, **kwargs_validators
):
    """Decorator to validate input.

    Validators receive the decorated function's full argument list.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*call_args, **call_kwargs):
            _input_validates_with_args(
                args_validators, kwargs_validators,
                *call_args, **call_kwargs
            )
            return func(*call_args, **call_kwargs)
        return wrapped
    return decorator
+
+
def _output_validates_with_args(
    kwargs_validators, obj, *args, **kwargs
):
    """Validate output using validators that receive call arguments.

    Each validator is invoked as validator(value, *args, **kwargs)
    where args/kwargs are the decorated function's arguments. Lists
    are validated item by item; model objects are converted to dict
    first; anything else that is not a dict raises InvalidResponse.
    """
    if isinstance(obj, list):
        for element in obj:
            _output_validates_with_args(
                kwargs_validators, element, *args, **kwargs
            )
        return
    if isinstance(obj, models.HelperMixin):
        obj = obj.to_dict()
    if not isinstance(obj, dict):
        raise exception.InvalidResponse(
            'response %s type is not dict' % str(obj)
        )
    try:
        for key, value in obj.items():
            if key in kwargs_validators:
                kwargs_validators[key](value, *args, **kwargs)
    except Exception as error:
        logging.exception(error)
        raise error
+
+
def output_validates_with_args(**kwargs_validators):
    """Decorator to validate output.

    The validators can take the decorated function's own arguments
    as their arguments.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            items = result if isinstance(result, list) else [result]
            for item in items:
                _output_validates_with_args(
                    kwargs_validators, item, *args, **kwargs
                )
            return result
        return wrapper
    return decorator
+
+
def _output_validates(kwargs_validators, obj):
    """Validate output dict values with per-key validators.

    Each validator has the signature func(value). Lists are handled
    item by item; model objects are converted to dict first; anything
    else that is not a dict raises InvalidResponse.
    """
    if isinstance(obj, list):
        for element in obj:
            _output_validates(kwargs_validators, element)
        return
    if isinstance(obj, models.HelperMixin):
        obj = obj.to_dict()
    if not isinstance(obj, dict):
        raise exception.InvalidResponse(
            'response %s type is not dict' % str(obj)
        )
    try:
        for key, value in obj.items():
            if key in kwargs_validators:
                kwargs_validators[key](value)
    except Exception as error:
        logging.exception(error)
        raise error
+
+
def output_validates(**kwargs_validators):
    """Decorator validating the decorated function's output."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            items = result if isinstance(result, list) else [result]
            for item in items:
                _output_validates(kwargs_validators, item)
            return result
        return wrapper
    return decorator
+
+
def get_db_object(session, table, exception_when_missing=True, **kwargs):
    """Fetch one db object from table matching kwargs.

    When nothing matches, returns None if exception_when_missing is
    False, otherwise raises RecordNotExists. The lookup runs inside a
    subtransaction; the miss handling happens after it completes.
    """
    if not session:
        raise exception.DatabaseException('session param is None')
    with session.begin(subtransactions=True):
        logging.debug(
            'session %s get db object %s from table %s',
            id(session), kwargs, table.__name__)
        found = model_filter(
            model_query(session, table), table, **kwargs
        ).first()
        logging.debug(
            'session %s got db object %s', id(session), found
        )
        if found:
            return found

    if not exception_when_missing:
        return None

    raise exception.RecordNotExists(
        'Cannot find the record in table %s: %s' % (
            table.__name__, kwargs
        )
    )
+
+
def add_db_object(session, table, exception_when_existing=True,
                  *args, **kwargs):
    """Create db object.

    If not exception_when_existing and the db object exists,
    Instead of raising exception, updating the existing db object.

    Args:
        session: active database session; must not be None.
        table: the model class to insert into.
        exception_when_existing: when True, a matching existing row
            raises DuplicatedRecord instead of being updated.
        args: positional values matching table.__init__'s declared
            parameters (sans self); they identify the record.
        kwargs: extra attributes set on the (new or existing) object.

    Returns:
        the created or updated db object.

    Raises:
        exception.DatabaseException: if session is None.
        exception.InvalidParameter: if args do not fit __init__'s
            required/optional parameter count.
        exception.DuplicatedRecord: on existing match when
            exception_when_existing is True.
    """
    if not session:
        raise exception.DatabaseException('session param is None')
    with session.begin(subtransactions=True):
        logging.debug(
            'session %s add object %s atributes %s to table %s',
            id(session), args, kwargs, table.__name__)
        # Map positional args onto the model __init__ signature to
        # build the identifying key set for the lookup below.
        argspec = inspect.getargspec(table.__init__)
        arg_names = argspec.args[1:]
        arg_defaults = argspec.defaults
        if not arg_defaults:
            arg_defaults = []
        # Accept between (required-args) and (all-args) positional values.
        if not (
            len(arg_names) - len(arg_defaults) <= len(args) <= len(arg_names)
        ):
            raise exception.InvalidParameter(
                'arg names %s does not match arg values %s' % (
                    arg_names, args)
            )
        db_keys = dict(zip(arg_names, args))
        if db_keys:
            db_object = session.query(table).filter_by(**db_keys).first()
        else:
            # No identifying keys given: always create a fresh object.
            db_object = None

        new_object = False
        if db_object:
            logging.debug(
                'got db object %s: %s', db_keys, db_object
            )
            if exception_when_existing:
                raise exception.DuplicatedRecord(
                    '%s exists in table %s' % (db_keys, table.__name__)
                )
        else:
            db_object = table(**db_keys)
            new_object = True

        # kwargs are applied whether the object is new or pre-existing.
        for key, value in kwargs.items():
            setattr(db_object, key, value)

        if new_object:
            session.add(db_object)
        # flush so model hooks see the persisted state.
        session.flush()
        db_object.initialize()
        db_object.validate()
        logging.debug(
            'session %s db object %s added', id(session), db_object
        )
        return db_object
+
+
def list_db_objects(session, table, order_by=[], **filters):
    """List db objects matching filters, optionally sorted.

    When order_by is given, results are sorted by those keys.
    """
    if not session:
        raise exception.DatabaseException('session param is None')
    with session.begin(subtransactions=True):
        logging.debug(
            'session %s list db objects by filters %s in table %s',
            id(session), filters, table.__name__
        )
        query = model_query(session, table)
        query = model_filter(query, table, **filters)
        query = model_order_by(query, table, order_by)
        db_objects = query.all()
        logging.debug(
            'session %s got listed db objects: %s',
            id(session), db_objects
        )
        return db_objects
+
+
def del_db_objects(session, table, **filters):
    """Delete all db objects matching filters; return what was deleted."""
    if not session:
        raise exception.DatabaseException('session param is None')
    with session.begin(subtransactions=True):
        logging.debug(
            'session %s delete db objects by filters %s in table %s',
            id(session), filters, table.__name__
        )
        matching = model_query(session, table)
        matching = model_filter(matching, table, **filters)
        # capture the rows before issuing the bulk delete.
        db_objects = matching.all()
        matching.delete(synchronize_session=False)
        logging.debug(
            'session %s db objects %s deleted', id(session), db_objects
        )
        return db_objects
+
+
def update_db_objects(session, table, updates={}, **filters):
    """Apply updates to every db object matching filters."""
    if not session:
        raise exception.DatabaseException('session param is None')
    with session.begin(subtransactions=True):
        logging.debug(
            'session %s update db objects by filters %s in table %s',
            id(session), filters, table.__name__)
        query = model_query(session, table)
        db_objects = model_filter(query, table, **filters).all()
        for db_object in db_objects:
            logging.debug('update db object %s: %s', db_object, updates)
            update_db_object(session, db_object, **updates)
        logging.debug(
            'session %s db objects %s updated',
            id(session), db_objects
        )
        return db_objects
+
+
def update_db_object(session, db_object, **kwargs):
    """Set kwargs as attributes on db_object and revalidate it."""
    if not session:
        raise exception.DatabaseException('session param is None')
    with session.begin(subtransactions=True):
        logging.debug(
            'session %s update db object %s by value %s',
            id(session), db_object, kwargs
        )
        for attr_name, attr_value in kwargs.items():
            setattr(db_object, attr_name, attr_value)
        # flush so model hooks see the persisted state.
        session.flush()
        db_object.update()
        db_object.validate()
        logging.debug(
            'session %s db object %s updated',
            id(session), db_object
        )
        return db_object
+
+
def del_db_object(session, db_object):
    """Delete db object.

    Args:
        session: active database session; must not be None.
        db_object: the mapped object to delete.

    Returns:
        the (now deleted) db_object, useful for building responses.

    Raises:
        exception.DatabaseException: if session is None.
    """
    if not session:
        raise exception.DatabaseException('session param is None')
    with session.begin(subtransactions=True):
        logging.debug(
            'session %s delete db object %s',
            id(session), db_object
        )
        session.delete(db_object)
        logging.debug(
            'session %s db object %s deleted',
            id(session), db_object
        )
        return db_object
+
+
def check_ip(ip):
    """Validate that ip is a well-formed IP address.

    Args:
        ip: string (or value) parseable by netaddr.IPAddress.

    Raises:
        exception.InvalidParameter: if ip cannot be parsed.
    """
    try:
        netaddr.IPAddress(ip)
    except Exception as error:
        logging.exception(error)
        # fixed user-facing message: was 'format uncorrect'.
        raise exception.InvalidParameter(
            'ip address %s format is incorrect' % ip
        )
+
+
def check_mac(mac):
    """Check mac is mac address formatted.

    Args:
        mac: string (or value) parseable by netaddr.EUI.

    Raises:
        exception.InvalidParameter: if mac cannot be parsed.
    """
    try:
        netaddr.EUI(mac)
    except Exception as error:
        logging.exception(error)
        raise exception.InvalidParameter(
            'invalid mac address %s' % mac
        )
+
+
# Anchored with \Z so the whole name must match, not just a prefix.
NAME_PATTERN = re.compile(r'[a-zA-Z0-9][a-zA-Z0-9_-]*\Z')


def check_name(name):
    """Validate that name fully matches the allowed name format.

    A name starts with an alphanumeric character followed by any
    number of alphanumerics, underscores or dashes. The pattern is
    anchored; the previous unanchored match() accepted names like
    'good$bad' because only the prefix needed to match.

    Raises:
        exception.InvalidParameter: if name does not fully match.
    """
    if not NAME_PATTERN.match(name):
        raise exception.InvalidParameter(
            'name %s does not match the pattern %s' % (
                name, NAME_PATTERN.pattern
            )
        )
+
+
def _check_ipmi_credentials_ip(ip):
    """Validate the 'ip' field of ipmi credentials via check_ip.

    Looked up dynamically by check_ipmi_credentials through globals().
    """
    check_ip(ip)
+
+
def check_ipmi_credentials(ipmi_credentials):
    """Check ipmi credentials format is correct.

    Credentials must be a dict containing exactly the keys ip,
    username and password; each field with a module-level
    _check_ipmi_credentials_<field> helper is validated by it.
    Empty/None credentials are accepted as-is.
    """
    if not ipmi_credentials:
        return
    if not isinstance(ipmi_credentials, dict):
        raise exception.InvalidParameter(
            'invalid ipmi credentials %s' % ipmi_credentials
        )
    required_fields = ['ip', 'username', 'password']
    for key in ipmi_credentials:
        if key not in required_fields:
            raise exception.InvalidParameter(
                'unrecognized field %s in ipmi credentials %s' % (
                    key, ipmi_credentials
                )
            )
    this_module = globals()
    for key in required_fields:
        if key not in ipmi_credentials:
            raise exception.InvalidParameter(
                'no field %s in ipmi credentials %s' % (
                    key, ipmi_credentials
                )
            )
        check_ipmi_credential_field = '_check_ipmi_credentials_%s' % key
        if check_ipmi_credential_field in this_module:
            this_module[check_ipmi_credential_field](
                ipmi_credentials[key]
            )
        else:
            logging.debug(
                'function %s is not defined', check_ipmi_credential_field
            )
+
+
+def _check_switch_credentials_version(version):
+ if version not in ['1', '2c', '3']:
+ raise exception.InvalidParameter(
+ 'unknown snmp version %s' % version
+ )
+
+
def check_switch_credentials(credentials):
    """Check switch credentials format is correct.

    Credentials must be a dict containing exactly the keys version
    and community; each field with a module-level
    _check_switch_credentials_<field> helper is validated by it.
    Empty/None credentials are accepted as-is.
    """
    if not credentials:
        return
    if not isinstance(credentials, dict):
        raise exception.InvalidParameter(
            'credentials %s is not dict' % credentials
        )
    expected_keys = ['version', 'community']
    for key in credentials:
        if key not in expected_keys:
            raise exception.InvalidParameter(
                'unrecognized key %s in credentials %s' % (key, credentials)
            )
    this_module = globals()
    for key in expected_keys:
        if key not in credentials:
            raise exception.InvalidParameter(
                'there is no %s field in credentials %s' % (key, credentials)
            )
        key_check_func_name = '_check_switch_credentials_%s' % key
        if key_check_func_name in this_module:
            this_module[key_check_func_name](
                credentials[key]
            )
        else:
            logging.debug(
                'function %s is not defined',
                key_check_func_name
            )