path: root/compass-tasks/db
author     Harry Huang <huangxiangyu5@huawei.com>    2017-11-17 14:53:44 +0800
committer  Harry Huang <huangxiangyu5@huawei.com>    2017-12-21 16:36:30 +0800
commit     8646b8d62cf4ca7b6bccae537a0c9e72ba45eab3 (patch)
tree       73a9a983e0dd1423e9df928a78a5023a09d5a7f9 /compass-tasks/db
parent     6234176ae292a75dcda5520324cb7857d6105988 (diff)
Merge compass-tasks-osa and compass-tasks-k8s
JIRA: COMPASS-568

rename compass-tasks to compass-tasks-base.
add both osa and k8s support in compass-tasks

Change-Id: I438f5b17e509d4cb751ced0ffe640ec70899882f
Signed-off-by: Harry Huang <huangxiangyu5@huawei.com>
Diffstat (limited to 'compass-tasks/db')
-rw-r--r--  compass-tasks/db/__init__.py  13
-rw-r--r--  compass-tasks/db/api/__init__.py  13
-rw-r--r--  compass-tasks/db/api/adapter.py  313
-rw-r--r--  compass-tasks/db/api/adapter_holder.py  155
-rw-r--r--  compass-tasks/db/api/cluster.py  2444
-rw-r--r--  compass-tasks/db/api/database.py  264
-rw-r--r--  compass-tasks/db/api/health_check_report.py  190
-rw-r--r--  compass-tasks/db/api/host.py  1120
-rw-r--r--  compass-tasks/db/api/machine.py  317
-rw-r--r--  compass-tasks/db/api/metadata.py  517
-rw-r--r--  compass-tasks/db/api/metadata_holder.py  731
-rw-r--r--  compass-tasks/db/api/network.py  160
-rw-r--r--  compass-tasks/db/api/permission.py  357
-rw-r--r--  compass-tasks/db/api/switch.py  1213
-rw-r--r--  compass-tasks/db/api/user.py  553
-rw-r--r--  compass-tasks/db/api/user_log.py  82
-rw-r--r--  compass-tasks/db/api/utils.py  1286
-rw-r--r--  compass-tasks/db/callback.py  204
-rw-r--r--  compass-tasks/db/config_validation/__init__.py  0
-rw-r--r--  compass-tasks/db/config_validation/default_validator.py  131
-rw-r--r--  compass-tasks/db/config_validation/extension/__init__.py  0
-rw-r--r--  compass-tasks/db/config_validation/extension/openstack.py  18
-rw-r--r--  compass-tasks/db/exception.py  116
-rw-r--r--  compass-tasks/db/models.py  1924
-rw-r--r--  compass-tasks/db/v1/model.py  724
-rw-r--r--  compass-tasks/db/validator.py  195
26 files changed, 0 insertions, 13040 deletions
diff --git a/compass-tasks/db/__init__.py b/compass-tasks/db/__init__.py
deleted file mode 100644
index 4ee55a4..0000000
--- a/compass-tasks/db/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/compass-tasks/db/api/__init__.py b/compass-tasks/db/api/__init__.py
deleted file mode 100644
index 5e42ae9..0000000
--- a/compass-tasks/db/api/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/compass-tasks/db/api/adapter.py b/compass-tasks/db/api/adapter.py
deleted file mode 100644
index c3ad48d..0000000
--- a/compass-tasks/db/api/adapter.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Adapter related database operations."""
-import logging
-import re
-
-from compass.db.api import database
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-
-from compass.utils import setting_wrapper as setting
-from compass.utils import util
-
-
-OSES = None
-OS_INSTALLERS = None
-PACKAGE_INSTALLERS = None
-ADAPTERS = None
-ADAPTERS_FLAVORS = None
-ADAPTERS_ROLES = None
-
-
-def _get_oses_from_configuration():
- """Get all os configs from os configuration dir.
-
- Example: {
- <os_name>: {
- 'name': <os_name>,
- 'id': <os_name>,
- 'os_id': <os_name>,
- 'deployable': True
- }
- }
- """
- configs = util.load_configs(setting.OS_DIR)
- systems = {}
- for config in configs:
- logging.info('get config %s', config)
- system_name = config['NAME']
- parent_name = config.get('PARENT', None)
- system = {
- 'name': system_name,
- 'id': system_name,
- 'os_id': system_name,
- 'parent': parent_name,
- 'parent_id': parent_name,
- 'deployable': config.get('DEPLOYABLE', False)
- }
- systems[system_name] = system
- parents = {}
- for name, system in systems.items():
- parent = system.get('parent', None)
- parents[name] = parent
- for name, system in systems.items():
- util.recursive_merge_dict(name, systems, parents)
- return systems
-
-
-def _get_installers_from_configuration(configs):
- """Get installers from configurations.
-
- Example: {
- <installer_instance>: {
- 'alias': <instance_name>,
- 'id': <instance_name>,
- 'name': <name>,
- 'settings': <dict pass to installer plugin>
- }
- }
- """
- installers = {}
- for config in configs:
- name = config['NAME']
- instance_name = config.get('INSTANCE_NAME', name)
- installers[instance_name] = {
- 'alias': instance_name,
- 'id': instance_name,
- 'name': name,
- 'settings': config.get('SETTINGS', {})
- }
- return installers
-
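For orientation, here is a rough sketch of the contract _get_installers_from_configuration implements. Only the key names ('NAME', 'INSTANCE_NAME', 'SETTINGS') come from the parser above; the installer name and settings values are invented placeholders.

    # Illustrative input: one parsed config dict, shaped the way
    # util.load_configs is expected to return it.
    configs = [{
        'NAME': 'some_os_installer',            # installer plugin name (placeholder)
        'INSTANCE_NAME': 'installer_instance',  # optional; defaults to NAME
        'SETTINGS': {'url': 'http://127.0.0.1/api'},  # passed to the plugin
    }]
    installers = _get_installers_from_configuration(configs)
    # installers == {
    #     'installer_instance': {
    #         'alias': 'installer_instance',
    #         'id': 'installer_instance',
    #         'name': 'some_os_installer',
    #         'settings': {'url': 'http://127.0.0.1/api'},
    #     }
    # }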
-
-def _get_os_installers_from_configuration():
- """Get os installers from os installer config dir."""
- configs = util.load_configs(setting.OS_INSTALLER_DIR)
- return _get_installers_from_configuration(configs)
-
-
-def _get_package_installers_from_configuration():
- """Get package installers from package installer config dir."""
- configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
- return _get_installers_from_configuration(configs)
-
-
-def _get_adapters_from_configuration():
- """Get adapters from adapter config dir."""
- configs = util.load_configs(setting.ADAPTER_DIR)
- adapters = {}
- for config in configs:
- logging.info('add config %s to adapter', config)
- if 'OS_INSTALLER' in config:
- os_installer = OS_INSTALLERS[config['OS_INSTALLER']]
- else:
- os_installer = None
-
- if 'PACKAGE_INSTALLER' in config:
- package_installer = PACKAGE_INSTALLERS[
- config['PACKAGE_INSTALLER']
- ]
- else:
- package_installer = None
-
- adapter_name = config['NAME']
- parent_name = config.get('PARENT', None)
- adapter = {
- 'name': adapter_name,
- 'id': adapter_name,
- 'parent': parent_name,
- 'parent_id': parent_name,
- 'display_name': config.get('DISPLAY_NAME', adapter_name),
- 'os_installer': os_installer,
- 'package_installer': package_installer,
- 'deployable': config.get('DEPLOYABLE', False),
- 'health_check_cmd': config.get('HEALTH_CHECK_COMMAND', None),
- 'supported_oses': [],
- 'roles': [],
- 'flavors': []
- }
- supported_os_patterns = [
- re.compile(supported_os_pattern)
- for supported_os_pattern in config.get('SUPPORTED_OS_PATTERNS', [])
- ]
- for os_name, os in OSES.items():
- if not os.get('deployable', False):
- continue
- for supported_os_pattern in supported_os_patterns:
- if supported_os_pattern.match(os_name):
- adapter['supported_oses'].append(os)
- break
- adapters[adapter_name] = adapter
-
- parents = {}
- for name, adapter in adapters.items():
- parent = adapter.get('parent', None)
- parents[name] = parent
- for name, adapter in adapters.items():
- util.recursive_merge_dict(name, adapters, parents)
- return adapters
-
-
-def _add_roles_from_configuration():
- """Get roles from roles config dir and update to adapters."""
- configs = util.load_configs(setting.ADAPTER_ROLE_DIR)
- for config in configs:
- logging.info(
- 'add config %s to role', config
- )
- adapter_name = config['ADAPTER_NAME']
- adapter = ADAPTERS[adapter_name]
- adapter_roles = ADAPTERS_ROLES.setdefault(adapter_name, {})
- for role_dict in config['ROLES']:
- role_name = role_dict['role']
- display_name = role_dict.get('display_name', role_name)
- adapter_roles[role_name] = {
- 'name': role_name,
- 'id': '%s:%s' % (adapter_name, role_name),
- 'adapter_id': adapter_name,
- 'adapter_name': adapter_name,
- 'display_name': display_name,
- 'description': role_dict.get('description', display_name),
- 'optional': role_dict.get('optional', False)
- }
- parents = {}
- for name, adapter in ADAPTERS.items():
- parent = adapter.get('parent', None)
- parents[name] = parent
- for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
- util.recursive_merge_dict(adapter_name, ADAPTERS_ROLES, parents)
- for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
- adapter = ADAPTERS[adapter_name]
- adapter['roles'] = adapter_roles.values()
-
-
-def _add_flavors_from_configuration():
- """Get flavors from flavor config dir and update to adapters."""
- configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR)
- for config in configs:
- logging.info('add config %s to flavor', config)
- adapter_name = config['ADAPTER_NAME']
- adapter = ADAPTERS[adapter_name]
- adapter_flavors = ADAPTERS_FLAVORS.setdefault(adapter_name, {})
- adapter_roles = ADAPTERS_ROLES[adapter_name]
- for flavor_dict in config['FLAVORS']:
- flavor_name = flavor_dict['flavor']
- flavor_id = '%s:%s' % (adapter_name, flavor_name)
- flavor = {
- 'name': flavor_name,
- 'id': flavor_id,
- 'adapter_id': adapter_name,
- 'adapter_name': adapter_name,
- 'display_name': flavor_dict.get('display_name', flavor_name),
- 'template': flavor_dict.get('template', None)
- }
- flavor_roles = flavor_dict.get('roles', [])
- roles_in_flavor = []
- for flavor_role in flavor_roles:
- if isinstance(flavor_role, basestring):
- role_name = flavor_role
- role_in_flavor = {
- 'name': role_name,
- 'flavor_id': flavor_id
- }
- else:
- role_in_flavor = flavor_role
- role_in_flavor['flavor_id'] = flavor_id
- if 'role' in role_in_flavor:
- role_in_flavor['name'] = role_in_flavor['role']
- del role_in_flavor['role']
- role_name = role_in_flavor['name']
- role = adapter_roles[role_name]
- util.merge_dict(role_in_flavor, role, override=False)
- roles_in_flavor.append(role_in_flavor)
- flavor['roles'] = roles_in_flavor
- adapter_flavors[flavor_name] = flavor
- parents = {}
- for name, adapter in ADAPTERS.items():
- parent = adapter.get('parent', None)
- parents[name] = parent
- for adapter_name, adapter_roles in ADAPTERS_FLAVORS.items():
- util.recursive_merge_dict(adapter_name, ADAPTERS_FLAVORS, parents)
- for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
- adapter = ADAPTERS[adapter_name]
- adapter['flavors'] = adapter_flavors.values()
-
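Likewise, a hedged sketch of the role config shape _add_roles_from_configuration reads. The adapter and role names are placeholders; only the keys ('ADAPTER_NAME', 'ROLES', 'role', 'display_name', 'optional') are taken from the loader above.

    # Illustrative role config dict as loaded from ADAPTER_ROLE_DIR.
    role_config = {
        'ADAPTER_NAME': 'example_adapter',   # must match an adapter NAME
        'ROLES': [
            {'role': 'controller', 'display_name': 'controller node'},
            {'role': 'compute', 'optional': True},
        ],
    }
    # Each role is stored under ADAPTERS_ROLES['example_adapter'] with an id
    # of the form '<adapter_name>:<role_name>', then merged down to child
    # adapters and attached to ADAPTERS['example_adapter']['roles'].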
-
-def load_adapters_internal(force_reload=False):
- """Load adapter related configurations into memory.
-
- If force_reload, reload all configurations even if they are already loaded.
- """
- global OSES
- if force_reload or OSES is None:
- OSES = _get_oses_from_configuration()
- global OS_INSTALLERS
- if force_reload or OS_INSTALLERS is None:
- OS_INSTALLERS = _get_os_installers_from_configuration()
- global PACKAGE_INSTALLERS
- if force_reload or PACKAGE_INSTALLERS is None:
- PACKAGE_INSTALLERS = _get_package_installers_from_configuration()
- global ADAPTERS
- if force_reload or ADAPTERS is None:
- ADAPTERS = _get_adapters_from_configuration()
- global ADAPTERS_ROLES
- if force_reload or ADAPTERS_ROLES is None:
- ADAPTERS_ROLES = {}
- _add_roles_from_configuration()
- global ADAPTERS_FLAVORS
- if force_reload or ADAPTERS_FLAVORS is None:
- ADAPTERS_FLAVORS = {}
- _add_flavors_from_configuration()
-
-
-def get_adapters_internal(force_reload=False):
- """Get all deployable adapters."""
- load_adapters_internal(force_reload=force_reload)
- adapter_mapping = {}
- for adapter_name, adapter in ADAPTERS.items():
- if adapter.get('deployable'):
- # TODO(xicheng): adapter should be filtered before
- # return to caller.
- adapter_mapping[adapter_name] = adapter
- else:
- logging.info(
- 'ignore adapter %s since it is not deployable',
- adapter_name
- )
- return adapter_mapping
-
-
-def get_flavors_internal(force_reload=False):
- """Get all deployable flavors."""
- load_adapters_internal(force_reload=force_reload)
- adapter_flavor_mapping = {}
- for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
- adapter = ADAPTERS.get(adapter_name, {})
- for flavor_name, flavor in adapter_flavors.items():
- if adapter.get('deployable'):
- # TODO(xicheng): flavor dict should be filtered before
- # return to caller.
- adapter_flavor_mapping.setdefault(
- adapter_name, {}
- )[flavor_name] = flavor
- else:
- logging.info(
- 'ignore adapter %s since it is not deployable',
- adapter_name
- )
-
- return adapter_flavor_mapping
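These two getters are the module's read API. Below is a minimal consumption sketch, mirroring what adapter_holder.py (the next file) does; the adapter and flavor names returned depend entirely on the config dirs, so only the mapping shapes should be relied on here.

    from compass.db.api import adapter as adapter_api

    # First call loads the config dirs into module-level caches;
    # force_reload=True re-reads them from disk.
    adapters = adapter_api.get_adapters_internal()
    flavors = adapter_api.get_flavors_internal()

    # adapters: {adapter_name: {'roles': [...], 'flavors': [...],
    #                           'supported_oses': [...], ...}}
    # flavors:  {adapter_name: {flavor_name: flavor_dict}}
    # A flat '<adapter>:<flavor>' index can be built like this:
    flavor_index = {}
    for adapter_name, adapter_flavors in flavors.items():
        for flavor_name, flavor in adapter_flavors.items():
            flavor_index['%s:%s' % (adapter_name, flavor_name)] = flavor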
diff --git a/compass-tasks/db/api/adapter_holder.py b/compass-tasks/db/api/adapter_holder.py
deleted file mode 100644
index 91c65c4..0000000
--- a/compass-tasks/db/api/adapter_holder.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Adapter related object holder."""
-import logging
-
-from compass.db.api import adapter as adapter_api
-from compass.db.api import database
-from compass.db.api import permission
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-
-
-SUPPORTED_FIELDS = [
- 'name',
-]
-RESP_FIELDS = [
- 'id', 'name', 'roles', 'flavors',
- 'os_installer', 'package_installer',
- 'supported_oses', 'display_name', 'health_check_cmd'
-]
-RESP_OS_FIELDS = [
- 'id', 'name', 'os_id'
-]
-RESP_ROLES_FIELDS = [
- 'id', 'name', 'display_name', 'description', 'optional'
-]
-RESP_FLAVORS_FIELDS = [
- 'id', 'adapter_id', 'adapter_name', 'name', 'display_name',
- 'template', 'roles'
-]
-
-
-ADAPTER_MAPPING = None
-FLAVOR_MAPPING = None
-
-
-def load_adapters(force_reload=False):
- global ADAPTER_MAPPING
- if force_reload or ADAPTER_MAPPING is None:
- logging.info('load adapters into memory')
- ADAPTER_MAPPING = adapter_api.get_adapters_internal(
- force_reload=force_reload
- )
-
-
-def load_flavors(force_reload=False):
- global FLAVOR_MAPPING
- if force_reload or FLAVOR_MAPPING is None:
- logging.info('load flavors into memory')
- FLAVOR_MAPPING = {}
- adapters_flavors = adapter_api.get_flavors_internal(
- force_reload=force_reload
- )
- for adapter_name, adapter_flavors in adapters_flavors.items():
- for flavor_name, flavor in adapter_flavors.items():
- FLAVOR_MAPPING['%s:%s' % (adapter_name, flavor_name)] = flavor
-
-
-def _filter_adapters(adapter_config, filter_name, filter_value):
- if filter_name not in adapter_config:
- return False
- if isinstance(filter_value, list):
- return bool(
- adapter_config[filter_name] in filter_value
- )
- elif isinstance(filter_value, dict):
- return all([
- _filter_adapters(
- adapter_config[filter_name],
- sub_filter_key, sub_filter_value
- )
- for sub_filter_key, sub_filter_value in filter_value.items()
- ])
- else:
- return adapter_config[filter_name] == filter_value
-
-
-@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_ADAPTERS
-)
-@utils.output_filters(name=utils.general_filter_callback)
-@utils.wrap_to_dict(
- RESP_FIELDS,
- supported_oses=RESP_OS_FIELDS,
- roles=RESP_ROLES_FIELDS,
- flavors=RESP_FLAVORS_FIELDS
-)
-def list_adapters(user=None, session=None, **filters):
- """list adapters."""
- load_adapters()
- return ADAPTER_MAPPING.values()
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_ADAPTERS
-)
-@utils.wrap_to_dict(
- RESP_FIELDS,
- supported_oses=RESP_OS_FIELDS,
- roles=RESP_ROLES_FIELDS,
- flavors=RESP_FLAVORS_FIELDS
-)
-def get_adapter(adapter_id, user=None, session=None, **kwargs):
- """get adapter."""
- load_adapters()
- if adapter_id not in ADAPTER_MAPPING:
- raise exception.RecordNotExists(
- 'adapter %s does not exist' % adapter_id
- )
- return ADAPTER_MAPPING[adapter_id]
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
-def list_flavors(user=None, session=None, **filters):
- """List flavors."""
- load_flavors()
- return FLAVOR_MAPPING.values()
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
-def get_flavor(flavor_id, user=None, session=None, **kwargs):
- """Get flavor."""
- load_flavors()
- if flavor_id not in FLAVOR_MAPPING:
- raise exception.RecordNotExists(
- 'flavor %s does not exist' % flavor_id
- )
- return FLAVOR_MAPPING[flavor_id]
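All read endpoints in this module share the same decorator stack: filter the incoming kwargs, open a session, check a permission, then whitelist the response fields. As a hedged illustration of how another read-only call could be composed from the same pieces (the function itself is hypothetical; the decorators and constants are the ones used above):

    @utils.supported_filters([])
    @database.run_in_session()
    @user_api.check_user_permission(
        permission.PERMISSION_LIST_ADAPTERS
    )
    @utils.wrap_to_dict(RESP_OS_FIELDS)
    def list_adapter_oses(adapter_id, user=None, session=None, **kwargs):
        """Sketch: list the supported oses of one adapter."""
        load_adapters()
        if adapter_id not in ADAPTER_MAPPING:
            raise exception.RecordNotExists(
                'adapter %s does not exist' % adapter_id
            )
        return ADAPTER_MAPPING[adapter_id]['supported_oses']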
diff --git a/compass-tasks/db/api/cluster.py b/compass-tasks/db/api/cluster.py
deleted file mode 100644
index 7a7022c..0000000
--- a/compass-tasks/db/api/cluster.py
+++ /dev/null
@@ -1,2444 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Cluster database operations."""
-import copy
-import functools
-import logging
-import re
-
-from compass.db.api import adapter_holder as adapter_api
-from compass.db.api import database
-from compass.db.api import metadata_holder as metadata_api
-from compass.db.api import permission
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-from compass.utils import util
-
-
-SUPPORTED_FIELDS = [
- 'name', 'os_name', 'owner',
- 'adapter_name', 'flavor_name'
-]
-SUPPORTED_CLUSTERHOST_FIELDS = []
-RESP_FIELDS = [
- 'id', 'name', 'os_name', 'os_id', 'adapter_id', 'flavor_id',
- 'reinstall_distributed_system', 'flavor',
- 'distributed_system_installed',
- 'owner', 'adapter_name', 'flavor_name',
- 'created_at', 'updated_at'
-]
-RESP_CLUSTERHOST_FIELDS = [
- 'id', 'host_id', 'clusterhost_id', 'machine_id',
- 'name', 'hostname', 'roles', 'os_installer',
- 'cluster_id', 'clustername', 'location', 'tag',
- 'networks', 'mac', 'switch_ip', 'port', 'switches',
- 'os_installed', 'distributed_system_installed',
- 'os_name', 'os_id', 'ip',
- 'reinstall_os', 'reinstall_distributed_system',
- 'owner', 'cluster_id',
- 'created_at', 'updated_at',
- 'patched_roles'
-]
-RESP_CONFIG_FIELDS = [
- 'os_config',
- 'package_config',
- 'config_step',
- 'config_validated',
- 'created_at',
- 'updated_at'
-]
-RESP_DEPLOYED_CONFIG_FIELDS = [
- 'deployed_os_config',
- 'deployed_package_config',
- 'created_at',
- 'updated_at'
-]
-RESP_METADATA_FIELDS = [
- 'os_config', 'package_config'
-]
-RESP_CLUSTERHOST_CONFIG_FIELDS = [
- 'package_config',
- 'os_config',
- 'config_step',
- 'config_validated',
- 'networks',
- 'created_at',
- 'updated_at'
-]
-RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
- 'deployed_os_config',
- 'deployed_package_config',
- 'created_at',
- 'updated_at'
-]
-RESP_STATE_FIELDS = [
- 'id', 'state', 'percentage', 'message', 'severity',
- 'status', 'ready',
- 'created_at', 'updated_at'
-]
-RESP_CLUSTERHOST_STATE_FIELDS = [
- 'id', 'state', 'percentage', 'message', 'severity',
- 'ready', 'created_at', 'updated_at'
-]
-RESP_REVIEW_FIELDS = [
- 'cluster', 'hosts'
-]
-RESP_DEPLOY_FIELDS = [
- 'status', 'cluster', 'hosts'
-]
-IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
-ADDED_FIELDS = ['name', 'adapter_id', 'os_id']
-OPTIONAL_ADDED_FIELDS = ['flavor_id']
-UPDATED_FIELDS = ['name', 'reinstall_distributed_system']
-ADDED_HOST_FIELDS = ['machine_id']
-UPDATED_HOST_FIELDS = ['name', 'reinstall_os']
-UPDATED_CLUSTERHOST_FIELDS = ['roles', 'patched_roles']
-PATCHED_CLUSTERHOST_FIELDS = ['patched_roles']
-UPDATED_CONFIG_FIELDS = [
- 'put_os_config', 'put_package_config', 'config_step'
-]
-UPDATED_DEPLOYED_CONFIG_FIELDS = [
- 'deployed_os_config', 'deployed_package_config'
-]
-PATCHED_CONFIG_FIELDS = [
- 'patched_os_config', 'patched_package_config', 'config_step'
-]
-UPDATED_CLUSTERHOST_CONFIG_FIELDS = [
- 'put_os_config',
- 'put_package_config'
-]
-PATCHED_CLUSTERHOST_CONFIG_FIELDS = [
- 'patched_os_config',
- 'patched_package_config'
-]
-UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
- 'deployed_os_config',
- 'deployed_package_config'
-]
-UPDATED_CLUSTERHOST_STATE_FIELDS = [
- 'state', 'percentage', 'message', 'severity'
-]
-UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS = [
- 'ready'
-]
-UPDATED_CLUSTER_STATE_FIELDS = ['state']
-IGNORE_UPDATED_CLUSTER_STATE_FIELDS = ['percentage', 'message', 'severity']
-UPDATED_CLUSTER_STATE_INTERNAL_FIELDS = ['ready']
-RESP_CLUSTERHOST_LOG_FIELDS = [
- 'clusterhost_id', 'id', 'host_id', 'cluster_id',
- 'filename', 'position', 'partial_line',
- 'percentage',
- 'message', 'severity', 'line_matcher_name'
-]
-ADDED_CLUSTERHOST_LOG_FIELDS = [
- 'filename'
-]
-UPDATED_CLUSTERHOST_LOG_FIELDS = [
- 'position', 'partial_line', 'percentage',
- 'message', 'severity', 'line_matcher_name'
-]
-
-
-@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERS
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_clusters(user=None, session=None, **filters):
- """List clusters."""
- clusters = utils.list_db_objects(
- session, models.Cluster, **filters
- )
- logging.info('user is %s', user.email)
- if not user.is_admin and len(clusters):
- clusters = [c for c in clusters if c.owner == user.email]
- return clusters
-
-
-def _get_cluster(cluster_id, session=None, **kwargs):
- """Get cluster by id."""
- if isinstance(cluster_id, (int, long)):
- return utils.get_db_object(
- session, models.Cluster, id=cluster_id, **kwargs
- )
- raise exception.InvalidParameter(
- 'cluster id %s type is not int compatible' % cluster_id
- )
-
-
-def get_cluster_internal(cluster_id, session=None, **kwargs):
- """Helper function to get cluster.
-
- Should be only used by other files under db/api.
- """
- return _get_cluster(cluster_id, session=session, **kwargs)
-
-
-def _get_cluster_host(
- cluster_id, host_id, session=None, **kwargs
-):
- """Get clusterhost by cluster id and host id."""
- cluster = _get_cluster(cluster_id, session=session, **kwargs)
- from compass.db.api import host as host_api
- host = host_api.get_host_internal(host_id, session=session, **kwargs)
- return utils.get_db_object(
- session, models.ClusterHost,
- cluster_id=cluster.id,
- host_id=host.id,
- **kwargs
- )
-
-
-def _get_clusterhost(clusterhost_id, session=None, **kwargs):
- """Get clusterhost by clusterhost id."""
- if isinstance(clusterhost_id, (int, long)):
- return utils.get_db_object(
- session, models.ClusterHost,
- clusterhost_id=clusterhost_id,
- **kwargs
- )
- raise exception.InvalidParameter(
- 'clusterhost id %s type is not int compatible' % clusterhost_id
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERS
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_cluster(
- cluster_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """Get cluster info."""
- return _get_cluster(
- cluster_id,
- session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERS)
-def is_cluster_os_ready(
- cluster_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- cluster = utils.get_db_object(
- session, models.Cluster, exception_when_missing, id=cluster_id)
-
- all_states = ([i.host.state.ready for i in cluster.clusterhosts])
-
- logging.info("is_cluster_os_ready: all_states %s" % all_states)
-
- return all(all_states)
-
-
-def check_cluster_validated(cluster):
- """Check cluster is validated."""
- if not cluster.config_validated:
- raise exception.Forbidden(
- 'cluster %s is not validated' % cluster.name
- )
-
-
-def check_clusterhost_validated(clusterhost):
- """Check clusterhost is validated."""
- if not clusterhost.config_validated:
- raise exception.Forbidden(
- 'clusterhost %s is not validated' % clusterhost.name
- )
-
-
-def check_cluster_editable(
- cluster, user=None,
- check_in_installing=False
-):
- """Check if cluster is editable.
-
- Set check_in_installing to True when updating the cluster
- reinstall_distributed_system attribute, or whenever we only need to
- make sure the cluster is not in the installing state.
- Otherwise we make sure the cluster is not being deployed and not
- already deployed.
- If the user is neither an admin nor the owner of the cluster, the
- check fails so that the user cannot update the cluster attributes.
- """
- if check_in_installing:
- if cluster.state.state == 'INSTALLING':
- raise exception.Forbidden(
- 'cluster %s is not editable '
- 'when state is installing' % cluster.name
- )
-# elif (
-# cluster.flavor_name and
-# not cluster.reinstall_distributed_system
-# ):
-# raise exception.Forbidden(
-# 'cluster %s is not editable '
-# 'when not to be reinstalled' % cluster.name
-# )
- if user and not user.is_admin and cluster.creator_id != user.id:
- raise exception.Forbidden(
- 'cluster %s is not editable '
- 'when user is not admin or cluster owner' % cluster.name
- )
-
-
-def is_cluster_editable(
- cluster, user=None,
- check_in_installing=False
-):
- """Get if cluster is editble."""
- try:
- check_cluster_editable(
- cluster, user=user,
- check_in_installing=check_in_installing
- )
- return True
- except exception.Forbidden:
- return False
-
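The two helpers above cover both calling styles used in the rest of this file: check_cluster_editable raises Forbidden, while is_cluster_editable swallows it and returns a boolean. A brief hedged sketch (cluster, user and session are assumed to come from the surrounding decorated function):

    # Raise-style: abort the request if the cluster cannot be modified.
    check_cluster_editable(cluster, user=user, check_in_installing=True)
    utils.update_db_object(session, cluster, name='new-name')

    # Predicate-style: quietly skip work the user is not allowed to do.
    if is_cluster_editable(cluster, user=user):
        utils.update_db_object(session, cluster, name='new-name')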
-
-@utils.supported_filters(
- ADDED_FIELDS,
- optional_support_keys=OPTIONAL_ADDED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(name=utils.check_name)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTER
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def add_cluster(
- exception_when_existing=True,
- name=None, adapter_id=None, flavor_id=None,
- user=None, session=None, **kwargs
-):
- """Create a cluster."""
- adapter = adapter_api.get_adapter(
- adapter_id, user=user, session=session
- )
- # if flavor_id is not None, also set flavor field.
- # In future maybe we can move the use of flavor from
- # models.py to db/api and explicitly get flavor when
- # needed instead of setting flavor into cluster record.
- flavor = {}
- if flavor_id:
- flavor = adapter_api.get_flavor(
- flavor_id,
- user=user, session=session
- )
- if flavor['adapter_id'] != adapter['id']:
- raise exception.InvalidParameter(
- 'flavor %s is not of adapter %s' % (
- flavor_id, adapter_id
- )
- )
-
- cluster = utils.add_db_object(
- session, models.Cluster, exception_when_existing,
- name, user.id, adapter_id=adapter_id,
- flavor_id=flavor_id, flavor=flavor, **kwargs
- )
- return cluster
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(name=utils.check_name)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTER
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def update_cluster(cluster_id, user=None, session=None, **kwargs):
- """Update a cluster."""
- cluster = _get_cluster(
- cluster_id, session=session
- )
- check_cluster_editable(
- cluster, user=user,
- check_in_installing=(
- kwargs.get('reinstall_distributed_system', False)
- )
- )
- return utils.update_db_object(session, cluster, **kwargs)
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_CLUSTER
-)
-@utils.wrap_to_dict(
- RESP_FIELDS + ['status', 'cluster', 'hosts'],
- cluster=RESP_FIELDS,
- hosts=RESP_CLUSTERHOST_FIELDS
-)
-def del_cluster(
- cluster_id, force=False, from_database_only=False,
- delete_underlying_host=False, user=None, session=None, **kwargs
-):
- """Delete a cluster.
-
- If force, the cluster will be deleted anyway. It is used by cli to
- force clean a cluster in any case.
- If from_database_only, the cluster record will only be removed from
- database. Otherwise, a del task is sent to celery to do clean deletion.
- If delete_underlying_host, all hosts under this cluster will also be
- deleted.
- The backend will call del_cluster again with from_database_only set
- when it has done the deletion work on os installer/package installer.
- """
- cluster = _get_cluster(
- cluster_id, session=session
- )
- logging.debug(
- 'delete cluster %s with force=%s '
- 'from_database_only=%s delete_underlying_host=%s',
- cluster.id, force, from_database_only, delete_underlying_host
- )
- # Force the cluster state, and the state of any clusterhost in the
- # cluster, to ERROR when we want to delete the cluster anyway, even
- # if the cluster is installing or already installed.
- # It lets the api know the deletion is in progress while the backend
- # is doing the real deletion.
- # In future we may introduce a new state like INDELETE to indicate
- # that the deletion is in progress.
- # We still need to discuss whether the api should allow deleting a
- # cluster that is already installed.
- for clusterhost in cluster.clusterhosts:
- if clusterhost.state.state != 'UNINITIALIZED' and force:
- clusterhost.state.state = 'ERROR'
- if delete_underlying_host:
- host = clusterhost.host
- if host.state.state != 'UNINITIALIZED' and force:
- host.state.state = 'ERROR'
- if cluster.state.state != 'UNINITIALIZED' and force:
- cluster.state.state = 'ERROR'
-
- check_cluster_editable(
- cluster, user=user,
- check_in_installing=True
- )
-
- # delete underlying host if delete_underlying_host is set.
- if delete_underlying_host:
- for clusterhost in cluster.clusterhosts:
- # delete the underlying host only if the user has permission.
- from compass.db.api import host as host_api
- host = clusterhost.host
- if host_api.is_host_editable(
- host, user=user, check_in_installing=True
- ):
- # Delete host record directly in database when there is no need
- # to do the deletion in backend or from_database_only is set.
- if host.state.state == 'UNINITIALIZED' or from_database_only:
- utils.del_db_object(
- session, host
- )
-
- # Delete cluster record directly in database when there
- # is no need to do the deletion in backend or from_database_only is set.
- if cluster.state.state == 'UNINITIALIZED' or from_database_only:
- return utils.del_db_object(
- session, cluster
- )
- else:
- from compass.tasks import client as celery_client
- logging.info('send del cluster %s task to celery', cluster_id)
- celery_client.celery.send_task(
- 'compass.tasks.delete_cluster',
- (
- user.email, cluster.id,
- [
- clusterhost.host_id
- for clusterhost in cluster.clusterhosts
- ],
- delete_underlying_host
- ),
- queue=user.email,
- exchange=user.email,
- routing_key=user.email
- )
- return {
- 'status': 'delete action is sent',
- 'cluster': cluster,
- 'hosts': cluster.clusterhosts
- }
-
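The docstring above describes a two-phase deletion, sketched below. cluster_id and current_user are placeholders, and the exact call style used by the REST layer and the celery backend may differ from this keyword form.

    # Phase 1 (api side): force states to ERROR and dispatch the celery task.
    del_cluster(cluster_id, force=True, delete_underlying_host=True,
                user=current_user)

    # Phase 2 (backend side): after the os/package installers are cleaned up,
    # the task calls back with from_database_only=True to drop the records.
    del_cluster(cluster_id, force=True, from_database_only=True,
                delete_underlying_host=True, user=current_user)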
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTER_CONFIG
-)
-@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
-def get_cluster_config(cluster_id, user=None, session=None, **kwargs):
- """Get cluster config."""
- return _get_cluster(cluster_id, session=session)
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTER_CONFIG
-)
-@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
-def get_cluster_deployed_config(cluster_id, user=None, session=None, **kwargs):
- """Get cluster deployed config."""
- return _get_cluster(cluster_id, session=session)
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_METADATA_FIELDS)
-def get_cluster_metadata(cluster_id, user=None, session=None, **kwargs):
- """Get cluster metadata.
-
- If the cluster has no flavor, it is an os-only cluster.
- We ignore package metadata for os-only clusters.
- """
- cluster = _get_cluster(cluster_id, session=session)
- metadatas = {}
- os_name = cluster.os_name
- if os_name:
- metadatas.update(
- metadata_api.get_os_metadata(
- os_name, session=session
- )
- )
- flavor_id = cluster.flavor_id
- if flavor_id:
- metadatas.update(
- metadata_api.get_flavor_metadata(
- flavor_id,
- user=user, session=session
- )
- )
-
- return metadatas
-
-
-def _cluster_os_config_validates(
- config, cluster, session=None, user=None, **kwargs
-):
- """Check cluster os config validation."""
- metadata_api.validate_os_config(
- config, cluster.os_id
- )
-
-
-def _cluster_package_config_validates(
- config, cluster, session=None, user=None, **kwargs
-):
- """Check cluster package config validation."""
- metadata_api.validate_flavor_config(
- config, cluster.flavor_id
- )
-
-
-@utils.input_validates_with_args(
- put_os_config=_cluster_os_config_validates,
- put_package_config=_cluster_package_config_validates
-)
-@utils.output_validates_with_args(
- os_config=_cluster_os_config_validates,
- package_config=_cluster_package_config_validates
-)
-@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
-def _update_cluster_config(cluster, session=None, user=None, **kwargs):
- """Update a cluster config."""
- check_cluster_editable(cluster, user=user)
- return utils.update_db_object(
- session, cluster, **kwargs
- )
-
-
-# replace os_config to deployed_os_config,
-# package_config to deployed_package_config
-@utils.replace_filters(
- os_config='deployed_os_config',
- package_config='deployed_package_config'
-)
-@utils.supported_filters(
- optional_support_keys=UPDATED_DEPLOYED_CONFIG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTER_CONFIG
-)
-@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
-def update_cluster_deployed_config(
- cluster_id, user=None, session=None, **kwargs
-):
- """Update cluster deployed config."""
- cluster = _get_cluster(cluster_id, session=session)
- check_cluster_editable(cluster, user=user)
- check_cluster_validated(cluster)
- return utils.update_db_object(
- session, cluster, **kwargs
- )
-
-
-# replace os_config to put_os_config,
-# package_config to put_package_config in kwargs.
-# It tells db these fields will be updated not patched.
-@utils.replace_filters(
- os_config='put_os_config',
- package_config='put_package_config'
-)
-@utils.supported_filters(
- optional_support_keys=UPDATED_CONFIG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTER_CONFIG
-)
-def update_cluster_config(cluster_id, user=None, session=None, **kwargs):
- """Update cluster config."""
- cluster = _get_cluster(cluster_id, session=session)
- return _update_cluster_config(
- cluster, session=session, user=user, **kwargs
- )
-
-
-# replace os_config to patched_os_config and
-# package_config to patched_package_config in kwargs.
-# It tells db these fields will be patched not updated.
-@utils.replace_filters(
- os_config='patched_os_config',
- package_config='patched_package_config'
-)
-@utils.supported_filters(
- optional_support_keys=PATCHED_CONFIG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTER_CONFIG
-)
-def patch_cluster_config(cluster_id, user=None, session=None, **kwargs):
- """patch cluster config."""
- cluster = _get_cluster(cluster_id, session=session)
- return _update_cluster_config(
- cluster, session=session, user=user, **kwargs
- )
-
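The pair above only differs in how the config kwargs are renamed before hitting the db layer: update_cluster_config maps os_config/package_config to put_* (the stored value is replaced), while patch_cluster_config maps them to patched_* (the new value is folded into the stored one). A hedged sketch, with invented config keys and a placeholder user:

    # PUT semantics: the stored os_config is replaced wholesale.
    update_cluster_config(
        cluster_id, user=current_user,
        os_config={'general': {'language': 'EN', 'timezone': 'UTC'}}
    )

    # PATCH semantics: only the supplied keys are merged into the stored dict.
    patch_cluster_config(
        cluster_id, user=current_user,
        os_config={'general': {'timezone': 'UTC'}}
    )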
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_CLUSTER_CONFIG
-)
-@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
-def del_cluster_config(cluster_id, user=None, session=None):
- """Delete a cluster config."""
- cluster = _get_cluster(
- cluster_id, session=session
- )
- check_cluster_editable(cluster, user=user)
- return utils.update_db_object(
- session, cluster, os_config={},
- package_config={}, config_validated=False
- )
-
-
-def _roles_validates(roles, cluster, session=None, user=None):
- """Check roles is validated to a cluster's roles."""
- if roles:
- if not cluster.flavor_name:
- raise exception.InvalidParameter(
- 'no flavor in cluster %s' % cluster.name
- )
- cluster_roles = [role['name'] for role in cluster.flavor['roles']]
- for role in roles:
- if role not in cluster_roles:
- raise exception.InvalidParameter(
- 'role %s is not in cluster roles %s' % (
- role, cluster_roles
- )
- )
-
-
-def _cluster_host_roles_validates(
- value, cluster, host, session=None, user=None, **kwargs
-):
- """Check clusterhost roles is validated by cluster and host."""
- _roles_validates(value, cluster, session=session, user=user)
-
-
-def _clusterhost_roles_validates(
- value, clusterhost, session=None, user=None, **kwargs
-):
- """Check clusterhost roles is validated by clusterhost."""
- _roles_validates(
- value, clusterhost.cluster, session=session, user=user
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_HOST_FIELDS,
- ignore_support_keys=UPDATED_CLUSTERHOST_FIELDS
-)
-@utils.input_validates(name=utils.check_name)
-def _add_host_if_not_exist(
- machine_id, cluster, session=None, user=None, **kwargs
-):
- """Add underlying host if it does not exist."""
- from compass.db.api import host as host_api
- host = host_api.get_host_internal(
- machine_id, session=session, exception_when_missing=False
- )
- if host:
- if kwargs:
- # ignore update underlying host if host is not editable.
- from compass.db.api import host as host_api
- if host_api.is_host_editable(
- host, user=cluster.creator,
- check_in_installing=kwargs.get('reinstall_os', False),
- ):
- utils.update_db_object(
- session, host,
- **kwargs
- )
- else:
- logging.debug(
- 'ignore update host %s '
- 'since it is not editable' % host.name
- )
- else:
- logging.debug('nothing to update for host %s', host.name)
- else:
- from compass.db.api import adapter_holder as adapter_api
- adapter = adapter_api.get_adapter(
- cluster.adapter_name, user=user, session=session
- )
- host = utils.add_db_object(
- session, models.Host, False, machine_id,
- os_name=cluster.os_name,
- os_installer=adapter['os_installer'],
- creator=cluster.creator,
- **kwargs
- )
- return host
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_FIELDS,
- ignore_support_keys=UPDATED_HOST_FIELDS
-)
-@utils.input_validates_with_args(
- roles=_cluster_host_roles_validates
-)
-def _add_clusterhost_only(
- cluster, host,
- exception_when_existing=False,
- session=None, user=None,
- **kwargs
-):
- """Get clusterhost only."""
- if not cluster.state.state == "UNINITIALIZED":
- cluster.state.ready = False
- cluster.state.state = "UNINITIALIZED"
- cluster.state.percentage = 0.0
- utils.update_db_object(session, cluster.state, state="UNINITIALIZED")
-
- return utils.add_db_object(
- session, models.ClusterHost, exception_when_existing,
- cluster.id, host.id, **kwargs
- )
-
-
-@utils.supported_filters(
- ADDED_HOST_FIELDS,
- optional_support_keys=UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-def _add_clusterhost(
- cluster,
- exception_when_existing=False,
- session=None, user=None, machine_id=None, **kwargs
-):
- """Add clusterhost and add underlying host if it does not exist."""
- host = _add_host_if_not_exist(
- machine_id, cluster, session=session,
- user=user, **kwargs
- )
-
- return _add_clusterhost_only(
- cluster, host, exception_when_existing=exception_when_existing,
- session=session, user=user, **kwargs
- )
-
-
-def _add_clusterhosts(cluster, machines, session=None, user=None):
- """Add machines to cluster.
-
- Args:
- machines: list of dicts which contain clusterhost attrs to update.
-
- Examples:
- [{'machine_id': 1, 'name': 'host1'}]
- """
- check_cluster_editable(
- cluster, user=user,
- check_in_installing=True
- )
- if cluster.state.state == 'SUCCESSFUL':
- cluster.state.state = 'UPDATE_PREPARING'
- for machine_dict in machines:
- _add_clusterhost(
- cluster, session=session, user=user, **machine_dict
- )
-
-
-def _remove_clusterhosts(cluster, hosts, session=None, user=None):
- """Remove hosts from cluster.
-
- Args:
- hosts: list of host id.
- """
- check_cluster_editable(
- cluster, user=user,
- check_in_installing=True
- )
- utils.del_db_objects(
- session, models.ClusterHost,
- cluster_id=cluster.id, host_id=hosts
- )
-
-
-def _set_clusterhosts(cluster, machines, session=None, user=None):
- """set machines to cluster.
-
- Args:
- machines: list of dict which contains clusterost attr to update.
-
- Examples:
- [{'machine_id': 1, 'name': 'host1'}]
- """
- check_cluster_editable(
- cluster, user=user,
- check_in_installing=True
- )
- utils.del_db_objects(
- session, models.ClusterHost,
- cluster_id=cluster.id
- )
- if cluster.state.state == 'SUCCESSFUL':
- cluster.state.state = 'UPDATE_PREPARING'
- for machine_dict in machines:
- _add_clusterhost(
- cluster, True, session=session, user=user, **machine_dict
- )
-
-
-@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERHOSTS
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
-def list_cluster_hosts(cluster_id, user=None, session=None, **filters):
- """List clusterhosts of a cluster."""
- cluster = _get_cluster(cluster_id, session=session)
- return utils.list_db_objects(
- session, models.ClusterHost, cluster_id=cluster.id,
- **filters
- )
-
-
-@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERHOSTS
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
-def list_clusterhosts(user=None, session=None, **filters):
- """List all clusterhosts."""
- return utils.list_db_objects(
- session, models.ClusterHost, **filters
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERHOSTS
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
-def get_cluster_host(
- cluster_id, host_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """Get clusterhost info by cluster id and host id."""
- return _get_cluster_host(
- cluster_id, host_id, session=session,
- exception_when_missing=exception_when_missing,
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERHOSTS
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
-def get_clusterhost(
- clusterhost_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """Get clusterhost info by clusterhost id."""
- return _get_clusterhost(
- clusterhost_id, session=session,
- exception_when_missing=exception_when_missing,
- user=user
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTER_HOSTS
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
-def add_cluster_host(
- cluster_id, exception_when_existing=True,
- user=None, session=None, **kwargs
-):
- """Add a host to a cluster."""
- cluster = _get_cluster(cluster_id, session=session)
- check_cluster_editable(
- cluster, user=user,
- check_in_installing=True
- )
- if cluster.state.state == 'SUCCESSFUL':
- cluster.state.state = 'UPDATE_PREPARING'
- return _add_clusterhost(
- cluster, exception_when_existing,
- session=session, user=user, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_HOST_FIELDS,
- ignore_support_keys=(
- UPDATED_CLUSTERHOST_FIELDS +
- PATCHED_CLUSTERHOST_FIELDS
- )
-)
-def _update_host_if_necessary(
- clusterhost, session=None, user=None, **kwargs
-):
- """Update underlying host if there is something to update."""
- host = clusterhost.host
- if kwargs:
- # ignore update underlying host if the host is not editable.
- from compass.db.api import host as host_api
- if host_api.is_host_editable(
- host, user=clusterhost.cluster.creator,
- check_in_installing=kwargs.get('reinstall_os', False),
- ):
- utils.update_db_object(
- session, host,
- **kwargs
- )
- else:
- logging.debug(
- 'ignore update host %s since it is not editable' % host.name
- )
- else:
- logging.debug(
- 'nothing to update for host %s', host.name
- )
- return host
-
-
-@utils.supported_filters(
- optional_support_keys=(
- UPDATED_CLUSTERHOST_FIELDS +
- PATCHED_CLUSTERHOST_FIELDS
- ),
- ignore_support_keys=UPDATED_HOST_FIELDS
-)
-@utils.input_validates_with_args(
- roles=_clusterhost_roles_validates,
- patched_roles=_clusterhost_roles_validates
-)
-def _update_clusterhost_only(
- clusterhost, session=None, user=None, **kwargs
-):
- """Update clusterhost only."""
- check_cluster_editable(clusterhost.cluster, user=user)
- return utils.update_db_object(
- session, clusterhost, **kwargs
- )
-
-
-@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
-def _update_clusterhost(clusterhost, session=None, user=None, **kwargs):
- """Update clusterhost and underlying host if necessary."""
- _update_host_if_necessary(
- clusterhost, session=session, user=user, **kwargs
- )
- return _update_clusterhost_only(
- clusterhost, session=session, user=user, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTER_HOSTS
-)
-def update_cluster_host(
- cluster_id, host_id, user=None,
- session=None, **kwargs
-):
- """Update clusterhost by cluster id and host id."""
- logging.info('updating kwargs: %s', kwargs)
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- return _update_clusterhost(
- clusterhost, session=session, user=user, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTER_HOSTS
-)
-def update_clusterhost(
- clusterhost_id, user=None,
- session=None, **kwargs
-):
- """Update clusterhost by clusterhost id."""
- clusterhost = _get_clusterhost(
- clusterhost_id, session=session
- )
- return _update_clusterhost(
- clusterhost, session=session, user=user, **kwargs
- )
-
-
-# replace roles to patched_roles in kwargs.
-# It tells db roles field will be patched.
-@utils.replace_filters(
- roles='patched_roles'
-)
-@utils.supported_filters(
- optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTER_HOSTS
-)
-def patch_cluster_host(
- cluster_id, host_id, user=None,
- session=None, **kwargs
-):
- """Patch clusterhost by cluster id and host id."""
- logging.info("kwargs are %s", kwargs)
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- updated_clusterhost = _update_clusterhost(
- clusterhost, session=session, user=user, **kwargs
- )
- return updated_clusterhost
-
-
-# replace roles to patched_roles in kwargs.
-# It tells db roles field will be patched.
-@utils.replace_filters(
- roles='patched_roles'
-)
-@utils.supported_filters(
- optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTER_HOSTS
-)
-def patch_clusterhost(
- clusterhost_id, user=None, session=None,
- **kwargs
-):
- """Patch clusterhost by clusterhost id."""
- clusterhost = _get_clusterhost(
- clusterhost_id, session=session
- )
- return _update_clusterhost(
- clusterhost, session=session, user=user, **kwargs
- )
-
-
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_CLUSTER_HOST
-)
-@utils.wrap_to_dict(
- RESP_CLUSTERHOST_FIELDS + ['status', 'host'],
- host=RESP_CLUSTERHOST_FIELDS
-)
-def _del_cluster_host(
- clusterhost,
- force=False, from_database_only=False,
- delete_underlying_host=False, user=None,
- session=None, **kwargs
-):
- """delete clusterhost.
-
- If force, the cluster host will be deleted anyway.
- If from_database_only, the cluster host record will only be
- deleted from database. Otherwise a celery task is sent to do
- the clean deletion.
- If delete_underlying_host, the underlying host will also be deleted.
- The backend will call _del_cluster_host again when the clusterhost is
- deleted from os installer/package installer with from_database_only
- set.
- """
- # Force the clusterhost state to ERROR when we want to delete the
- # clusterhost anyway, even if it is installing or already installed.
- # It lets the api know the deletion is in progress while the backend
- # is doing the real deletion. In future we may introduce a new state
- # like INDELETE to indicate that the deletion is in progress.
- # We still need to discuss whether the api should allow deleting a
- # clusterhost that is already installed.
- if clusterhost.state.state != 'UNINITIALIZED' and force:
- clusterhost.state.state = 'ERROR'
- if not force:
- check_cluster_editable(
- clusterhost.cluster, user=user,
- check_in_installing=True
- )
- # delete underlying host if delete_underlying_host is set.
- if delete_underlying_host:
- host = clusterhost.host
- if host.state.state != 'UNINITIALIZED' and force:
- host.state.state = 'ERROR'
- # only delete the host when the user has permission to delete it.
- import compass.db.api.host as host_api
- if host_api.is_host_editable(
- host, user=user,
- check_in_installing=True
- ):
- # if there is no need to do the deletion by backend or
- # from_database_only is set, we only delete the record
- # in database.
- if host.state.state == 'UNINITIALIZED' or from_database_only:
- utils.del_db_object(
- session, host
- )
-
- # if there is no need to do the deletion by backend or
- # from_database_only is set, we only delete the record in database.
- if clusterhost.state.state == 'UNINITIALIZED' or from_database_only:
- return utils.del_db_object(
- session, clusterhost
- )
- else:
- logging.info(
- 'send del cluster %s host %s task to celery',
- clusterhost.cluster_id, clusterhost.host_id
- )
- from compass.tasks import client as celery_client
- celery_client.celery.send_task(
- 'compass.tasks.delete_cluster_host',
- (
- user.email, clusterhost.cluster_id, clusterhost.host_id,
- delete_underlying_host
- ),
- queue=user.email,
- exchange=user.email,
- routing_key=user.email
- )
- return {
- 'status': 'delete action sent',
- 'host': clusterhost,
- }
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-def del_cluster_host(
- cluster_id, host_id,
- force=False, from_database_only=False,
- delete_underlying_host=False, user=None,
- session=None, **kwargs
-):
- """Delete clusterhost by cluster id and host id."""
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- return _del_cluster_host(
- clusterhost, force=force, from_database_only=from_database_only,
- delete_underlying_host=delete_underlying_host, user=user,
- session=session, **kwargs
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-def del_clusterhost(
- clusterhost_id,
- force=False, from_database_only=False,
- delete_underlying_host=False, user=None,
- session=None, **kwargs
-):
- """Delete clusterhost by clusterhost id."""
- clusterhost = _get_clusterhost(
- clusterhost_id, session=session
- )
- return _del_cluster_host(
- clusterhost, force=force, from_database_only=from_database_only,
- delete_underlying_host=delete_underlying_host, user=user,
- session=session, **kwargs
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
-def get_cluster_host_config(
- cluster_id, host_id, user=None,
- session=None, **kwargs
-):
- """Get clusterhost config by cluster id and host id."""
- return _get_cluster_host(
- cluster_id, host_id, session=session
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
-def get_cluster_host_deployed_config(
- cluster_id, host_id, user=None, session=None, **kwargs
-):
- """Get clusterhost deployed config by cluster id and host id."""
- return _get_cluster_host(
- cluster_id, host_id, session=session
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
-def get_clusterhost_config(clusterhost_id, user=None, session=None, **kwargs):
- """Get clusterhost config by clusterhost id."""
- return _get_clusterhost(
- clusterhost_id, session=session
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
-def get_clusterhost_deployed_config(
- clusterhost_id, user=None,
- session=None, **kwargs
-):
- """Get clusterhost deployed config by clusterhost id."""
- return _get_clusterhost(
- clusterhost_id, session=session
- )
-
-
-def _clusterhost_os_config_validates(
- config, clusterhost, session=None, user=None, **kwargs
-):
- """Validate clusterhost's underlying host os config."""
- from compass.db.api import host as host_api
- host = clusterhost.host
- host_api.check_host_editable(host, user=user)
- metadata_api.validate_os_config(
- config, host.os_id
- )
-
-
-def _clusterhost_package_config_validates(
- config, clusterhost, session=None, user=None, **kwargs
-):
- """Validate clusterhost's cluster package config."""
- cluster = clusterhost.cluster
- check_cluster_editable(cluster, user=user)
- metadata_api.validate_flavor_config(
- config, cluster.flavor_id
- )
-
-
-def _filter_clusterhost_host_editable(
- config, clusterhost, session=None, user=None, **kwargs
-):
- """Filter fields if the underlying host is not editable."""
- from compass.db.api import host as host_api
- host = clusterhost.host
- return host_api.is_host_editable(host, user=user)
-
-
-@utils.input_filters(
- put_os_config=_filter_clusterhost_host_editable,
- patched_os_config=_filter_clusterhost_host_editable
-)
-@utils.input_validates_with_args(
- put_os_config=_clusterhost_os_config_validates,
- put_package_config=_clusterhost_package_config_validates
-)
-@utils.output_validates_with_args(
- os_config=_clusterhost_os_config_validates,
- package_config=_clusterhost_package_config_validates
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
-def _update_clusterhost_config(clusterhost, session=None, user=None, **kwargs):
- """Update clusterhost config."""
- return utils.update_db_object(
- session, clusterhost, **kwargs
- )
-
-
-def _clusterhost_host_validated(
- config, clusterhost, session=None, user=None, **kwargs
-):
- """Check clusterhost's underlying host is validated."""
- from compass.db.api import host as host_api
- host = clusterhost.host
- host_api.check_host_editable(host, user=user)
- host_api.check_host_validated(host)
-
-
-def _clusterhost_cluster_validated(
- config, clusterhost, session=None, user=None, **kwargs
-):
- """Check clusterhost's cluster is validated."""
- cluster = clusterhost.cluster
- check_cluster_editable(cluster, user=user)
- check_clusterhost_validated(clusterhost)
-
-
-@utils.input_filters(
- deployed_os_config=_filter_clusterhost_host_editable,
-)
-@utils.input_validates_with_args(
- deployed_os_config=_clusterhost_host_validated,
- deployed_package_config=_clusterhost_cluster_validated
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
-def _update_clusterhost_deployed_config(
- clusterhost, session=None, user=None, **kwargs
-):
- """Update clusterhost deployed config."""
- return utils.update_db_object(
- session, clusterhost, **kwargs
- )
-
-
-# replace os_config with put_os_config and
-# package_config with put_package_config in kwargs.
-# It tells the db these fields will be updated, not patched.
-@utils.replace_filters(
- os_config='put_os_config',
- package_config='put_package_config'
-)
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
-)
-def update_cluster_host_config(
- cluster_id, host_id, user=None, session=None, **kwargs
-):
- """Update clusterhost config by cluster id and host id."""
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- return _update_clusterhost_config(
- clusterhost, user=user, session=session, **kwargs
- )
-
-
-# replace os_config with deployed_os_config and
-# package_config with deployed_package_config in kwargs.
-@utils.replace_filters(
- os_config='deployed_os_config',
- package_config='deployed_package_config'
-)
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
-)
-def update_cluster_host_deployed_config(
- cluster_id, host_id, user=None, session=None, **kwargs
-):
- """Update clusterhost deployed config by cluster id and host id."""
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- return _update_clusterhost_deployed_config(
- clusterhost, session=session, user=user, **kwargs
- )
-
-
-# replace os_config with put_os_config and
-# package_config with put_package_config in kwargs.
-# It tells the db these fields will be updated, not patched.
-@utils.replace_filters(
- os_config='put_os_config',
- package_config='put_package_config'
-)
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
-)
-def update_clusterhost_config(
- clusterhost_id, user=None, session=None, **kwargs
-):
- """Update clusterhost config by clusterhost id."""
- clusterhost = _get_clusterhost(
- clusterhost_id, session=session
- )
- return _update_clusterhost_config(
- clusterhost, session=session, user=user, **kwargs
- )
-
-
-# replace os_config with deployed_os_config and
-# package_config with deployed_package_config in kwargs.
-@utils.replace_filters(
- os_config='deployed_os_config',
- package_config='deployed_package_config'
-)
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
-)
-def update_clusterhost_deployed_config(
- clusterhost_id, user=None, session=None, **kwargs
-):
- """Update clusterhost deployed config by clusterhost id."""
- clusterhost = _get_clusterhost(
- clusterhost_id, session=session
- )
- return _update_clusterhost_deployed_config(
- clusterhost, session=session, user=user, **kwargs
- )
-
-
-# replace os_config with patched_os_config and
-# package_config with patched_package_config in kwargs.
-# It tells the db these fields will be patched, not updated.
-@utils.replace_filters(
- os_config='patched_os_config',
- package_config='patched_package_config'
-)
-@utils.supported_filters(
- optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
-)
-def patch_cluster_host_config(
- cluster_id, host_id, user=None, session=None, **kwargs
-):
- """patch clusterhost config by cluster id and host id."""
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- return _update_clusterhost_config(
- clusterhost, session=session, user=user, **kwargs
- )
-
-
-# replace os_config with patched_os_config and
-# package_config with patched_package_config in kwargs.
-# It tells the db these fields will be patched, not updated.
-@utils.replace_filters(
- os_config='patched_os_config',
- package_config='patched_package_config'
-)
-@utils.supported_filters(
- optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
-)
-def patch_clusterhost_config(
- clusterhost_id, user=None, session=None, **kwargs
-):
- """patch clusterhost config by clusterhost id."""
- clusterhost = _get_clusterhost(
- clusterhost_id, session=session
- )
- return _update_clusterhost_config(
- clusterhost, session=session, user=user, **kwargs
- )
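-
-
-def _example_update_vs_patch_clusterhost_config(clusterhost_id, user):
-    """Illustrative sketch only: contrast PUT and PATCH config semantics.
-
-    The os_config payloads below are hypothetical; the accepted keys are
-    defined by the adapter metadata, not by this sketch.
-    """
-    # os_config is mapped to put_os_config: the stored config is replaced.
-    update_clusterhost_config(
-        clusterhost_id, user=user,
-        os_config={'general': {'language': 'EN'}}
-    )
-    # os_config is mapped to patched_os_config: the dict is merged into
-    # the stored config instead of replacing it.
-    patch_clusterhost_config(
-        clusterhost_id, user=user,
-        os_config={'general': {'timezone': 'UTC'}}
-    )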
-
-
-def _clusterhost_host_editable(
- config, clusterhost, session=None, user=None, **kwargs
-):
- """Check clusterhost underlying host is editable."""
- from compass.db.api import host as host_api
- host_api.check_host_editable(clusterhost.host, user=user)
-
-
-def _clusterhost_cluster_editable(
- config, clusterhost, session=None, user=None, **kwargs
-):
- """Check clusterhost's cluster is editable."""
- check_cluster_editable(clusterhost.cluster, user=user)
-
-
-@utils.supported_filters(
- optional_support_keys=['os_config', 'package_config']
-)
-@utils.input_filters(
- os_config=_filter_clusterhost_host_editable,
-)
-@utils.output_validates_with_args(
- package_config=_clusterhost_cluster_editable
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
-def _delete_clusterhost_config(
- clusterhost, session=None, user=None, **kwargs
-):
- """delete clusterhost config."""
- return utils.update_db_object(
- session, clusterhost, config_validated=False,
- **kwargs
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
-)
-def delete_cluster_host_config(
- cluster_id, host_id, user=None, session=None
-):
- """Delete a clusterhost config by cluster id and host id."""
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- return _delete_clusterhost_config(
- clusterhost, session=session, user=user,
- os_config={}, package_config={}
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
-def delete_clusterhost_config(clusterhost_id, user=None, session=None):
- """Delet a clusterhost config by clusterhost id."""
- clusterhost = _get_clusterhost(
- clusterhost_id, session=session
- )
- return _delete_clusterhost_config(
- clusterhost, session=session, user=user,
- os_config={}, package_config={}
- )
-
-
-@utils.supported_filters(
- optional_support_keys=['add_hosts', 'remove_hosts', 'set_hosts']
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTER_HOSTS
-)
-@utils.wrap_to_dict(
- ['hosts'],
- hosts=RESP_CLUSTERHOST_FIELDS
-)
-def update_cluster_hosts(
- cluster_id, add_hosts={}, set_hosts=None,
- remove_hosts={}, user=None, session=None
-):
- """Update cluster hosts."""
- cluster = _get_cluster(cluster_id, session=session)
- if remove_hosts:
- _remove_clusterhosts(
- cluster, session=session, user=user, **remove_hosts
- )
- if add_hosts:
- _add_clusterhosts(
- cluster, session=session, user=user, **add_hosts
- )
- if set_hosts is not None:
- _set_clusterhosts(
- cluster, session=session, user=user, **set_hosts
- )
-
- return {
- 'hosts': list_cluster_hosts(cluster_id, session=session)
- }
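-
-
-def _example_update_cluster_hosts(cluster_id, machine_ids, host_ids, user):
-    """Illustrative sketch only: add and remove hosts of a cluster.
-
-    The inner structure of add_hosts/remove_hosts is an assumption here;
-    the exact keys are defined by _add_clusterhosts and _remove_clusterhosts.
-    """
-    # Add machines to the cluster (assumed key: machines).
-    update_cluster_hosts(
-        cluster_id, user=user,
-        add_hosts={'machines': [{'machine_id': mid} for mid in machine_ids]}
-    )
-    # Remove existing clusterhosts by host id (assumed key: hosts).
-    return update_cluster_hosts(
-        cluster_id, user=user,
-        remove_hosts={'hosts': host_ids}
-    )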
-
-
-def validate_clusterhost(clusterhost, session=None):
- """validate clusterhost."""
- roles = clusterhost.roles
- if not roles:
- if clusterhost.cluster.flavor_name:
- raise exception.InvalidParameter(
- 'empty roles for clusterhost %s' % clusterhost.name
- )
-
-
-def validate_cluster(cluster, session=None):
- """Validate cluster."""
- if not cluster.clusterhosts:
- raise exception.InvalidParameter(
- 'cluster %s does not have any hosts' % cluster.name
- )
- if cluster.flavor_name:
- cluster_roles = cluster.flavor['roles']
- else:
- cluster_roles = []
- necessary_roles = set([
- role['name'] for role in cluster_roles if not role.get('optional')
- ])
- clusterhost_roles = set([])
- interface_subnets = {}
- for clusterhost in cluster.clusterhosts:
- roles = clusterhost.roles
- for role in roles:
- clusterhost_roles.add(role['name'])
- host = clusterhost.host
- for host_network in host.host_networks:
- interface_subnets.setdefault(
- host_network.interface, set([])
- ).add(host_network.subnet.subnet)
- missing_roles = necessary_roles - clusterhost_roles
- if missing_roles:
- raise exception.InvalidParameter(
- 'cluster %s has roles %s not assigned to any host' % (
- cluster.name, list(missing_roles)
- )
- )
- for interface, subnets in interface_subnets.items():
- if len(subnets) > 1:
- raise exception.InvalidParameter(
- 'cluster %s has multiple subnets %s on interface %s' % (
- cluster.name, list(subnets), interface
- )
- )
-
-
-@utils.supported_filters(optional_support_keys=['review'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_REVIEW_CLUSTER
-)
-@utils.wrap_to_dict(
- RESP_REVIEW_FIELDS,
- cluster=RESP_CONFIG_FIELDS,
- hosts=RESP_CLUSTERHOST_CONFIG_FIELDS
-)
-def review_cluster(cluster_id, review={}, user=None, session=None, **kwargs):
- """review cluster.
-
- Args:
- cluster_id: the cluster id.
- review: dict of hosts to be reviewed. It contains either the key
- hosts or the key clusterhosts, where hosts is a list of host ids
- and clusterhosts is a list of clusterhost ids.
- """
- from compass.db.api import host as host_api
- cluster = _get_cluster(cluster_id, session=session)
- check_cluster_editable(cluster, user=user)
- host_ids = review.get('hosts', [])
- clusterhost_ids = review.get('clusterhosts', [])
- clusterhosts = []
- # Get clusterhosts need to be reviewed.
- for clusterhost in cluster.clusterhosts:
- if (
- clusterhost.clusterhost_id in clusterhost_ids or
- clusterhost.host_id in host_ids
- ):
- clusterhosts.append(clusterhost)
-
- os_config = copy.deepcopy(cluster.os_config)
- os_config = metadata_api.autofill_os_config(
- os_config, cluster.os_id, cluster=cluster
- )
- metadata_api.validate_os_config(
- os_config, cluster.os_id, True
- )
- for clusterhost in clusterhosts:
- host = clusterhost.host
- # ignore underlying host os config validation
- # since the host is not editable
- if not host_api.is_host_editable(
- host, user=user, check_in_installing=False
- ):
- logging.info(
- 'ignore update host %s config '
- 'since it is not editable' % host.name
- )
- continue
- host_os_config = copy.deepcopy(host.os_config)
- host_os_config = metadata_api.autofill_os_config(
- host_os_config, host.os_id,
- host=host
- )
- deployed_os_config = util.merge_dict(
- os_config, host_os_config
- )
- metadata_api.validate_os_config(
- deployed_os_config, host.os_id, True
- )
- host_api.validate_host(host)
- utils.update_db_object(
- session, host, os_config=host_os_config, config_validated=True
- )
-
- package_config = copy.deepcopy(cluster.package_config)
- if cluster.flavor_name:
- package_config = metadata_api.autofill_flavor_config(
- package_config, cluster.flavor_id,
- cluster=cluster
- )
- metadata_api.validate_flavor_config(
- package_config, cluster.flavor_id, True
- )
- for clusterhost in clusterhosts:
- clusterhost_package_config = copy.deepcopy(
- clusterhost.package_config
- )
- clusterhost_package_config = (
- metadata_api.autofill_flavor_config(
- clusterhost_package_config,
- cluster.flavor_id,
- clusterhost=clusterhost
- )
- )
- deployed_package_config = util.merge_dict(
- package_config, clusterhost_package_config
- )
- metadata_api.validate_flavor_config(
- deployed_package_config,
- cluster.flavor_id, True
- )
- validate_clusterhost(clusterhost, session=session)
- utils.update_db_object(
- session, clusterhost,
- package_config=clusterhost_package_config,
- config_validated=True
- )
-
- validate_cluster(cluster, session=session)
- utils.update_db_object(
- session, cluster, os_config=os_config, package_config=package_config,
- config_validated=True
- )
- return {
- 'cluster': cluster,
- 'hosts': clusterhosts
- }
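-
-
-def _example_review_cluster(cluster_id, host_ids, user):
-    """Illustrative sketch only: review the configs of selected hosts.
-
-    As documented above, the review dict may reference hosts either by
-    host id or by clusterhost id; host ids are used here.
-    """
-    result = review_cluster(
-        cluster_id, user=user, review={'hosts': host_ids}
-    )
-    # result['cluster'] holds the validated cluster config and
-    # result['hosts'] the reviewed clusterhosts.
-    return result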
-
-
-@utils.supported_filters(optional_support_keys=['deploy'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEPLOY_CLUSTER
-)
-@utils.wrap_to_dict(
- RESP_DEPLOY_FIELDS,
- cluster=RESP_CONFIG_FIELDS,
- hosts=RESP_CLUSTERHOST_FIELDS
-)
-def deploy_cluster(
- cluster_id, deploy={}, user=None, session=None, **kwargs
-):
- """deploy cluster.
-
- Args:
- cluster_id: cluster id.
- deploy: dict containing either the key hosts or clusterhosts.
- deploy['hosts'] is a list of host ids,
- deploy['clusterhosts'] is a list of clusterhost ids.
- """
- from compass.db.api import host as host_api
- from compass.tasks import client as celery_client
- cluster = _get_cluster(cluster_id, session=session)
- host_ids = deploy.get('hosts', [])
- clusterhost_ids = deploy.get('clusterhosts', [])
- clusterhosts = []
- # get clusterhost to deploy.
- for clusterhost in cluster.clusterhosts:
- if (
- clusterhost.clusterhost_id in clusterhost_ids or
- clusterhost.host_id in host_ids
- ):
- clusterhosts.append(clusterhost)
- check_cluster_editable(cluster, user=user)
- check_cluster_validated(cluster)
- utils.update_db_object(session, cluster.state, state='INITIALIZED')
- for clusterhost in clusterhosts:
- host = clusterhost.host
- # ignore checking if underlying host is validated if
- # the host is not editable.
- if host_api.is_host_editable(host, user=user):
- host_api.check_host_validated(host)
- utils.update_db_object(session, host.state, state='INITIALIZED')
- if cluster.flavor_name:
- check_clusterhost_validated(clusterhost)
- utils.update_db_object(
- session, clusterhost.state, state='INITIALIZED'
- )
-
- celery_client.celery.send_task(
- 'compass.tasks.deploy_cluster',
- (
- user.email, cluster_id,
- [clusterhost.host_id for clusterhost in clusterhosts]
- ),
- queue=user.email,
- exchange=user.email,
- routing_key=user.email
- )
- return {
- 'status': 'deploy action sent',
- 'cluster': cluster,
- 'hosts': clusterhosts
- }
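-
-
-def _example_deploy_cluster(cluster_id, host_ids, user):
-    """Illustrative sketch only: trigger deployment of selected hosts.
-
-    The cluster and hosts are expected to have been reviewed first, since
-    deploy_cluster() checks the config_validated flags before dispatching
-    the compass.tasks.deploy_cluster task.
-    """
-    return deploy_cluster(
-        cluster_id, user=user, deploy={'hosts': host_ids}
-    )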
-
-
-@utils.supported_filters(optional_support_keys=['redeploy'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEPLOY_CLUSTER
-)
-@utils.wrap_to_dict(
- RESP_DEPLOY_FIELDS,
- cluster=RESP_CONFIG_FIELDS,
- hosts=RESP_CLUSTERHOST_FIELDS
-)
-def redeploy_cluster(
- cluster_id, deploy={}, user=None, session=None, **kwargs
-):
- """redeploy cluster.
-
- Args:
- cluster_id: cluster id.
- """
- from compass.db.api import host as host_api
- from compass.tasks import client as celery_client
- cluster = _get_cluster(cluster_id, session=session)
-
- check_cluster_editable(cluster, user=user)
- check_cluster_validated(cluster)
- utils.update_db_object(
- session, cluster.state,
- state='INITIALIZED',
- percentage=0,
- ready=False
- )
- for clusterhost in cluster.clusterhosts:
- host = clusterhost.host
- # ignore checking if underlying host is validated if
- # the host is not editable.
- host_api.check_host_validated(host)
- utils.update_db_object(
- session, host.state,
- state='INITIALIZED',
- percentage=0,
- ready=False
- )
- if cluster.flavor_name:
- check_clusterhost_validated(clusterhost)
- utils.update_db_object(
- session,
- clusterhost.state,
- state='INITIALIZED',
- percentage=0,
- ready=False
- )
-
- celery_client.celery.send_task(
- 'compass.tasks.redeploy_cluster',
- (
- user.email, cluster_id
- ),
- queue=user.email,
- exchange=user.email,
- routing_key=user.email
- )
- return {
- 'status': 'redeploy action sent',
- 'cluster': cluster
- }
-
-
-@utils.supported_filters(optional_support_keys=['apply_patch'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEPLOY_CLUSTER
-)
-@utils.wrap_to_dict(
- RESP_DEPLOY_FIELDS,
- cluster=RESP_CONFIG_FIELDS,
- hosts=RESP_CLUSTERHOST_FIELDS
-)
-def patch_cluster(cluster_id, user=None, session=None, **kwargs):
- """Send a patch_cluster task to celery for the given cluster."""
- from compass.tasks import client as celery_client
-
- cluster = _get_cluster(cluster_id, session=session)
- celery_client.celery.send_task(
- 'compass.tasks.patch_cluster',
- (
- user.email, cluster_id,
- ),
- queue=user.email,
- exchange=user.email,
- routing_key=user.email
- )
- return {
- 'status': 'patch action sent',
- 'cluster': cluster
- }
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_GET_CLUSTER_STATE
-)
-@utils.wrap_to_dict(RESP_STATE_FIELDS)
-def get_cluster_state(cluster_id, user=None, session=None, **kwargs):
- """Get cluster state info."""
- return _get_cluster(cluster_id, session=session).state_dict()
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_GET_CLUSTERHOST_STATE
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
-def get_cluster_host_state(
- cluster_id, host_id, user=None, session=None, **kwargs
-):
- """Get clusterhost state merged with underlying host state."""
- return _get_cluster_host(
- cluster_id, host_id, session=session
- ).state_dict()
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_GET_CLUSTERHOST_STATE
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
-def get_cluster_host_self_state(
- cluster_id, host_id, user=None, session=None, **kwargs
-):
- """Get clusterhost itself state."""
- return _get_cluster_host(
- cluster_id, host_id, session=session
- ).state
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_GET_CLUSTERHOST_STATE
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
-def get_clusterhost_state(
- clusterhost_id, user=None, session=None, **kwargs
-):
- """Get clusterhost state merged with underlying host state."""
- return _get_clusterhost(
- clusterhost_id, session=session
- ).state_dict()
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_GET_CLUSTERHOST_STATE
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
-def get_clusterhost_self_state(
- clusterhost_id, user=None, session=None, **kwargs
-):
- """Get clusterhost itself state."""
- return _get_clusterhost(
- clusterhost_id, session=session
- ).state
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
-def update_cluster_host_state(
- cluster_id, host_id, user=None, session=None, **kwargs
-):
- """Update a clusterhost itself state."""
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- # Modify(harry): progress_update.py no longer updates the cluster
- # state, so update the cluster state here as well.
- cluster = _get_cluster(clusterhost.cluster_id, session=session)
- utils.update_db_object(session, clusterhost.state, **kwargs)
- utils.update_db_object(session, cluster.state, **kwargs)
- return clusterhost.state_dict()
-
-
-def _update_clusterhost_state(
- clusterhost, from_database_only=False,
- session=None, user=None, **kwargs
-):
- """Update clusterhost state.
-
- If from_database_only, the state will only be updated in the database.
- Otherwise a task is sent to celery and the os installer/package
- installer will also update the state if needed.
- """
- if 'ready' in kwargs and kwargs['ready'] and not clusterhost.state.ready:
- ready_triggered = True
- else:
- ready_triggered = False
- cluster_ready = False
- host = clusterhost.host
- cluster = clusterhost.cluster
- host_ready = not host.state.ready
- if ready_triggered:
- cluster_ready = True
- for clusterhost_in_cluster in cluster.clusterhosts:
- if (
- clusterhost_in_cluster.clusterhost_id
- == clusterhost.clusterhost_id
- ):
- continue
- if not clusterhost_in_cluster.state.ready:
- cluster_ready = False
-
- logging.info(
- 'clusterhost %s ready: %s',
- clusterhost.name, ready_triggered
- )
- logging.info('cluster ready: %s', cluster_ready)
- logging.info('host ready: %s', host_ready)
- if not ready_triggered or from_database_only:
- logging.info('%s state is set to %s', clusterhost.name, kwargs)
- utils.update_db_object(session, clusterhost.state, **kwargs)
- if not clusterhost.state.ready:
- logging.info('%s state ready is set to False', cluster.name)
- utils.update_db_object(session, cluster.state, ready=False)
- status = '%s state is updated' % clusterhost.name
- else:
- if not user:
- user_id = cluster.creator_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- from compass.tasks import client as celery_client
- celery_client.celery.send_task(
- 'compass.tasks.package_installed',
- (
- clusterhost.cluster_id, clusterhost.host_id,
- cluster_ready, host_ready
- ),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- status = '%s: cluster ready %s host ready %s' % (
- clusterhost.name, cluster_ready, host_ready
- )
- logging.info('action status: %s', status)
- return {
- 'status': status,
- 'clusterhost': clusterhost.state_dict()
- }
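-
-
-def _example_mark_clusterhost_ready(clusterhost, session, user):
-    """Illustrative sketch only: mark a clusterhost ready in the db only.
-
-    With from_database_only=True no celery task is dispatched; with the
-    default False, compass.tasks.package_installed would be sent instead
-    when the ready flag flips to True.
-    """
-    return _update_clusterhost_state(
-        clusterhost, from_database_only=True,
-        session=session, user=user, ready=True
-    )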
-
-
-@util.deprecated
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
-)
-@utils.wrap_to_dict(['status', 'clusterhost'])
-def update_cluster_host_state_internal(
- cluster_id, host_id, from_database_only=False,
- user=None, session=None, **kwargs
-):
- """Update a clusterhost state by installation process."""
- # TODO(xicheng): it should be merged into update_cluster_host_state
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- return _update_clusterhost_state(
- clusterhost, from_database_only=from_database_only,
- session=session, user=user, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
-)
-@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
-def update_clusterhost_state(
- clusterhost_id, user=None, session=None, **kwargs
-):
- """Update a clusterhost itself state."""
- clusterhost = _get_clusterhost(
- clusterhost_id, session=session
- )
- # Modify(harry): progress_update.py no longer updates the cluster
- # state, so update the cluster state here as well.
- cluster = _get_cluster(clusterhost.cluster_id, session=session)
- utils.update_db_object(session, clusterhost.state, **kwargs)
- utils.update_db_object(session, cluster.state, **kwargs)
- return clusterhost.state_dict()
-
-
-@util.deprecated
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
-)
-@utils.wrap_to_dict(['status', 'clusterhost'])
-def update_clusterhost_state_internal(
- clusterhost_id, from_database_only=False,
- user=None, session=None, **kwargs
-):
- """Update a clusterhost state by installation process."""
- # TODO(xicheng): it should be merged into update_clusterhost_state
- clusterhost = _get_clusterhost(clusterhost_id, session=session)
- return _update_clusterhost_state(
- clusterhost, from_database_only=from_database_only,
- session=session, user=user, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTER_STATE_FIELDS,
- ignore_support_keys=(IGNORE_FIELDS + IGNORE_UPDATED_CLUSTER_STATE_FIELDS)
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTER_STATE
-)
-@utils.wrap_to_dict(RESP_STATE_FIELDS)
-def update_cluster_state(
- cluster_id, user=None, session=None, **kwargs
-):
- """Update a cluster state."""
- cluster = _get_cluster(
- cluster_id, session=session
- )
- utils.update_db_object(session, cluster.state, **kwargs)
- return cluster.state_dict()
-
-
-@util.deprecated
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTER_STATE_INTERNAL_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_CLUSTER_STATE
-)
-@utils.wrap_to_dict(['status', 'cluster'])
-def update_cluster_state_internal(
- cluster_id, from_database_only=False,
- user=None, session=None, **kwargs
-):
- """Update a cluster state by installation process.
-
- If from_database_only, the state will only be updated in the database.
- Otherwise a task is sent to update the state in the os installer and
- package installer.
- """
- # TODO(xicheng): it should be merged into update_cluster_state
- cluster = _get_cluster(cluster_id, session=session)
- if 'ready' in kwargs and kwargs['ready'] and not cluster.state.ready:
- ready_triggered = True
- else:
- ready_triggered = False
- clusterhost_ready = {}
- if ready_triggered:
- for clusterhost in cluster.clusterhosts:
- clusterhost_ready[clusterhost.host_id] = (
- not clusterhost.state.ready
- )
-
- logging.info('cluster %s ready: %s', cluster_id, ready_triggered)
- logging.info('clusterhost ready: %s', clusterhost_ready)
-
- if not ready_triggered or from_database_only:
- logging.info('%s state is set to %s', cluster.name, kwargs)
- utils.update_db_object(session, cluster.state, **kwargs)
- if not cluster.state.ready:
- for clusterhost in cluster.clusterhosts:
- logging.info('%s state ready is set to False', clusterhost.name)
- utils.update_db_object(
- session, clusterhost.state, ready=False
- )
- status = '%s state is updated' % cluster.name
- else:
- if not user:
- user_id = cluster.creator_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- from compass.tasks import client as celery_client
- celery_client.celery.send_task(
- 'compass.tasks.cluster_installed',
- (cluster_id, clusterhost_ready),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- status = '%s installed action set clusterhost ready %s' % (
- cluster.name, clusterhost_ready
- )
- logging.info('action status: %s', status)
- return {
- 'status': status,
- 'cluster': cluster.state_dict()
- }
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
-def get_cluster_host_log_histories(
- cluster_id, host_id, user=None, session=None, **kwargs
-):
- """Get clusterhost log history by cluster id and host id."""
- return _get_cluster_host(
- cluster_id, host_id, session=session
- ).log_histories
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
-def get_clusterhost_log_histories(
- clusterhost_id, user=None,
- session=None, **kwargs
-):
- """Get clusterhost log history by clusterhost id."""
- return _get_clusterhost(
- clusterhost_id, session=session
- ).log_histories
-
-
-def _get_cluster_host_log_history(
- cluster_id, host_id, filename, session=None, **kwargs
-):
- """Get clusterhost log history by cluster id, host id and filename."""
- clusterhost = _get_cluster_host(cluster_id, host_id, session=session)
- return utils.get_db_object(
- session, models.ClusterHostLogHistory,
- clusterhost_id=clusterhost.clusterhost_id, filename=filename,
- **kwargs
- )
-
-
-def _get_clusterhost_log_history(
- clusterhost_id, filename, session=None, **kwargs
-):
- """Get clusterhost log history by clusterhost id and filename."""
- clusterhost = _get_clusterhost(clusterhost_id, session=session)
- return utils.get_db_object(
- session, models.ClusterHostLogHistory,
- clusterhost_id=clusterhost.clusterhost_id, filename=filename,
- **kwargs
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
-def get_cluster_host_log_history(
- cluster_id, host_id, filename, user=None, session=None, **kwargs
-):
- """Get clusterhost log history by cluster id, host id and filename."""
- return _get_cluster_host_log_history(
- cluster_id, host_id, filename, session=session
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
-def get_clusterhost_log_history(
- clusterhost_id, filename, user=None, session=None, **kwargs
-):
- """Get host log history by clusterhost id and filename."""
- return _get_clusterhost_log_history(
- clusterhost_id, filename, session=session
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
-def update_cluster_host_log_history(
- cluster_id, host_id, filename, user=None, session=None, **kwargs
-):
- """Update a host log history by cluster id, host id and filename."""
- cluster_host_log_history = _get_cluster_host_log_history(
- cluster_id, host_id, filename, session=session
- )
- return utils.update_db_object(
- session, cluster_host_log_history, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
-def update_clusterhost_log_history(
- clusterhost_id, filename, user=None, session=None, **kwargs
-):
- """Update a host log history by clusterhost id and filename."""
- clusterhost_log_history = _get_clusterhost_log_history(
- clusterhost_id, filename, session=session
- )
- return utils.update_db_object(session, clusterhost_log_history, **kwargs)
-
-
-@utils.supported_filters(
- ADDED_CLUSTERHOST_LOG_FIELDS,
- optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
-def add_clusterhost_log_history(
- clusterhost_id, exception_when_existing=False,
- filename=None, user=None, session=None, **kwargs
-):
- """add a host log history by clusterhost id and filename."""
- clusterhost = _get_clusterhost(clusterhost_id, session=session)
- return utils.add_db_object(
- session, models.ClusterHostLogHistory,
- exception_when_existing,
- clusterhost.clusterhost_id, filename, **kwargs
- )
-
-
-@utils.supported_filters(
- ADDED_CLUSTERHOST_LOG_FIELDS,
- optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
-def add_cluster_host_log_history(
- cluster_id, host_id, exception_when_existing=False,
- filename=None, user=None, session=None, **kwargs
-):
- """add a host log history by cluster id, host id and filename."""
- clusterhost = _get_cluster_host(
- cluster_id, host_id, session=session
- )
- return utils.add_db_object(
- session, models.ClusterHostLogHistory, exception_when_existing,
- clusterhost.clusterhost_id, filename, **kwargs
- )
diff --git a/compass-tasks/db/api/database.py b/compass-tasks/db/api/database.py
deleted file mode 100644
index 49769d7..0000000
--- a/compass-tasks/db/api/database.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Provider interface to manipulate database."""
-import functools
-import logging
-import netaddr
-
-from contextlib import contextmanager
-from sqlalchemy import create_engine
-from sqlalchemy.exc import IntegrityError
-from sqlalchemy.exc import OperationalError
-from sqlalchemy.orm import scoped_session
-from sqlalchemy.orm import sessionmaker
-from sqlalchemy.pool import NullPool
-from sqlalchemy.pool import QueuePool
-from sqlalchemy.pool import SingletonThreadPool
-from sqlalchemy.pool import StaticPool
-from threading import local
-
-from compass.db import exception
-from compass.db import models
-from compass.utils import logsetting
-from compass.utils import setting_wrapper as setting
-
-
-ENGINE = None
-SESSION = sessionmaker(autocommit=False, autoflush=False)
-SCOPED_SESSION = None
-SESSION_HOLDER = local()
-
-POOL_MAPPING = {
- 'instant': NullPool,
- 'static': StaticPool,
- 'queued': QueuePool,
- 'thread_single': SingletonThreadPool
-}
-
-
-def init(database_url=None):
- """Initialize database.
-
- Adjust sqlalchemy logging if necessary.
-
- :param database_url: string, database url.
- """
- global ENGINE
- global SCOPED_SESSION
- if not database_url:
- database_url = setting.SQLALCHEMY_DATABASE_URI
- logging.info('init database %s', database_url)
- root_logger = logging.getLogger()
- fine_debug = root_logger.isEnabledFor(logsetting.LOGLEVEL_MAPPING['fine'])
- if fine_debug:
- logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
- finest_debug = root_logger.isEnabledFor(
- logsetting.LOGLEVEL_MAPPING['finest']
- )
- if finest_debug:
- logging.getLogger('sqlalchemy.dialects').setLevel(logging.INFO)
- logging.getLogger('sqlalchemy.pool').setLevel(logging.INFO)
- logging.getLogger('sqlalchemy.orm').setLevel(logging.INFO)
- poolclass = POOL_MAPPING[setting.SQLALCHEMY_DATABASE_POOL_TYPE]
- ENGINE = create_engine(
- database_url, convert_unicode=True,
- poolclass=poolclass
- )
- SESSION.configure(bind=ENGINE)
- SCOPED_SESSION = scoped_session(SESSION)
- models.BASE.query = SCOPED_SESSION.query_property()
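-
-
-def _example_bootstrap_db():
-    """Illustrative sketch only: initialize the engine and create tables.
-
-    The MySQL URL is a placeholder, and the compass settings (e.g. the
-    configured pool type) are assumed to be loaded already.
-    """
-    init('mysql://compass:compass@127.0.0.1/compass')
-    create_db()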
-
-
-def in_session():
- """Check if we are inside a database session scope."""
- return bool(hasattr(SESSION_HOLDER, 'session'))
-
-
-@contextmanager
-def session(exception_when_in_session=True):
- """database session scope.
-
- To operate database, it should be called in database session.
- If not exception_when_in_session, the with session statement support
- nested session and only the out most session commit/rollback the
- transaction.
- """
- if not ENGINE:
- init()
-
- nested_session = False
- if hasattr(SESSION_HOLDER, 'session'):
- if exception_when_in_session:
- logging.error('we are already in session')
- raise exception.DatabaseException('session already exist')
- else:
- new_session = SESSION_HOLDER.session
- nested_session = True
- logging.log(
- logsetting.getLevelByName('fine'),
- 'reuse session %s', nested_session
- )
- else:
- new_session = SCOPED_SESSION()
- setattr(SESSION_HOLDER, 'session', new_session)
- logging.log(
- logsetting.getLevelByName('fine'),
- 'enter session %s', new_session
- )
- try:
- yield new_session
- if not nested_session:
- new_session.commit()
- except Exception as error:
- if not nested_session:
- new_session.rollback()
- logging.error('failed to commit session')
- logging.exception(error)
- if isinstance(error, IntegrityError):
- for item in error.statement.split():
- if item.islower():
- object = item
- break
- raise exception.DuplicatedRecord(
- '%s in %s' % (error.orig, object)
- )
- elif isinstance(error, OperationalError):
- raise exception.DatabaseException(
- 'operation error in database'
- )
- elif isinstance(error, exception.DatabaseException):
- raise error
- else:
- raise exception.DatabaseException(str(error))
- finally:
- if not nested_session:
- new_session.close()
- SCOPED_SESSION.remove()
- delattr(SESSION_HOLDER, 'session')
- logging.log(
- logsetting.getLevelByName('fine'),
- 'exit session %s', new_session
- )
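-
-
-def _example_nested_session():
-    """Illustrative sketch only: nested use of the session scope.
-
-    With exception_when_in_session=False the inner block reuses the outer
-    session, and only the outermost block commits or rolls back.
-    """
-    with session(exception_when_in_session=False) as outer:
-        with session(exception_when_in_session=False) as inner:
-            assert inner is outer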
-
-
-def current_session():
- """Get the current session scope when it is called.
-
- :return: database session.
- :raises: DatabaseException when it is not in session.
- """
- try:
- return SESSION_HOLDER.session
- except Exception as error:
- logging.error('It is not in the session scope')
- logging.exception(error)
- if isinstance(error, exception.DatabaseException):
- raise error
- else:
- raise exception.DatabaseException(str(error))
-
-
-def run_in_session(exception_when_in_session=True):
- """Decorator to make sure the decorated function run in session.
-
- When not exception_when_in_session, the run_in_session can be
- decorated several times.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- try:
- my_session = kwargs.get('session')
- if my_session is not None:
- return func(*args, **kwargs)
- else:
- with session(
- exception_when_in_session=exception_when_in_session
- ) as my_session:
- kwargs['session'] = my_session
- return func(*args, **kwargs)
- except Exception as error:
- logging.error(
- 'got exception with func %s args %s kwargs %s',
- func, args, kwargs
- )
- logging.exception(error)
- raise error
- return wrapper
- return decorator
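-
-
-@run_in_session()
-def _example_count_clusters(session=None):
-    """Illustrative sketch only: a function wrapped by run_in_session.
-
-    The decorator opens a session and injects it as the session keyword
-    unless the caller already passes one, so this works both standalone
-    and nested inside a larger session.
-    """
-    return session.query(models.Cluster).count()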
-
-
-def _setup_user_table(user_session):
- """Initialize user table with default user."""
- logging.info('setup user table')
- from compass.db.api import user
- user.add_user(
- session=user_session,
- email=setting.COMPASS_ADMIN_EMAIL,
- password=setting.COMPASS_ADMIN_PASSWORD,
- is_admin=True
- )
-
-
-def _setup_permission_table(permission_session):
- """Initialize permission table."""
- logging.info('setup permission table.')
- from compass.db.api import permission
- permission.add_permissions_internal(
- session=permission_session
- )
-
-
-def _setup_switch_table(switch_session):
- """Initialize switch table."""
- # TODO(xicheng): deprecate setup default switch.
- logging.info('setup switch table')
- from compass.db.api import switch
- switch.add_switch(
- True, setting.DEFAULT_SWITCH_IP,
- session=switch_session,
- machine_filters=['allow ports all']
- )
-
-
-def _update_others(other_session):
- """Update other tables."""
- logging.info('update other tables')
- from compass.db.api import utils
- from compass.db import models
- utils.update_db_objects(
- other_session, models.Cluster
- )
- utils.update_db_objects(
- other_session, models.Host
- )
- utils.update_db_objects(
- other_session, models.ClusterHost
- )
-
-
-@run_in_session()
-def create_db(session=None):
- """Create database."""
- models.BASE.metadata.create_all(bind=ENGINE)
- _setup_permission_table(session)
- _setup_user_table(session)
- _setup_switch_table(session)
- _update_others(session)
-
-
-def drop_db():
- """Drop database."""
- models.BASE.metadata.drop_all(bind=ENGINE)
diff --git a/compass-tasks/db/api/health_check_report.py b/compass-tasks/db/api/health_check_report.py
deleted file mode 100644
index aaea7a7..0000000
--- a/compass-tasks/db/api/health_check_report.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Cluster health check report."""
-import logging
-
-from compass.db.api import cluster as cluster_api
-from compass.db.api import database
-from compass.db.api import host as host_api
-from compass.db.api import permission
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-
-
-REQUIRED_INSERT_FIELDS = ['name']
-OPTIONAL_INSERT_FIELDS = [
- 'display_name', 'report', 'category', 'state', 'error_message'
-]
-UPDATE_FIELDS = ['report', 'state', 'error_message']
-RESP_FIELDS = [
- 'cluster_id', 'name', 'display_name', 'report',
- 'category', 'state', 'error_message'
-]
-RESP_ACTION_FIELDS = ['cluster_id', 'status']
-
-
-@utils.supported_filters(REQUIRED_INSERT_FIELDS, OPTIONAL_INSERT_FIELDS)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_FIELDS)
-def add_report_record(cluster_id, name=None, report={},
- state='verifying', session=None, **kwargs):
- """Create a health check report record."""
- # Replace any whitespace in the name with '-'
- words = name.split()
- name = '-'.join(words)
- cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
- return utils.add_db_object(
- session, models.HealthCheckReport, True, cluster.id, name,
- report=report, state=state, **kwargs
- )
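-
-
-def _example_add_report_record(cluster_id):
-    """Illustrative sketch only: record a health check result.
-
-    The report payload and name are hypothetical; whitespace in the name
-    is normalized to '-' by add_report_record itself.
-    """
-    return add_report_record(
-        cluster_id, name='ping check',
-        report={'status': 'ok'}, state='finished'
-    )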
-
-
-def _get_report(cluster_id, name, session=None):
- cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
- return utils.get_db_object(
- session, models.HealthCheckReport, cluster_id=cluster.id, name=name
- )
-
-
-@utils.supported_filters(UPDATE_FIELDS)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_FIELDS)
-def update_report(cluster_id, name, session=None, **kwargs):
- """Update health check report."""
- report = _get_report(cluster_id, name, session=session)
- if report.state == 'finished':
- err_msg = 'Report cannot be updated if state is in "finished"'
- raise exception.Forbidden(err_msg)
-
- return utils.update_db_object(session, report, **kwargs)
-
-
-@utils.supported_filters(UPDATE_FIELDS)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_FIELDS)
-def update_multi_reports(cluster_id, session=None, **kwargs):
- """Bulk update reports."""
- # TODO(grace): rename the function if needed to reflect what it does.
- return set_error(cluster_id, session=session, **kwargs)
-
-
-def set_error(cluster_id, report={}, session=None,
- state='error', error_message=None):
- cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
- logging.debug(
- "updates all reports as %s in cluster %s",
- state, cluster_id
- )
- return utils.update_db_objects(
- session, models.HealthCheckReport,
- updates={
- 'report': {},
- 'state': 'error',
- 'error_message': error_message
- }, cluster_id=cluster.id
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HEALTH_REPORT
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_health_reports(cluster_id, user=None, session=None):
- """List all reports in the specified cluster."""
- cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
- return utils.list_db_objects(
- session, models.HealthCheckReport, cluster_id=cluster.id
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_GET_HEALTH_REPORT
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_health_report(cluster_id, name, user=None, session=None):
- return _get_report(
- cluster_id, name, session=session
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DELETE_REPORT
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def delete_reports(cluster_id, name=None, user=None, session=None):
- # TODO(grace): better to separate this function into two.
- # One is to delete a report of a cluster, the other to delete all
- # reports under a cluster.
- if name:
- report = _get_report(cluster_id, name, session=session)
- return utils.del_db_object(session, report)
- else:
- cluster = cluster_api.get_cluster_internal(
- cluster_id, session=session
- )
- return utils.del_db_objects(
- session, models.HealthCheckReport, cluster_id=cluster.id
- )
-
-
-@utils.supported_filters(optional_support_keys=['check_health'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_CHECK_CLUSTER_HEALTH
-)
-@utils.wrap_to_dict(RESP_ACTION_FIELDS)
-def start_check_cluster_health(cluster_id, send_report_url,
- user=None, session=None, check_health={}):
- """Start to check cluster health."""
- cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
-
- if cluster.state.state != 'SUCCESSFUL':
- logging.debug("state is %s" % cluster.state.state)
- err_msg = "Healthcheck starts only after cluster finished deployment!"
- raise exception.Forbidden(err_msg)
-
- reports = utils.list_db_objects(
- session, models.HealthCheckReport,
- cluster_id=cluster.id, state='verifying'
- )
- if reports:
- err_msg = 'Healthcheck in progress, please wait for it to complete!'
- raise exception.Forbidden(err_msg)
-
- # Clear all previous reports
- # TODO(grace): the delete should be moved into celery task.
- # We should consider the case that celery task is down.
- utils.del_db_objects(
- session, models.HealthCheckReport, cluster_id=cluster.id
- )
-
- from compass.tasks import client as celery_client
- celery_client.celery.send_task(
- 'compass.tasks.cluster_health',
- (cluster.id, send_report_url, user.email),
- queue=user.email,
- exchange=user.email,
- routing_key=user.email
- )
- return {
- "cluster_id": cluster.id,
- "status": "start to check cluster health."
- }
diff --git a/compass-tasks/db/api/host.py b/compass-tasks/db/api/host.py
deleted file mode 100644
index 15e0bb6..0000000
--- a/compass-tasks/db/api/host.py
+++ /dev/null
@@ -1,1120 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Host database operations."""
-import functools
-import logging
-import netaddr
-import re
-
-from compass.db.api import database
-from compass.db.api import metadata_holder as metadata_api
-from compass.db.api import permission
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-from compass.utils import util
-
-
-SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac', 'id']
-SUPPORTED_MACHINE_HOST_FIELDS = [
- 'mac', 'tag', 'location', 'os_name', 'os_id'
-]
-SUPPORTED_NETOWORK_FIELDS = [
- 'interface', 'ip', 'is_mgmt', 'is_promiscuous'
-]
-RESP_FIELDS = [
- 'id', 'name', 'hostname', 'os_name', 'owner', 'mac',
- 'switch_ip', 'port', 'switches', 'os_installer', 'os_id', 'ip',
- 'reinstall_os', 'os_installed', 'tag', 'location', 'networks',
- 'created_at', 'updated_at'
-]
-RESP_CLUSTER_FIELDS = [
- 'id', 'name', 'os_name', 'reinstall_distributed_system',
- 'owner', 'adapter_name', 'flavor_name',
- 'distributed_system_installed', 'created_at', 'updated_at'
-]
-RESP_NETWORK_FIELDS = [
- 'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous',
- 'created_at', 'updated_at'
-]
-RESP_CONFIG_FIELDS = [
- 'os_config',
- 'config_setp',
- 'config_validated',
- 'networks',
- 'created_at',
- 'updated_at'
-]
-RESP_DEPLOYED_CONFIG_FIELDS = [
- 'deployed_os_config'
-]
-RESP_DEPLOY_FIELDS = [
- 'status', 'host'
-]
-UPDATED_FIELDS = ['name', 'reinstall_os']
-UPDATED_CONFIG_FIELDS = [
- 'put_os_config'
-]
-PATCHED_CONFIG_FIELDS = [
- 'patched_os_config'
-]
-UPDATED_DEPLOYED_CONFIG_FIELDS = [
- 'deployed_os_config'
-]
-ADDED_NETWORK_FIELDS = [
- 'interface', 'ip', 'subnet_id'
-]
-OPTIONAL_ADDED_NETWORK_FIELDS = ['is_mgmt', 'is_promiscuous']
-UPDATED_NETWORK_FIELDS = [
- 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
- 'is_promiscuous'
-]
-IGNORE_FIELDS = [
- 'id', 'created_at', 'updated_at'
-]
-RESP_STATE_FIELDS = [
- 'id', 'state', 'percentage', 'message', 'severity', 'ready'
-]
-UPDATED_STATE_FIELDS = [
- 'state', 'percentage', 'message', 'severity'
-]
-UPDATED_STATE_INTERNAL_FIELDS = [
- 'ready'
-]
-RESP_LOG_FIELDS = [
- 'id', 'filename', 'position', 'partial_line', 'percentage',
- 'message', 'severity', 'line_matcher_name'
-]
-ADDED_LOG_FIELDS = [
- 'filename'
-]
-UPDATED_LOG_FIELDS = [
- 'position', 'partial_line', 'percentage',
- 'message', 'severity', 'line_matcher_name'
-]
-
-
-@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOSTS
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_hosts(user=None, session=None, **filters):
- """List hosts."""
- return utils.list_db_objects(
- session, models.Host, **filters
- )
-
-
-@utils.supported_filters(
- optional_support_keys=SUPPORTED_MACHINE_HOST_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOSTS
-)
-@utils.output_filters(
- missing_ok=True,
- tag=utils.general_filter_callback,
- location=utils.general_filter_callback,
- os_name=utils.general_filter_callback,
- os_id=utils.general_filter_callback
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_machines_or_hosts(user=None, session=None, **filters):
- """List machines or hosts if possible."""
- machines = utils.list_db_objects(
- session, models.Machine, **filters
- )
- machines_or_hosts = []
- for machine in machines:
- host = machine.host
- if host:
- machines_or_hosts.append(host)
- else:
- machines_or_hosts.append(machine)
- return machines_or_hosts
-
-
-def _get_host(host_id, session=None, **kwargs):
- """Get host by id."""
- if isinstance(host_id, (int, long)):
- return utils.get_db_object(
- session, models.Host,
- id=host_id, **kwargs
- )
- else:
- raise exception.InvalidParameter(
- 'host id %s type is not int compatible' % host_id
- )
-
-
-def get_host_internal(host_id, session=None, **kwargs):
- """Helper function to get host.
-
- Used by other files under db/api.
- """
- return _get_host(host_id, session=session, **kwargs)
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOSTS
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_host(
- host_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """get host info."""
- return _get_host(
- host_id,
- exception_when_missing=exception_when_missing,
- session=session
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOSTS
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_machine_or_host(
- host_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """get machine or host if possible."""
- from compass.db.api import machine as machine_api
- machine = machine_api.get_machine_internal(
- host_id,
- exception_when_missing=exception_when_missing,
- session=session
- )
- if machine.host:
- return machine.host
- else:
- return machine
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOST_CLUSTERS
-)
-@utils.wrap_to_dict(RESP_CLUSTER_FIELDS)
-def get_host_clusters(host_id, user=None, session=None, **kwargs):
- """get host clusters."""
- host = _get_host(host_id, session=session)
- return [clusterhost.cluster for clusterhost in host.clusterhosts]
-
-
-def check_host_validated(host):
- """Check host is validated."""
- if not host.config_validated:
- raise exception.Forbidden(
- 'host %s is not validated' % host.name
- )
-
-
-def check_host_editable(
- host, user=None,
- check_in_installing=False
-):
- """Check host is editable.
-
- Set check_in_installing to True when we set reinstall_os or only need
- to check that the host is not in the installing state.
- Otherwise we check that the host is neither installing nor installed.
- We also make sure the user is admin or the owner of the host to prevent
- unauthorized users from updating host attributes.
- """
- if check_in_installing:
- if host.state.state == 'INSTALLING':
- raise exception.Forbidden(
- 'host %s is not editable '
- 'when state is in installing' % host.name
- )
- elif not host.reinstall_os:
- raise exception.Forbidden(
- 'host %s is not editable '
- 'when not to be reinstalled' % host.name
- )
- if user and not user.is_admin and host.creator_id != user.id:
- raise exception.Forbidden(
- 'host %s is not editable '
- 'when user is not admin or the owner of the host' % host.name
- )
-
-
-def is_host_editable(
- host, user=None,
- check_in_installing=False
-):
- """Get if host is editable."""
- try:
- check_host_editable(
- host, user=user,
- check_in_installing=check_in_installing
- )
- return True
- except exception.Forbidden:
- return False
-
-
-def validate_host(host):
- """Validate host.
-
- Make sure the hostname is not empty, there is only one mgmt network,
- and the mgmt network is not in promiscuous mode.
- """
- if not host.hostname:
- raise exception.InvalidParameter(
- 'host %s does not set hostname' % host.name
- )
- if not host.host_networks:
- raise exception.InvalidParameter(
- 'host %s does not have any network' % host.name
- )
- mgmt_interface_set = False
- for host_network in host.host_networks:
- if host_network.is_mgmt:
- if mgmt_interface_set:
- raise exception.InvalidParameter(
- 'host %s has multiple mgmt interfaces set' % host.name
- )
- if host_network.is_promiscuous:
- raise exception.InvalidParameter(
- 'host %s interface %s is mgmt but promiscuous' % (
- host.name, host_network.interface
- )
- )
- mgmt_interface_set = True
- if not mgmt_interface_set:
- raise exception.InvalidParameter(
- 'host %s has no mgmt interface' % host.name
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(name=utils.check_name)
-@utils.wrap_to_dict(RESP_FIELDS)
-def _update_host(host_id, session=None, user=None, **kwargs):
- """Update a host internal."""
- host = _get_host(host_id, session=session)
- if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
- logging.info("ignoring successful host: %s", host_id)
- return {}
- check_host_editable(
- host, user=user,
- check_in_installing=kwargs.get('reinstall_os', False)
- )
- return utils.update_db_object(session, host, **kwargs)
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_HOST
-)
-def update_host(host_id, user=None, session=None, **kwargs):
- """Update a host."""
- return _update_host(host_id, session=session, user=user, **kwargs)
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_HOST
-)
-def update_hosts(data=[], user=None, session=None):
- """Update hosts."""
- # TODO(xicheng): this batch function is not consistent with the others.
- # Try to make its output similar to the others and make batch update
- # tolerate partial failure.
- hosts = []
- for host_data in data:
- hosts.append(_update_host(session=session, user=user, **host_data))
- return hosts
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_HOST
-)
-@utils.wrap_to_dict(
- RESP_FIELDS + ['status', 'host'],
- host=RESP_FIELDS
-)
-def del_host(
- host_id, force=False, from_database_only=False,
- user=None, session=None, **kwargs
-):
- """Delete a host.
-
- If force, we delete the host anyway.
- If from_database_only, we only delete the host record in the database.
- Otherwise we send a del host task to celery to delete the host
- record in the os installer and package installer, clean installation
- logs and finally clean the database record.
- The backend will call this function again after it deletes the record
- in os installer and package installer with from_database_only set.
- """
- from compass.db.api import cluster as cluster_api
- host = _get_host(host_id, session=session)
- # Force the host state to ERROR when we want to delete the
- # host anyway, even if the host is installing or already
- # installed. It lets the api know the deletion is in progress while
- # the backend does the real deleting. In future we may introduce a new
- # state like INDELETE to indicate the deletion is in progress.
- # We need to discuss whether the api should allow deleting a host that
- # is already installed.
- if host.state.state != 'UNINITIALIZED' and force:
- host.state.state = 'ERROR'
- check_host_editable(
- host, user=user,
- check_in_installing=True
- )
- cluster_ids = []
- for clusterhost in host.clusterhosts:
- if clusterhost.state.state != 'UNINITIALIZED' and force:
- clusterhost.state.state = 'ERROR'
- # TODO(grace): here we check that all clusters using this host are
- # editable, because the backend has no function to delete a host without
- # referencing its cluster. Once deleting a standalone host is supported
- # in the backend, we should change the code here to is_cluster_editable.
- # As a result, deleting a host may fail even when the force flag is set.
- cluster_api.check_cluster_editable(
- clusterhost.cluster, user=user,
- check_in_installing=True
- )
- cluster_ids.append(clusterhost.cluster_id)
-
- # Delete host record directly if there is no need to delete it
- # in backend or from_database_only is set.
- if host.state.state == 'UNINITIALIZED' or from_database_only:
- return utils.del_db_object(session, host)
- else:
- logging.info(
- 'send del host %s task to celery', host_id
- )
- if not user:
- user_id = host.creator_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- from compass.tasks import client as celery_client
- celery_client.celery.send_task(
- 'compass.tasks.delete_host',
- (
- user_email, host.id, cluster_ids
- ),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- return {
- 'status': 'delete action sent',
- 'host': host,
- }
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOST_CONFIG
-)
-@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
-def get_host_config(host_id, user=None, session=None, **kwargs):
- """Get host config."""
- return _get_host(host_id, session=session)
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOST_CONFIG
-)
-@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
-def get_host_deployed_config(host_id, user=None, session=None, **kwargs):
- """Get host deployed config."""
- return _get_host(host_id, session=session)
-
-
-# replace os_config with deployed_os_config in kwargs.
-@utils.replace_filters(
- os_config='deployed_os_config'
-)
-@utils.supported_filters(
- UPDATED_DEPLOYED_CONFIG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_HOST_CONFIG
-)
-@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
-def update_host_deployed_config(host_id, user=None, session=None, **kwargs):
- """Update host deployed config."""
- host = _get_host(host_id, session=session)
- check_host_editable(host, user=user)
- check_host_validated(host)
- return utils.update_db_object(session, host, **kwargs)
-
-
-def _host_os_config_validates(
- config, host, session=None, user=None, **kwargs
-):
- """Check host os config's validation."""
- metadata_api.validate_os_config(
- config, host.os_id
- )
-
-
-@utils.input_validates_with_args(
- put_os_config=_host_os_config_validates
-)
-@utils.output_validates_with_args(
- os_config=_host_os_config_validates
-)
-@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
-def _update_host_config(host, session=None, user=None, **kwargs):
- """Update host config."""
- check_host_editable(host, user=user)
- return utils.update_db_object(session, host, **kwargs)
-
-
-# replace os_config with put_os_config in kwargs.
-# It tells the db that os_config will be updated, not patched.
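-# Illustrative call (hypothetical values): update_host_config(
-#     1, os_config={'general': {'language': 'EN'}})
-# reaches _update_host_config as put_os_config={...}, so the stored
-# os_config is replaced as a whole rather than merged.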
-@utils.replace_filters(
- os_config='put_os_config'
-)
-@utils.supported_filters(
- UPDATED_CONFIG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_HOST_CONFIG
-)
-def update_host_config(host_id, user=None, session=None, **kwargs):
- """Update host config."""
- host = _get_host(host_id, session=session)
- return _update_host_config(
- host, session=session, user=user, **kwargs
- )
-
-
-# replace os_config with patched_os_config in kwargs.
-# It tells the db that os_config will be patched, not updated.
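-# Illustrative call (hypothetical values): patch_host_config(
-#     1, os_config={'general': {'timezone': 'UTC'}})
-# reaches _update_host_config as patched_os_config={...}, so the new
-# values are applied on top of (patched into) the existing os_config
-# instead of replacing it.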
-@utils.replace_filters(
- os_config='patched_os_config'
-)
-@utils.supported_filters(
- PATCHED_CONFIG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_HOST_CONFIG
-)
-def patch_host_config(host_id, user=None, session=None, **kwargs):
- """Patch host config."""
- host = _get_host(host_id, session=session)
- return _update_host_config(
- host, session=session, user=user, **kwargs
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_HOST_CONFIG
-)
-@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
-def del_host_config(host_id, user=None, session=None):
- """delete a host config."""
- host = _get_host(host_id, session=session)
- check_host_editable(host, user=user)
- return utils.update_db_object(
- session, host, os_config={}, config_validated=False
- )
-
-
-@utils.supported_filters(
- optional_support_keys=SUPPORTED_NETOWORK_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOST_NETWORKS
-)
-@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
-def list_host_networks(host_id, user=None, session=None, **filters):
- """Get host networks for a host."""
- host = _get_host(host_id, session=session)
- return utils.list_db_objects(
- session, models.HostNetwork,
- host_id=host.id, **filters
- )
-
-
-@utils.supported_filters(
- optional_support_keys=SUPPORTED_NETOWORK_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOST_NETWORKS
-)
-@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
-def list_hostnetworks(user=None, session=None, **filters):
- """Get host networks."""
- return utils.list_db_objects(
- session, models.HostNetwork, **filters
- )
-
-
-def _get_hostnetwork(host_network_id, session=None, **kwargs):
- """Get hostnetwork by hostnetwork id."""
- if isinstance(host_network_id, (int, long)):
- return utils.get_db_object(
- session, models.HostNetwork,
- id=host_network_id, **kwargs
- )
- raise exception.InvalidParameter(
- 'host network id %s type is not int compatible' % host_network_id
- )
-
-
-def _get_host_network(host_id, host_network_id, session=None, **kwargs):
- """Get hostnetwork by host id and hostnetwork id."""
- host = _get_host(host_id, session=session)
- host_network = _get_hostnetwork(host_network_id, session=session, **kwargs)
- if host_network.host_id != host.id:
- raise exception.RecordNotExists(
- 'host %s does not own host network %s' % (
- host.id, host_network.id
- )
- )
- return host_network
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOST_NETWORKS
-)
-@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
-def get_host_network(
- host_id, host_network_id,
- user=None, session=None, **kwargs
-):
- """Get host network."""
- return _get_host_network(
- host_id, host_network_id, session=session
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_HOST_NETWORKS
-)
-@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
-def get_hostnetwork(host_network_id, user=None, session=None, **kwargs):
- """Get host network."""
- return _get_hostnetwork(host_network_id, session=session)
-
-
-@utils.supported_filters(
- ADDED_NETWORK_FIELDS,
- optional_support_keys=OPTIONAL_ADDED_NETWORK_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(
- ip=utils.check_ip
-)
-@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
-def _add_host_network(
- host_id, exception_when_existing=True,
- session=None, user=None, interface=None, ip=None, **kwargs
-):
- """Add hostnetwork to a host."""
- host = _get_host(host_id, session=session)
- check_host_editable(host, user=user)
- user_id = user.id
- return utils.add_db_object(
- session, models.HostNetwork,
- exception_when_existing,
- host.id, interface, user_id, ip=ip, **kwargs
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_HOST_NETWORK
-)
-def add_host_network(
- host_id, exception_when_existing=True,
- interface=None, user=None, session=None, **kwargs
-):
- """Create a hostnetwork to a host."""
- return _add_host_network(
- host_id,
- exception_when_existing,
- interface=interface, session=session, user=user, **kwargs
- )
-
-
-def _get_hostnetwork_by_ip(
- ip, session=None, **kwargs
-):
- ip_int = long(netaddr.IPAddress(ip))
- return utils.get_db_object(
- session, models.HostNetwork,
- ip_int=ip_int, **kwargs
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_HOST_NETWORK
-)
-def add_host_networks(
- exception_when_existing=False,
- data=[], user=None, session=None
-):
- """Create host networks."""
- hosts = []
- failed_hosts = []
- for host_data in data:
- host_id = host_data['host_id']
- host = _get_host(host_id, session=session)
- networks = host_data['networks']
- host_networks = []
- failed_host_networks = []
- for network in networks:
- host_network = _get_hostnetwork_by_ip(
- network['ip'], session=session,
- exception_when_missing=False
- )
- if (
- host_network and not (
- host_network.host_id == host.id and
- host_network.interface == network['interface']
- )
- ):
- logging.error('ip %s exists in host network %s' % (
- network['ip'], host_network.id
- ))
- failed_host_networks.append(network)
- else:
- host_networks.append(_add_host_network(
- host.id, exception_when_existing,
- session=session, user=user, **network
- ))
- if host_networks:
- hosts.append({'host_id': host.id, 'networks': host_networks})
- if failed_host_networks:
- failed_hosts.append({
- 'host_id': host.id, 'networks': failed_host_networks
- })
- return {
- 'hosts': hosts,
- 'failed_hosts': failed_hosts
- }
-
-
-@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
-def _update_host_network(
- host_network, session=None, user=None, **kwargs
-):
- """Update host network."""
- check_host_editable(host_network.host, user=user)
- return utils.update_db_object(session, host_network, **kwargs)
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_NETWORK_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(
- ip=utils.check_ip
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_HOST_NETWORK
-)
-def update_host_network(
- host_id, host_network_id, user=None, session=None, **kwargs
-):
- """Update a host network by host id and host network id."""
- host = _get_host(
- host_id, session=session
- )
- if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
- logging.info("ignoring updating request for successful hosts")
- return {}
-
- host_network = _get_host_network(
- host_id, host_network_id, session=session
- )
- return _update_host_network(
- host_network, session=session, user=user, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_NETWORK_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(
- ip=utils.check_ip
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_HOST_NETWORK
-)
-def update_hostnetwork(host_network_id, user=None, session=None, **kwargs):
- """Update a host network by host network id."""
- host_network = _get_hostnetwork(
- host_network_id, session=session
- )
- return _update_host_network(
- host_network, session=session, user=user, **kwargs
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_HOST_NETWORK
-)
-@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
-def del_host_network(
- host_id, host_network_id, user=None,
- session=None, **kwargs
-):
- """Delete a host network by host id and host network id."""
- host_network = _get_host_network(
- host_id, host_network_id, session=session
- )
- check_host_editable(host_network.host, user=user)
- return utils.del_db_object(session, host_network)
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_HOST_NETWORK
-)
-@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
-def del_hostnetwork(host_network_id, user=None, session=None, **kwargs):
- """Delete a host network by host network id."""
- host_network = _get_hostnetwork(
- host_network_id, session=session
- )
- check_host_editable(host_network.host, user=user)
- return utils.del_db_object(session, host_network)
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_GET_HOST_STATE
-)
-@utils.wrap_to_dict(RESP_STATE_FIELDS)
-def get_host_state(host_id, user=None, session=None, **kwargs):
- """Get host state info."""
- return _get_host(host_id, session=session).state
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_STATE_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_HOST_STATE
-)
-@utils.wrap_to_dict(RESP_STATE_FIELDS)
-def update_host_state(host_id, user=None, session=None, **kwargs):
- """Update a host state."""
- host = _get_host(host_id, session=session)
- utils.update_db_object(session, host.state, **kwargs)
- return host.state
-
-
-@util.deprecated
-@utils.supported_filters(
- optional_support_keys=UPDATED_STATE_INTERNAL_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_HOST_STATE
-)
-@utils.wrap_to_dict(['status', 'host'])
-def update_host_state_internal(
- host_id, from_database_only=False,
- user=None, session=None, **kwargs
-):
- """Update a host state.
-
- This function is called when host os is installed.
- If from_database_only, the state is updated in database.
- Otherwise a celery task is sent to the os installer and package
- installer to do some further actions.
- """
- # TODO(xicheng): should be merged into update_host_state
- host = _get_host(host_id, session=session)
- logging.info("======host state: %s", host.state)
- if 'ready' in kwargs and kwargs['ready'] and not host.state.ready:
- ready_triggered = True
- else:
- ready_triggered = False
- clusterhosts_ready = {}
- clusters_os_ready = {}
- if ready_triggered:
- for clusterhost in host.clusterhosts:
- cluster = clusterhost.cluster
- if cluster.flavor_name:
- clusterhosts_ready[cluster.id] = False
- else:
- clusterhosts_ready[cluster.id] = True
- all_os_ready = True
- for clusterhost_in_cluster in cluster.clusterhosts:
- host_in_cluster = clusterhost_in_cluster.host
- if host_in_cluster.id == host.id:
- continue
- if not host_in_cluster.state.ready:
- all_os_ready = False
- clusters_os_ready[cluster.id] = all_os_ready
- logging.debug('host %s ready: %s', host_id, ready_triggered)
- logging.debug("clusterhosts_ready is: %s", clusterhosts_ready)
- logging.debug("clusters_os_ready is %s", clusters_os_ready)
-
- if not ready_triggered or from_database_only:
- logging.debug('%s state is set to %s', host.name, kwargs)
- utils.update_db_object(session, host.state, **kwargs)
- if not host.state.ready:
- for clusterhost in host.clusterhosts:
- utils.update_db_object(
- session, clusterhost.state, ready=False
- )
- utils.update_db_object(
- session, clusterhost.cluster.state, ready=False
- )
- status = '%s state is updated' % host.name
- else:
- if not user:
- user_id = host.creator_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- from compass.tasks import client as celery_client
- celery_client.celery.send_task(
- 'compass.tasks.os_installed',
- (
- host.id, clusterhosts_ready,
- clusters_os_ready
- ),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- status = '%s: clusterhosts ready %s clusters os ready %s' % (
- host.name, clusterhosts_ready, clusters_os_ready
- )
- logging.info('action status: %s', status)
- return {
- 'status': status,
- 'host': host.state
- }
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_LOG_FIELDS)
-def get_host_log_histories(host_id, user=None, session=None, **kwargs):
- """Get host log history."""
- host = _get_host(host_id, session=session)
- return utils.list_db_objects(
- session, models.HostLogHistory, id=host.id, **kwargs
- )
-
-
-def _get_host_log_history(host_id, filename, session=None, **kwargs):
- host = _get_host(host_id, session=session)
- return utils.get_db_object(
- session, models.HostLogHistory, id=host.id,
- filename=filename, **kwargs
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_LOG_FIELDS)
-def get_host_log_history(host_id, filename, user=None, session=None, **kwargs):
- """Get host log history."""
- return _get_host_log_history(
- host_id, filename, session=session
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_LOG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_LOG_FIELDS)
-def update_host_log_history(
- host_id, filename, user=None,
- session=None, **kwargs
-):
- """Update a host log history."""
- host_log_history = _get_host_log_history(
- host_id, filename, session=session
- )
- return utils.update_db_object(session, host_log_history, **kwargs)
-
-
-@utils.supported_filters(
- ADDED_LOG_FIELDS,
- optional_support_keys=UPDATED_LOG_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_LOG_FIELDS)
-def add_host_log_history(
- host_id, exception_when_existing=False,
- filename=None, user=None, session=None, **kwargs
-):
- """add a host log history."""
- host = _get_host(host_id, session=session)
- return utils.add_db_object(
- session, models.HostLogHistory, exception_when_existing,
- host.id, filename, **kwargs
- )
-
-
-@utils.supported_filters(optional_support_keys=['poweron'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEPLOY_HOST
-)
-@utils.wrap_to_dict(
- RESP_DEPLOY_FIELDS,
- host=RESP_CONFIG_FIELDS
-)
-def poweron_host(
- host_id, poweron={}, user=None, session=None, **kwargs
-):
- """power on host."""
- from compass.tasks import client as celery_client
- host = _get_host(host_id, session=session)
- check_host_validated(host)
- if not user:
- user_id = host.creator_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- celery_client.celery.send_task(
- 'compass.tasks.poweron_host',
- (host.id,),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- return {
- 'status': 'poweron %s action sent' % host.name,
- 'host': host
- }
-
-
-@utils.supported_filters(optional_support_keys=['poweroff'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEPLOY_HOST
-)
-@utils.wrap_to_dict(
- RESP_DEPLOY_FIELDS,
- host=RESP_CONFIG_FIELDS
-)
-def poweroff_host(
- host_id, poweroff={}, user=None, session=None, **kwargs
-):
- """power off host."""
- from compass.tasks import client as celery_client
- host = _get_host(host_id, session=session)
- check_host_validated(host)
- if not user:
- user_id = host.creator_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- celery_client.celery.send_task(
- 'compass.tasks.poweroff_host',
- (host.id,),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- return {
- 'status': 'poweroff %s action sent' % host.name,
- 'host': host
- }
-
-
-@utils.supported_filters(optional_support_keys=['reset'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEPLOY_HOST
-)
-@utils.wrap_to_dict(
- RESP_DEPLOY_FIELDS,
- host=RESP_CONFIG_FIELDS
-)
-def reset_host(
- host_id, reset={}, user=None, session=None, **kwargs
-):
- """reset host."""
- from compass.tasks import client as celery_client
- host = _get_host(host_id, session=session)
- check_host_validated(host)
- if not user:
- user_id = host.creator_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- celery_client.celery.send_task(
- 'compass.tasks.reset_host',
- (host.id,),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- return {
- 'status': 'reset %s action sent' % host.name,
- 'host': host
- }
diff --git a/compass-tasks/db/api/machine.py b/compass-tasks/db/api/machine.py
deleted file mode 100644
index b7b16b2..0000000
--- a/compass-tasks/db/api/machine.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Switch database operations."""
-import logging
-import re
-
-from compass.db.api import database
-from compass.db.api import permission
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-
-from compass.utils import setting_wrapper as setting
-from compass.utils import util
-
-
-MACHINE_PRIMARY_FIELDS = ['mac', 'owner_id']
-SUPPORTED_FIELDS = [
- 'mac', 'tag', 'location',
- 'machine_attributes', 'owner_id']
-IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
-UPDATED_FIELDS = [
- 'ipmi_credentials', 'machine_attributes',
- 'tag', 'location']
-PATCHED_FIELDS = [
- 'patched_ipmi_credentials', 'patched_tag',
- 'patched_location'
-]
-RESP_FIELDS = [
- 'id', 'mac', 'ipmi_credentials', 'switches', 'switch_ip',
- 'port', 'vlans', 'machine_attributes', 'owner_id',
- 'tag', 'location', 'created_at', 'updated_at'
-]
-RESP_DEPLOY_FIELDS = [
- 'status', 'machine'
-]
-
-
-def _get_machine(machine_id, session=None, **kwargs):
- """Get machine by id."""
- if isinstance(machine_id, (int, long)):
- return utils.get_db_object(
- session, models.Machine,
- id=machine_id, **kwargs
- )
- raise exception.InvalidParameter(
- 'machine id %s type is not int compatible' % machine_id
- )
-
-
-@utils.supported_filters(
- MACHINE_PRIMARY_FIELDS,
- optional_support_keys=SUPPORTED_FIELDS
-)
-@utils.input_validates(mac=utils.check_mac)
-def _add_machine(mac, owner_id=None, session=None, **kwargs):
- """Add a machine."""
- if isinstance(owner_id, (int, long)):
- return utils.add_db_object(
- session, models.Machine,
- True,
- mac,
- owner_id=owner_id,
- **kwargs
- )
- raise exception.InvalidParameter(
- 'owner id %s type is not int compatible' % owner_id
- )
-
-
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_FIELDS)
-def add_machine(
- mac, owner_id=None, session=None, user=None, **kwargs
-):
- """Add a machine."""
- return _add_machine(
- mac,
- owner_id=owner_id,
- session=session, **kwargs
- )
-
-
-def get_machine_internal(machine_id, session=None, **kwargs):
- """Helper function to other files under db/api."""
- return _get_machine(machine_id, session=session, **kwargs)
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_MACHINES
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_machine(
- machine_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """get a machine."""
- return _get_machine(
- machine_id, session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-@utils.supported_filters(
- optional_support_keys=SUPPORTED_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_MACHINES
-)
-@utils.output_filters(
- tag=utils.general_filter_callback,
- location=utils.general_filter_callback
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_machines(user=None, session=None, **filters):
- """List machines."""
- machines = utils.list_db_objects(
- session, models.Machine, **filters
- )
- if not user.is_admin and len(machines):
- machines = [m for m in machines if m.owner_id == user.id]
- return machines
-
-
-@utils.wrap_to_dict(RESP_FIELDS)
-def _update_machine(machine_id, session=None, **kwargs):
- """Update a machine."""
- machine = _get_machine(machine_id, session=session)
- return utils.update_db_object(session, machine, **kwargs)
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(ipmi_credentials=utils.check_ipmi_credentials)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_MACHINE
-)
-def update_machine(machine_id, user=None, session=None, **kwargs):
- """Update a machine."""
- return _update_machine(
- machine_id, session=session, **kwargs
- )
-
-
-# replace [ipmi_credentials, tag, location] with
-# [patched_ipmi_credentials, patched_tag, patched_location]
-# in kwargs. It tells the db that these fields will be patched.
-@utils.replace_filters(
- ipmi_credentials='patched_ipmi_credentials',
- tag='patched_tag',
- location='patched_location'
-)
-@utils.supported_filters(
- optional_support_keys=PATCHED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@utils.output_validates(ipmi_credentials=utils.check_ipmi_credentials)
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_MACHINE
-)
-def patch_machine(machine_id, user=None, session=None, **kwargs):
- """Patch a machine."""
- return _update_machine(
- machine_id, session=session, **kwargs
- )
-
-
-def _check_machine_deletable(machine):
- """Check a machine deletable."""
- if machine.host:
- host = machine.host
- raise exception.NotAcceptable(
- 'machine %s has host %s on it' % (
- machine.mac, host.name
- )
- )
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_MACHINE
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def del_machine(machine_id, user=None, session=None, **kwargs):
- """Delete a machine."""
- machine = _get_machine(machine_id, session=session)
- _check_machine_deletable(machine)
- return utils.del_db_object(session, machine)
-
-
-@utils.supported_filters(optional_support_keys=['poweron'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEPLOY_HOST
-)
-@utils.wrap_to_dict(
- RESP_DEPLOY_FIELDS,
- machine=RESP_FIELDS
-)
-def poweron_machine(
- machine_id, poweron={}, user=None, session=None, **kwargs
-):
- """power on machine."""
- from compass.tasks import client as celery_client
- machine = _get_machine(
- machine_id, session=session
- )
- if not user:
- user_id = machine.owner_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- celery_client.celery.send_task(
- 'compass.tasks.poweron_machine',
- (machine_id,),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- return {
- 'status': 'poweron %s action sent' % machine.mac,
- 'machine': machine
- }
-
-
-@utils.supported_filters(optional_support_keys=['poweroff'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEPLOY_HOST
-)
-@utils.wrap_to_dict(
- RESP_DEPLOY_FIELDS,
- machine=RESP_FIELDS
-)
-def poweroff_machine(
- machine_id, poweroff={}, user=None, session=None, **kwargs
-):
- """power off machine."""
- from compass.tasks import client as celery_client
- machine = _get_machine(
- machine_id, session=session
- )
- if not user:
- user_id = machine.owner_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- celery_client.celery.send_task(
- 'compass.tasks.poweroff_machine',
- (machine_id,),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- return {
- 'status': 'poweroff %s action sent' % machine.mac,
- 'machine': machine
- }
-
-
-@utils.supported_filters(optional_support_keys=['reset'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEPLOY_HOST
-)
-@utils.wrap_to_dict(
- RESP_DEPLOY_FIELDS,
- machine=RESP_FIELDS
-)
-def reset_machine(
- machine_id, reset={}, user=None, session=None, **kwargs
-):
- """reset machine."""
- from compass.tasks import client as celery_client
- machine = _get_machine(
- machine_id, session=session
- )
- if not user:
- user_id = machine.owner_id
- user_dict = user_api.get_user(user_id, session=session)
- user_email = user_dict['email']
- else:
- user_email = user.email
- celery_client.celery.send_task(
- 'compass.tasks.reset_machine',
- (machine_id,),
- queue=user_email,
- exchange=user_email,
- routing_key=user_email
- )
- return {
- 'status': 'reset %s action sent' % machine.mac,
- 'machine': machine
- }
diff --git a/compass-tasks/db/api/metadata.py b/compass-tasks/db/api/metadata.py
deleted file mode 100644
index 16310c8..0000000
--- a/compass-tasks/db/api/metadata.py
+++ /dev/null
@@ -1,517 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Metadata related database operations."""
-import copy
-import logging
-import string
-
-from compass.db.api import adapter as adapter_api
-from compass.db.api import database
-from compass.db.api import utils
-from compass.db import callback as metadata_callback
-from compass.db import exception
-from compass.db import models
-from compass.db import validator as metadata_validator
-
-
-from compass.utils import setting_wrapper as setting
-from compass.utils import util
-
-
-OS_FIELDS = None
-PACKAGE_FIELDS = None
-FLAVOR_FIELDS = None
-OSES_METADATA = None
-PACKAGES_METADATA = None
-FLAVORS_METADATA = None
-OSES_METADATA_UI_CONVERTERS = None
-FLAVORS_METADATA_UI_CONVERTERS = None
-
-
-def _get_field_from_configuration(configs):
- """Get fields from configurations."""
- fields = {}
- for config in configs:
- if not isinstance(config, dict):
- raise exception.InvalidParameter(
- 'config %s is not dict' % config
- )
- field_name = config['NAME']
- fields[field_name] = {
- 'name': field_name,
- 'id': field_name,
- 'field_type': config.get('FIELD_TYPE', basestring),
- 'display_type': config.get('DISPLAY_TYPE', 'text'),
- 'validator': config.get('VALIDATOR', None),
- 'js_validator': config.get('JS_VALIDATOR', None),
- 'description': config.get('DESCRIPTION', field_name)
- }
- return fields
-
-
-def _get_os_fields_from_configuration():
- """Get os fields from os field config dir."""
- env_locals = {}
- env_locals.update(metadata_validator.VALIDATOR_LOCALS)
- env_locals.update(metadata_callback.CALLBACK_LOCALS)
- configs = util.load_configs(
- setting.OS_FIELD_DIR,
- env_locals=env_locals
- )
- return _get_field_from_configuration(
- configs
- )
-
-
-def _get_package_fields_from_configuration():
- """Get package fields from package field config dir."""
- env_locals = {}
- env_locals.update(metadata_validator.VALIDATOR_LOCALS)
- env_locals.update(metadata_callback.CALLBACK_LOCALS)
- configs = util.load_configs(
- setting.PACKAGE_FIELD_DIR,
- env_locals=env_locals
- )
- return _get_field_from_configuration(
- configs
- )
-
-
-def _get_flavor_fields_from_configuration():
- """Get flavor fields from flavor field config dir."""
- env_locals = {}
- env_locals.update(metadata_validator.VALIDATOR_LOCALS)
- env_locals.update(metadata_callback.CALLBACK_LOCALS)
- configs = util.load_configs(
- setting.FLAVOR_FIELD_DIR,
- env_locals=env_locals
- )
- return _get_field_from_configuration(
- configs
- )
-
-
-def _get_metadata_from_configuration(
- path, name, config,
- fields, **kwargs
-):
- """Recursively get metadata from configuration.
-
- Args:
- path: used to indicate the path to the root element,
- mainly for troubleshooting.
- name: the key of the metadata section.
- config: the value of the metadata section.
- fields: all fields defined in os fields or package fields dir.
- """
- if not isinstance(config, dict):
- raise exception.InvalidParameter(
- '%s config %s is not dict' % (path, config)
- )
- metadata_self = config.get('_self', {})
- if 'field' in metadata_self:
- field_name = metadata_self['field']
- field = fields[field_name]
- else:
- field = {}
- # mapping_to may contain $ placeholders like $partition. Here we replace
- # $partition with the key of the corresponding config. The backend can
- # then use this feature to support multiple partitions while the
- # partition metadata is declared in only one place.
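- # For example (illustrative only), with kwargs={'partition': '/var'}:
- #   string.Template('$partition/percentage').safe_substitute(
- #       partition='/var')
- # evaluates to '/var/percentage'.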
- mapping_to_template = metadata_self.get('mapping_to', None)
- if mapping_to_template:
- mapping_to = string.Template(
- mapping_to_template
- ).safe_substitute(
- **kwargs
- )
- else:
- mapping_to = None
- self_metadata = {
- 'name': name,
- 'display_name': metadata_self.get('display_name', name),
- 'field_type': field.get('field_type', dict),
- 'display_type': field.get('display_type', None),
- 'description': metadata_self.get(
- 'description', field.get('description', None)
- ),
- 'is_required': metadata_self.get('is_required', False),
- 'required_in_whole_config': metadata_self.get(
- 'required_in_whole_config', False),
- 'mapping_to': mapping_to,
- 'validator': metadata_self.get(
- 'validator', field.get('validator', None)
- ),
- 'js_validator': metadata_self.get(
- 'js_validator', field.get('js_validator', None)
- ),
- 'default_value': metadata_self.get('default_value', None),
- 'default_callback': metadata_self.get('default_callback', None),
- 'default_callback_params': metadata_self.get(
- 'default_callback_params', {}),
- 'options': metadata_self.get('options', None),
- 'options_callback': metadata_self.get('options_callback', None),
- 'options_callback_params': metadata_self.get(
- 'options_callback_params', {}),
- 'autofill_callback': metadata_self.get(
- 'autofill_callback', None),
- 'autofill_callback_params': metadata_self.get(
- 'autofill_callback_params', {}),
- 'required_in_options': metadata_self.get(
- 'required_in_options', False)
- }
- self_metadata.update(kwargs)
- metadata = {'_self': self_metadata}
- # Key extensions are used to do two things:
- # one is to return the extended metadata in which $<something>
- # is replaced by its possible extensions.
- # The other is to record the mapping from $<something> to the extended
- # value for use in later mapping_to substitution.
- # TODO(grace): select a better name than key_extensions if
- # you think of one.
- # Suppose key_extensions is {'$partition': ['/var', '/']}. The metadata
- # for $partition will then be mapped to {'/var': ..., '/': ...}, and
- # kwargs={'partition': '/var'} and kwargs={'partition': '/'} will be
- # passed to the recursive metadata parsing of the sub metadata under
- # '/var' and '/'. In that sub metadata parsing, these kwargs are used
- # to substitute mapping_to.
- key_extensions = metadata_self.get('key_extensions', {})
- general_keys = []
- for key, value in config.items():
- if key.startswith('_'):
- continue
- if key in key_extensions:
- if not key.startswith('$'):
- raise exception.InvalidParameter(
- '%s subkey %s should start with $' % (
- path, key
- )
- )
- extended_keys = key_extensions[key]
- for extended_key in extended_keys:
- if extended_key.startswith('$'):
- raise exception.InvalidParameter(
- '%s extended key %s should not start with $' % (
- path, extended_key
- )
- )
- sub_kwargs = dict(kwargs)
- sub_kwargs[key[1:]] = extended_key
- metadata[extended_key] = _get_metadata_from_configuration(
- '%s/%s' % (path, extended_key), extended_key, value,
- fields, **sub_kwargs
- )
- else:
- if key.startswith('$'):
- general_keys.append(key)
- metadata[key] = _get_metadata_from_configuration(
- '%s/%s' % (path, key), key, value,
- fields, **kwargs
- )
- if len(general_keys) > 1:
- raise exception.InvalidParameter(
- 'found multiple general keys in %s: %s' % (
- path, general_keys
- )
- )
- return metadata
-
-
-def _get_oses_metadata_from_configuration():
- """Get os metadata from os metadata config dir."""
- oses_metadata = {}
- env_locals = {}
- env_locals.update(metadata_validator.VALIDATOR_LOCALS)
- env_locals.update(metadata_callback.CALLBACK_LOCALS)
- configs = util.load_configs(
- setting.OS_METADATA_DIR,
- env_locals=env_locals
- )
- for config in configs:
- os_name = config['OS']
- os_metadata = oses_metadata.setdefault(os_name, {})
- for key, value in config['METADATA'].items():
- os_metadata[key] = _get_metadata_from_configuration(
- key, key, value, OS_FIELDS
- )
-
- oses = adapter_api.OSES
- parents = {}
- for os_name, os in oses.items():
- parent = os.get('parent', None)
- parents[os_name] = parent
- for os_name, os in oses.items():
- oses_metadata[os_name] = util.recursive_merge_dict(
- os_name, oses_metadata, parents
- )
- return oses_metadata
-
-
-def _get_packages_metadata_from_configuration():
- """Get package metadata from package metadata config dir."""
- packages_metadata = {}
- env_locals = {}
- env_locals.update(metadata_validator.VALIDATOR_LOCALS)
- env_locals.update(metadata_callback.CALLBACK_LOCALS)
- configs = util.load_configs(
- setting.PACKAGE_METADATA_DIR,
- env_locals=env_locals
- )
- for config in configs:
- adapter_name = config['ADAPTER']
- package_metadata = packages_metadata.setdefault(adapter_name, {})
- for key, value in config['METADATA'].items():
- package_metadata[key] = _get_metadata_from_configuration(
- key, key, value, PACKAGE_FIELDS
- )
- adapters = adapter_api.ADAPTERS
- parents = {}
- for adapter_name, adapter in adapters.items():
- parent = adapter.get('parent', None)
- parents[adapter_name] = parent
- for adapter_name, adapter in adapters.items():
- packages_metadata[adapter_name] = util.recursive_merge_dict(
- adapter_name, packages_metadata, parents
- )
- return packages_metadata
-
-
-def _get_flavors_metadata_from_configuration():
- """Get flavor metadata from flavor metadata config dir."""
- flavors_metadata = {}
- env_locals = {}
- env_locals.update(metadata_validator.VALIDATOR_LOCALS)
- env_locals.update(metadata_callback.CALLBACK_LOCALS)
- configs = util.load_configs(
- setting.FLAVOR_METADATA_DIR,
- env_locals=env_locals
- )
- for config in configs:
- adapter_name = config['ADAPTER']
- flavor_name = config['FLAVOR']
- flavor_metadata = flavors_metadata.setdefault(
- adapter_name, {}
- ).setdefault(flavor_name, {})
- for key, value in config['METADATA'].items():
- flavor_metadata[key] = _get_metadata_from_configuration(
- key, key, value, FLAVOR_FIELDS
- )
-
- packages_metadata = PACKAGES_METADATA
- adapters_flavors = adapter_api.ADAPTERS_FLAVORS
- for adapter_name, adapter_flavors in adapters_flavors.items():
- package_metadata = packages_metadata.get(adapter_name, {})
- for flavor_name, flavor in adapter_flavors.items():
- flavor_metadata = flavors_metadata.setdefault(
- adapter_name, {}
- ).setdefault(flavor_name, {})
- util.merge_dict(flavor_metadata, package_metadata, override=False)
- return flavors_metadata
-
-
-def _filter_metadata(metadata, **kwargs):
- if not isinstance(metadata, dict):
- return metadata
- filtered_metadata = {}
- for key, value in metadata.items():
- if key == '_self':
- default_value = value.get('default_value', None)
- if default_value is None:
- default_callback_params = value.get(
- 'default_callback_params', {}
- )
- callback_params = dict(kwargs)
- if default_callback_params:
- callback_params.update(default_callback_params)
- default_callback = value.get('default_callback', None)
- if default_callback:
- default_value = default_callback(key, **callback_params)
- options = value.get('options', None)
- if options is None:
- options_callback_params = value.get(
- 'options_callback_params', {}
- )
- callback_params = dict(kwargs)
- if options_callback_params:
- callback_params.update(options_callback_params)
-
- options_callback = value.get('options_callback', None)
- if options_callback:
- options = options_callback(key, **callback_params)
- filtered_metadata[key] = value
- if default_value is not None:
- filtered_metadata[key]['default_value'] = default_value
- if options is not None:
- filtered_metadata[key]['options'] = options
- else:
- filtered_metadata[key] = _filter_metadata(value, **kwargs)
- return filtered_metadata
-
-
-def _load_metadata(force_reload=False):
- """Load metadata information into memory.
-
- If force_reload, the metadata information will be reloaded
- even if the metadata is already loaded.
- """
- adapter_api.load_adapters_internal(force_reload=force_reload)
- global OS_FIELDS
- if force_reload or OS_FIELDS is None:
- OS_FIELDS = _get_os_fields_from_configuration()
- global PACKAGE_FIELDS
- if force_reload or PACKAGE_FIELDS is None:
- PACKAGE_FIELDS = _get_package_fields_from_configuration()
- global FLAVOR_FIELDS
- if force_reload or FLAVOR_FIELDS is None:
- FLAVOR_FIELDS = _get_flavor_fields_from_configuration()
- global OSES_METADATA
- if force_reload or OSES_METADATA is None:
- OSES_METADATA = _get_oses_metadata_from_configuration()
- global PACKAGES_METADATA
- if force_reload or PACKAGES_METADATA is None:
- PACKAGES_METADATA = _get_packages_metadata_from_configuration()
- global FLAVORS_METADATA
- if force_reload or FLAVORS_METADATA is None:
- FLAVORS_METADATA = _get_flavors_metadata_from_configuration()
- global OSES_METADATA_UI_CONVERTERS
- if force_reload or OSES_METADATA_UI_CONVERTERS is None:
- OSES_METADATA_UI_CONVERTERS = (
- _get_oses_metadata_ui_converters_from_configuration()
- )
- global FLAVORS_METADATA_UI_CONVERTERS
- if force_reload or FLAVORS_METADATA_UI_CONVERTERS is None:
- FLAVORS_METADATA_UI_CONVERTERS = (
- _get_flavors_metadata_ui_converters_from_configuration()
- )
-
-
-def _get_oses_metadata_ui_converters_from_configuration():
- """Get os metadata ui converters from os metadata mapping config dir.
-
- An os metadata ui converter is used to convert os metadata into
- a format the UI can understand and display.
- """
- oses_metadata_ui_converters = {}
- configs = util.load_configs(setting.OS_MAPPING_DIR)
- for config in configs:
- os_name = config['OS']
- oses_metadata_ui_converters[os_name] = config.get('CONFIG_MAPPING', {})
-
- oses = adapter_api.OSES
- parents = {}
- for os_name, os in oses.items():
- parent = os.get('parent', None)
- parents[os_name] = parent
- for os_name, os in oses.items():
- oses_metadata_ui_converters[os_name] = util.recursive_merge_dict(
- os_name, oses_metadata_ui_converters, parents
- )
- return oses_metadata_ui_converters
-
-
-def _get_flavors_metadata_ui_converters_from_configuration():
- """Get flavor metadata ui converters from flavor mapping config dir."""
- flavors_metadata_ui_converters = {}
- configs = util.load_configs(setting.FLAVOR_MAPPING_DIR)
- for config in configs:
- adapter_name = config['ADAPTER']
- flavor_name = config['FLAVOR']
- flavors_metadata_ui_converters.setdefault(
- adapter_name, {}
- )[flavor_name] = config.get('CONFIG_MAPPING', {})
- adapters = adapter_api.ADAPTERS
- parents = {}
- for adapter_name, adapter in adapters.items():
- parent = adapter.get('parent', None)
- parents[adapter_name] = parent
- for adapter_name, adapter in adapters.items():
- flavors_metadata_ui_converters[adapter_name] = (
- util.recursive_merge_dict(
- adapter_name, flavors_metadata_ui_converters, parents
- )
- )
- return flavors_metadata_ui_converters
-
-
-def get_packages_metadata_internal(force_reload=False):
- """Get deployable package metadata."""
- _load_metadata(force_reload=force_reload)
- metadata_mapping = {}
- adapters = adapter_api.ADAPTERS
- for adapter_name, adapter in adapters.items():
- if adapter.get('deployable'):
- metadata_mapping[adapter_name] = _filter_metadata(
- PACKAGES_METADATA.get(adapter_name, {})
- )
- else:
- logging.info(
- 'ignore metadata since its adapter %s is not deployable',
- adapter_name
- )
- return metadata_mapping
-
-
-def get_flavors_metadata_internal(force_reload=False):
- """Get deployable flavor metadata."""
- _load_metadata(force_reload=force_reload)
- metadata_mapping = {}
- adapters_flavors = adapter_api.ADAPTERS_FLAVORS
- for adapter_name, adapter_flavors in adapters_flavors.items():
- adapter = adapter_api.ADAPTERS[adapter_name]
- if not adapter.get('deployable'):
- logging.info(
- 'ignore metadata since its adapter %s is not deployable',
- adapter_name
- )
- continue
- for flavor_name, flavor in adapter_flavors.items():
- flavor_metadata = FLAVORS_METADATA.get(
- adapter_name, {}
- ).get(flavor_name, {})
- metadata = _filter_metadata(flavor_metadata)
- metadata_mapping.setdefault(
- adapter_name, {}
- )[flavor_name] = metadata
- return metadata_mapping
-
-
-def get_flavors_metadata_ui_converters_internal(force_reload=False):
- """Get usable flavor metadata ui converters."""
- _load_metadata(force_reload=force_reload)
- return FLAVORS_METADATA_UI_CONVERTERS
-
-
-def get_oses_metadata_internal(force_reload=False):
- """Get deployable os metadata."""
- _load_metadata(force_reload=force_reload)
- metadata_mapping = {}
- oses = adapter_api.OSES
- for os_name, os in oses.items():
- if os.get('deployable'):
- metadata_mapping[os_name] = _filter_metadata(
- OSES_METADATA.get(os_name, {})
- )
- else:
- logging.info(
- 'ignore metadata since its os %s is not deployable',
- os_name
- )
- return metadata_mapping
-
-
-def get_oses_metadata_ui_converters_internal(force_reload=False):
- """Get usable os metadata ui converters."""
- _load_metadata(force_reload=force_reload)
- return OSES_METADATA_UI_CONVERTERS
diff --git a/compass-tasks/db/api/metadata_holder.py b/compass-tasks/db/api/metadata_holder.py
deleted file mode 100644
index 24afc67..0000000
--- a/compass-tasks/db/api/metadata_holder.py
+++ /dev/null
@@ -1,731 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Metadata related object holder."""
-import logging
-
-from compass.db.api import adapter as adapter_api
-from compass.db.api import adapter_holder as adapter_holder_api
-from compass.db.api import database
-from compass.db.api import metadata as metadata_api
-from compass.db.api import permission
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-from compass.utils import setting_wrapper as setting
-from compass.utils import util
-
-
-RESP_METADATA_FIELDS = [
- 'os_config', 'package_config'
-]
-RESP_UI_METADATA_FIELDS = [
- 'os_global_config', 'flavor_config'
-]
-
-
-def load_metadatas(force_reload=False):
- """Load metadatas."""
- # TODO(xicheng): today metadata.py loads metadata into memory in its
- # original file format. We take that in-memory metadata, do some
- # translation, and store the translated metadata in memory again in
- # metadata_holder.py. The api can only access the global in-memory
- # data in metadata_holder.py.
- _load_os_metadatas(force_reload=force_reload)
- _load_package_metadatas(force_reload=force_reload)
- _load_flavor_metadatas(force_reload=force_reload)
- _load_os_metadata_ui_converters(force_reload=force_reload)
- _load_flavor_metadata_ui_converters(force_reload=force_reload)
-
-
-def _load_os_metadata_ui_converters(force_reload=False):
- global OS_METADATA_UI_CONVERTERS
- if force_reload or OS_METADATA_UI_CONVERTERS is None:
- logging.info('load os metadatas ui converters into memory')
- OS_METADATA_UI_CONVERTERS = (
- metadata_api.get_oses_metadata_ui_converters_internal(
- force_reload=force_reload
- )
- )
-
-
-def _load_os_metadatas(force_reload=False):
- """Load os metadata from inmemory db and map it by os_id."""
- global OS_METADATA_MAPPING
- if force_reload or OS_METADATA_MAPPING is None:
- logging.info('load os metadatas into memory')
- OS_METADATA_MAPPING = metadata_api.get_oses_metadata_internal(
- force_reload=force_reload
- )
-
-
-def _load_flavor_metadata_ui_converters(force_reload=False):
- """Load flavor metadata ui converters from inmemory db.
-
- The loaded metadata is mapped by flavor id.
- """
- global FLAVOR_METADATA_UI_CONVERTERS
- if force_reload or FLAVOR_METADATA_UI_CONVERTERS is None:
- logging.info('load flavor metadata ui converters into memory')
- FLAVOR_METADATA_UI_CONVERTERS = {}
- adapters_flavors_metadata_ui_converters = (
- metadata_api.get_flavors_metadata_ui_converters_internal(
- force_reload=force_reload
- )
- )
- for adapter_name, adapter_flavors_metadata_ui_converters in (
- adapters_flavors_metadata_ui_converters.items()
- ):
- for flavor_name, flavor_metadata_ui_converter in (
- adapter_flavors_metadata_ui_converters.items()
- ):
- FLAVOR_METADATA_UI_CONVERTERS[
- '%s:%s' % (adapter_name, flavor_name)
- ] = flavor_metadata_ui_converter
-
-
-@util.deprecated
-def _load_package_metadatas(force_reload=False):
- """Load deployable package metadata from inmemory db."""
- global PACKAGE_METADATA_MAPPING
- if force_reload or PACKAGE_METADATA_MAPPING is None:
- logging.info('load package metadatas into memory')
- PACKAGE_METADATA_MAPPING = (
- metadata_api.get_packages_metadata_internal(
- force_reload=force_reload
- )
- )
-
-
-def _load_flavor_metadatas(force_reload=False):
- """Load flavor metadata from inmemory db.
-
- The loaded metadata are mapped by flavor id.
- """
- global FLAVOR_METADATA_MAPPING
- if force_reload or FLAVOR_METADATA_MAPPING is None:
- logging.info('load flavor metadatas into memory')
- FLAVOR_METADATA_MAPPING = {}
- adapters_flavors_metadata = (
- metadata_api.get_flavors_metadata_internal(
- force_reload=force_reload
- )
- )
- for adapter_name, adapter_flavors_metadata in (
- adapters_flavors_metadata.items()
- ):
- for flavor_name, flavor_metadata in (
- adapter_flavors_metadata.items()
- ):
- FLAVOR_METADATA_MAPPING[
- '%s:%s' % (adapter_name, flavor_name)
- ] = flavor_metadata
-
-
-OS_METADATA_MAPPING = None
-PACKAGE_METADATA_MAPPING = None
-FLAVOR_METADATA_MAPPING = None
-OS_METADATA_UI_CONVERTERS = None
-FLAVOR_METADATA_UI_CONVERTERS = None
-
-
-def validate_os_config(
- config, os_id, whole_check=False, **kwargs
-):
- """Validate os config."""
- load_metadatas()
- if os_id not in OS_METADATA_MAPPING:
- raise exception.InvalidParameter(
- 'os %s is not found in os metadata mapping' % os_id
- )
- _validate_config(
- '', config, OS_METADATA_MAPPING[os_id],
- whole_check, **kwargs
- )
-
-
-@util.deprecated
-def validate_package_config(
- config, adapter_id, whole_check=False, **kwargs
-):
- """Validate package config."""
- load_metadatas()
- if adapter_id not in PACKAGE_METADATA_MAPPING:
- raise exception.InvalidParameter(
- 'adapter %s is not found in package metadata mapping' % adapter_id
- )
- _validate_config(
- '', config, PACKAGE_METADATA_MAPPING[adapter_id],
- whole_check, **kwargs
- )
-
-
-def validate_flavor_config(
- config, flavor_id, whole_check=False, **kwargs
-):
- """Validate flavor config."""
- load_metadatas()
- if not flavor_id:
- logging.info('There is no flavor, skipping flavor validation...')
- elif flavor_id not in FLAVOR_METADATA_MAPPING:
- raise exception.InvalidParameter(
- 'flavor %s is not found in flavor metadata mapping' % flavor_id
- )
- else:
- _validate_config(
- '', config, FLAVOR_METADATA_MAPPING[flavor_id],
- whole_check, **kwargs
- )
-
-
-def _filter_metadata(metadata, **kwargs):
- """Filter metadata before return it to api.
-
- Some metadata fields are not json compatible or
- only used in db/api internally.
- We should strip these fields out before return to api.
- """
- if not isinstance(metadata, dict):
- return metadata
- filtered_metadata = {}
- for key, value in metadata.items():
- if key == '_self':
- filtered_metadata[key] = {
- 'name': value['name'],
- 'description': value.get('description', None),
- 'default_value': value.get('default_value', None),
- 'is_required': value.get('is_required', False),
- 'required_in_whole_config': value.get(
- 'required_in_whole_config', False),
- 'js_validator': value.get('js_validator', None),
- 'options': value.get('options', None),
- 'required_in_options': value.get(
- 'required_in_options', False),
- 'field_type': value.get(
- 'field_type_data', 'str'),
- 'display_type': value.get('display_type', None),
- 'mapping_to': value.get('mapping_to', None)
- }
- else:
- filtered_metadata[key] = _filter_metadata(value, **kwargs)
- return filtered_metadata
-
-
-@util.deprecated
-def _get_package_metadata(adapter_id):
- """get package metadata."""
- load_metadatas()
- if adapter_id not in PACKAGE_METADATA_MAPPING:
- raise exception.RecordNotExists(
- 'adapter %s does not exist' % adapter_id
- )
- return _filter_metadata(
- PACKAGE_METADATA_MAPPING[adapter_id]
- )
-
-
-@util.deprecated
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_METADATA_FIELDS)
-def get_package_metadata(adapter_id, user=None, session=None, **kwargs):
- """Get package metadata from adapter."""
- return {
- 'package_config': _get_package_metadata(adapter_id)
- }
-
-
-def _get_flavor_metadata(flavor_id):
- """get flavor metadata."""
- load_metadatas()
- if not flavor_id:
- logging.info('There is no flavor id, skipping...')
- elif flavor_id not in FLAVOR_METADATA_MAPPING:
- raise exception.RecordNotExists(
- 'flavor %s does not exist' % flavor_id
- )
- else:
- return _filter_metadata(FLAVOR_METADATA_MAPPING[flavor_id])
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_METADATA_FIELDS)
-def get_flavor_metadata(flavor_id, user=None, session=None, **kwargs):
- """Get flavor metadata by flavor."""
- return {
- 'package_config': _get_flavor_metadata(flavor_id)
- }
-
-
-def _get_os_metadata(os_id):
- """get os metadata."""
- load_metadatas()
- if os_id not in OS_METADATA_MAPPING:
- raise exception.RecordNotExists(
- 'os %s does not exist' % os_id
- )
- return _filter_metadata(OS_METADATA_MAPPING[os_id])
-
-
-def _get_os_metadata_ui_converter(os_id):
- """get os metadata ui converter."""
- load_metadatas()
- if os_id not in OS_METADATA_UI_CONVERTERS:
- raise exception.RecordNotExists(
- 'os %s does not exist' % os_id
- )
- return OS_METADATA_UI_CONVERTERS[os_id]
-
-
-def _get_flavor_metadata_ui_converter(flavor_id):
- """get flavor metadata ui converter."""
- load_metadatas()
- if flavor_id not in FLAVOR_METADATA_UI_CONVERTERS:
- raise exception.RecordNotExists(
- 'flavor %s does not exist' % flavor_id
- )
- return FLAVOR_METADATA_UI_CONVERTERS[flavor_id]
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_METADATA_FIELDS)
-def get_os_metadata(os_id, user=None, session=None, **kwargs):
- """get os metadatas."""
- return {'os_config': _get_os_metadata(os_id)}
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
-def get_os_ui_metadata(os_id, user=None, session=None, **kwargs):
- """Get os metadata ui converter by os."""
- metadata = _get_os_metadata(os_id)
- metadata_ui_converter = _get_os_metadata_ui_converter(os_id)
- return _get_ui_metadata(metadata, metadata_ui_converter)
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
-def get_flavor_ui_metadata(flavor_id, user=None, session=None, **kwargs):
- """Get flavor ui metadata by flavor."""
- metadata = _get_flavor_metadata(flavor_id)
- metadata_ui_converter = _get_flavor_metadata_ui_converter(flavor_id)
- return _get_ui_metadata(metadata, metadata_ui_converter)
-
-
-def _get_ui_metadata(metadata, metadata_ui_converter):
- """convert metadata to ui metadata.
-
- Args:
- metadata: metadata we defined in metadata files.
- metadata_ui_converter: metadata ui converter defined in metadata
- mapping files. Used to convert original
- metadata to ui understandable metadata.
-
- Returns:
- ui understandable metadata.
- """
- ui_metadata = {}
- ui_metadata[metadata_ui_converter['mapped_name']] = []
- for mapped_child in metadata_ui_converter['mapped_children']:
- data_dict = {}
- for ui_key, ui_value in mapped_child.items():
- for key, value in ui_value.items():
- if 'data' == key:
- result_data = []
- _get_ui_metadata_data(
- metadata[ui_key], value, result_data
- )
- data_dict['data'] = result_data
- else:
- data_dict[key] = value
- ui_metadata[metadata_ui_converter['mapped_name']].append(data_dict)
- return ui_metadata
-
-
-def _get_ui_metadata_data(metadata, config, result_data):
- """Get ui metadata data and fill to result."""
- data_dict = {}
- for key, config_value in config.items():
- if isinstance(config_value, dict) and key != 'content_data':
- if key in metadata.keys():
- _get_ui_metadata_data(metadata[key], config_value, result_data)
- else:
- _get_ui_metadata_data(metadata, config_value, result_data)
- elif isinstance(config_value, list):
- option_list = []
- for item in config_value:
- if isinstance(item, dict):
- option_list.append(item)
- data_dict[key] = option_list
- else:
- if isinstance(metadata['_self'][item], bool):
- data_dict[item] = str(metadata['_self'][item]).lower()
- else:
- data_dict[item] = metadata['_self'][item]
- else:
- data_dict[key] = config_value
- if data_dict:
- result_data.append(data_dict)
- return result_data
-
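-# Illustrative sketch (hedged example, not from the original metadata files):
-# rough shapes of the inputs _get_ui_metadata() expects. The names below
-# ('os_global_config', 'general', 'language') are placeholders.
-example_metadata = {
-    'general': {
-        '_self': {},
-        'language': {'_self': {'default_value': 'EN', 'options': ['EN']}},
-    }
-}
-example_ui_converter = {
-    'mapped_name': 'os_global_config',
-    'mapped_children': [{
-        'general': {
-            'title': 'General',
-            'data': {'language': {'label': 'Language', 'content_data': {}}},
-        }
-    }],
-}
-# _get_ui_metadata(example_metadata, example_ui_converter) would produce a
-# dict keyed by 'os_global_config' whose single entry keeps 'title' as-is and
-# flattens the 'data' mapping into a list via _get_ui_metadata_data().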
-
-@util.deprecated
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_METADATA_FIELDS)
-def get_package_os_metadata(
- adapter_id, os_id,
- user=None, session=None, **kwargs
-):
- """Get metadata by adapter and os."""
- adapter = adapter_holder_api.get_adapter(
- adapter_id, user=user, session=session
- )
- os_ids = [os['id'] for os in adapter['supported_oses']]
- if os_id not in os_ids:
- raise exception.InvalidParameter(
- 'os %s is not in the supported os list of adapter %s' % (
- os_id, adapter_id
- )
- )
- metadatas = {}
- metadatas['os_config'] = _get_os_metadata(
- os_id
- )
- metadatas['package_config'] = _get_package_metadata(
- adapter_id
- )
- return metadatas
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_METADATAS
-)
-@utils.wrap_to_dict(RESP_METADATA_FIELDS)
-def get_flavor_os_metadata(
- flavor_id, os_id,
- user=None, session=None, **kwargs
-):
- """Get metadata by flavor and os."""
- flavor = adapter_holder_api.get_flavor(
- flavor_id, user=user, session=session
- )
- adapter_id = flavor['adapter_id']
- adapter = adapter_holder_api.get_adapter(
- adapter_id, user=user, session=session
- )
- os_ids = [os['id'] for os in adapter['supported_oses']]
- if os_id not in os_ids:
- raise exception.InvalidParameter(
- 'os %s is not in the supported os list of adapter %s' % (
- os_id, adapter_id
- )
- )
- metadatas = {}
- metadatas['os_config'] = _get_os_metadata(
- os_id
- )
- metadatas['package_config'] = _get_flavor_metadata(
- flavor_id
- )
- return metadatas
-
-
-def _validate_self(
- config_path, config_key, config,
- metadata, whole_check,
- **kwargs
-):
- """validate config by metadata self section."""
- logging.debug('validate config self %s', config_path)
- if '_self' not in metadata:
- if isinstance(config, dict):
- _validate_config(
- config_path, config, metadata, whole_check, **kwargs
- )
- return
- field_type = metadata['_self'].get('field_type', basestring)
- if not isinstance(config, field_type):
- raise exception.InvalidParameter(
- '%s config type is not %s: %s' % (config_path, field_type, config)
- )
- is_required = metadata['_self'].get(
- 'is_required', False
- )
- required_in_whole_config = metadata['_self'].get(
- 'required_in_whole_config', False
- )
- if isinstance(config, basestring):
- if config == '' and not is_required and not required_in_whole_config:
- # ignore empty config when it is optional
- return
- required_in_options = metadata['_self'].get(
- 'required_in_options', False
- )
- options = metadata['_self'].get('options', None)
- if required_in_options:
- if field_type in [int, basestring, float, bool]:
- if options and config not in options:
- raise exception.InvalidParameter(
- '%s config is not in %s: %s' % (
- config_path, options, config
- )
- )
- elif field_type in [list, tuple]:
- if options and not set(config).issubset(set(options)):
- raise exception.InvalidParameter(
- '%s config is not in %s: %s' % (
- config_path, options, config
- )
- )
- elif field_type == dict:
- if options and not set(config.keys()).issubset(set(options)):
- raise exception.InvalidParameter(
- '%s config is not in %s: %s' % (
- config_path, options, config
- )
- )
- validator = metadata['_self'].get('validator', None)
- logging.debug('validate by validator %s', validator)
- if validator:
- if not validator(config_key, config, **kwargs):
- raise exception.InvalidParameter(
- '%s config is invalid' % config_path
- )
- if isinstance(config, dict):
- _validate_config(
- config_path, config, metadata, whole_check, **kwargs
- )
-
-
-def _validate_config(
- config_path, config, metadata, whole_check,
- **kwargs
-):
- """validate config by metadata."""
- logging.debug('validate config %s', config_path)
- generals = {}
- specified = {}
- for key, value in metadata.items():
- if key.startswith('$'):
- generals[key] = value
- elif key.startswith('_'):
- pass
- else:
- specified[key] = value
- config_keys = set(config.keys())
- specified_keys = set(specified.keys())
- intersect_keys = config_keys & specified_keys
- not_found_keys = config_keys - specified_keys
- redundant_keys = specified_keys - config_keys
- for key in redundant_keys:
- if '_self' not in specified[key]:
- continue
- if specified[key]['_self'].get('is_required', False):
- raise exception.InvalidParameter(
- '%s/%s not found but it is required' % (
- config_path, key
- )
- )
- if (
- whole_check and
- specified[key]['_self'].get(
- 'required_in_whole_config', False
- )
- ):
- raise exception.InvalidParameter(
- '%s/%s not found but it is required in whole config' % (
- config_path, key
- )
- )
- for key in intersect_keys:
- _validate_self(
- '%s/%s' % (config_path, key),
- key, config[key], specified[key], whole_check,
- **kwargs
- )
- for key in not_found_keys:
- if not generals:
- raise exception.InvalidParameter(
- 'key %s missing in metadata %s' % (
- key, config_path
- )
- )
- for general_key, general_value in generals.items():
- _validate_self(
- '%s/%s' % (config_path, key),
- key, config[key], general_value, whole_check,
- **kwargs
- )
-
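-# Illustrative sketch (hedged example; the 'security'/'$credentials' names
-# are made up, not taken from the real metadata files): literal keys in the
-# metadata match config keys exactly, '$'-prefixed keys act as wildcards for
-# user-chosen names, and '_self' carries the per-field constraints.
-example_metadata = {
-    'security': {
-        '_self': {'is_required': True, 'field_type': dict},
-        '$credentials': {
-            '_self': {'field_type': dict},
-            'username': {'_self': {'field_type': str, 'is_required': True}},
-        },
-    }
-}
-example_config = {'security': {'service': {'username': 'admin'}}}
-# _validate_config('', example_config, example_metadata, True) walks both
-# trees in parallel: 'security' matches its literal key, 'service' falls
-# through to the '$credentials' wildcard, and dropping the required
-# 'username' would raise exception.InvalidParameter.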
-
-def _autofill_self_config(
- config_path, config_key, config,
- metadata,
- **kwargs
-):
- """Autofill config by metadata self section."""
- if '_self' not in metadata:
- if isinstance(config, dict):
- _autofill_config(
- config_path, config, metadata, **kwargs
- )
- return config
- logging.debug(
- 'autofill %s by metadata %s', config_path, metadata['_self']
- )
- autofill_callback = metadata['_self'].get(
- 'autofill_callback', None
- )
- autofill_callback_params = metadata['_self'].get(
- 'autofill_callback_params', {}
- )
- callback_params = dict(kwargs)
- if autofill_callback_params:
- callback_params.update(autofill_callback_params)
- default_value = metadata['_self'].get(
- 'default_value', None
- )
- if default_value is not None:
- callback_params['default_value'] = default_value
- options = metadata['_self'].get(
- 'options', None
- )
- if options is not None:
- callback_params['options'] = options
- if autofill_callback:
- config = autofill_callback(
- config_key, config, **callback_params
- )
- if config is None:
- new_config = {}
- else:
- new_config = config
- if isinstance(new_config, dict):
- _autofill_config(
- config_path, new_config, metadata, **kwargs
- )
- if new_config:
- config = new_config
- return config
-
-
-def _autofill_config(
- config_path, config, metadata, **kwargs
-):
- """autofill config by metadata."""
- generals = {}
- specified = {}
- for key, value in metadata.items():
- if key.startswith('$'):
- generals[key] = value
- elif key.startswith('_'):
- pass
- else:
- specified[key] = value
- config_keys = set(config.keys())
- specified_keys = set(specified.keys())
- intersect_keys = config_keys & specified_keys
- not_found_keys = config_keys - specified_keys
- redundant_keys = specified_keys - config_keys
- for key in redundant_keys:
- self_config = _autofill_self_config(
- '%s/%s' % (config_path, key),
- key, None, specified[key], **kwargs
- )
- if self_config is not None:
- config[key] = self_config
- for key in intersect_keys:
- config[key] = _autofill_self_config(
- '%s/%s' % (config_path, key),
- key, config[key], specified[key],
- **kwargs
- )
- for key in not_found_keys:
- for general_key, general_value in generals.items():
- config[key] = _autofill_self_config(
- '%s/%s' % (config_path, key),
- key, config[key], general_value,
- **kwargs
- )
- return config
-
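-# Illustrative sketch (hedged example; the callback and the 'timezone' and
-# 'ntp_server' keys are placeholders, not real metadata entries): a field's
-# '_self' section can carry an 'autofill_callback' and a 'default_value',
-# which _autofill_config() uses to fill keys missing from the config.
-def _example_fill_timezone(config_key, config, default_value=None, **kwargs):
-    # keep an explicit value, otherwise fall back to the default
-    return config if config is not None else default_value
-
-
-example_metadata = {
-    'timezone': {'_self': {'autofill_callback': _example_fill_timezone,
-                           'default_value': 'UTC'}},
-    'ntp_server': {'_self': {}},
-}
-example_config = {'ntp_server': '10.0.0.1'}
-# _autofill_config('', example_config, example_metadata) treats 'timezone' as
-# a missing-but-known key, calls the callback with default_value='UTC', and
-# returns {'ntp_server': '10.0.0.1', 'timezone': 'UTC'}.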
-
-def autofill_os_config(
- config, os_id, **kwargs
-):
- load_metadatas()
- if os_id not in OS_METADATA_MAPPING:
- raise exception.InvalidParameter(
- 'os %s is not found in os metadata mapping' % os_id
- )
-
- return _autofill_config(
- '', config, OS_METADATA_MAPPING[os_id], **kwargs
- )
-
-
-def autofill_package_config(
- config, adapter_id, **kwargs
-):
- load_metadatas()
- if adapter_id not in PACKAGE_METADATA_MAPPING:
- raise exception.InvalidParameter(
- 'adapter %s is not found in package metadata mapping' % adapter_id
- )
-
- return _autofill_config(
- '', config, PACKAGE_METADATA_MAPPING[adapter_id], **kwargs
- )
-
-
-def autofill_flavor_config(
- config, flavor_id, **kwargs
-):
- load_metadatas()
- if not flavor_id:
- logging.info('There is no flavor, skipping...')
- elif flavor_id not in FLAVOR_METADATA_MAPPING:
- raise exception.InvalidParameter(
- 'flavor %s is not found in flavor metadata mapping' % flavor_id
- )
- else:
- return _autofill_config(
- '', config, FLAVOR_METADATA_MAPPING[flavor_id], **kwargs
- )
diff --git a/compass-tasks/db/api/network.py b/compass-tasks/db/api/network.py
deleted file mode 100644
index e2bf7d3..0000000
--- a/compass-tasks/db/api/network.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Network related database operations."""
-import logging
-import netaddr
-import re
-
-from compass.db.api import database
-from compass.db.api import permission
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-
-
-SUPPORTED_FIELDS = ['subnet', 'name']
-RESP_FIELDS = [
- 'id', 'name', 'subnet', 'created_at', 'updated_at'
-]
-ADDED_FIELDS = ['subnet']
-OPTIONAL_ADDED_FIELDS = ['name']
-IGNORE_FIELDS = [
- 'id', 'created_at', 'updated_at'
-]
-UPDATED_FIELDS = ['subnet', 'name']
-
-
-def _check_subnet(subnet):
- """Check subnet format is correct."""
- try:
- netaddr.IPNetwork(subnet)
- except Exception as error:
- logging.exception(error)
- raise exception.InvalidParameter(
- 'subnet %s format unrecognized' % subnet)
-
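-# Illustrative sketch (hedged example with made-up values): _check_subnet
-# accepts anything netaddr can parse as a CIDR block and raises
-# exception.InvalidParameter for everything else.
-for example_subnet in ('10.145.88.0/23', 'not-a-subnet'):
-    try:
-        _check_subnet(example_subnet)
-        logging.info('%s accepted', example_subnet)
-    except exception.InvalidParameter:
-        logging.info('%s rejected', example_subnet)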
-
-@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SUBNETS
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_subnets(user=None, session=None, **filters):
- """List subnets."""
- return utils.list_db_objects(
- session, models.Subnet, **filters
- )
-
-
-def _get_subnet(subnet_id, session=None, **kwargs):
- """Get subnet by subnet id."""
- if isinstance(subnet_id, (int, long)):
- return utils.get_db_object(
- session, models.Subnet,
- id=subnet_id, **kwargs
- )
- raise exception.InvalidParameter(
- 'subnet id %s type is not int compatible' % subnet_id
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SUBNETS
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_subnet(
- subnet_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """Get subnet info."""
- return _get_subnet(
- subnet_id, session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-@utils.supported_filters(
- ADDED_FIELDS, optional_support_keys=OPTIONAL_ADDED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(subnet=_check_subnet)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SUBNET
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def add_subnet(
- exception_when_existing=True, subnet=None,
- user=None, session=None, **kwargs
-):
- """Create a subnet."""
- return utils.add_db_object(
- session, models.Subnet,
- exception_when_existing, subnet, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(subnet=_check_subnet)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SUBNET
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def update_subnet(subnet_id, user=None, session=None, **kwargs):
- """Update a subnet."""
- subnet = _get_subnet(
- subnet_id, session=session
- )
- return utils.update_db_object(session, subnet, **kwargs)
-
-
-def _check_subnet_deletable(subnet):
- """Check a subnet deletable."""
- if subnet.host_networks:
- host_networks = [
- '%s:%s=%s' % (
- host_network.host.name, host_network.interface,
- host_network.ip
- )
- for host_network in subnet.host_networks
- ]
- raise exception.NotAcceptable(
- 'subnet %s contains host networks %s' % (
- subnet.subnet, host_networks
- )
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_SUBNET
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def del_subnet(subnet_id, user=None, session=None, **kwargs):
- """Delete a subnet."""
- subnet = _get_subnet(
- subnet_id, session=session
- )
- _check_subnet_deletable(subnet)
- return utils.del_db_object(session, subnet)
diff --git a/compass-tasks/db/api/permission.py b/compass-tasks/db/api/permission.py
deleted file mode 100644
index f4d777a..0000000
--- a/compass-tasks/db/api/permission.py
+++ /dev/null
@@ -1,357 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Permission database operations."""
-import re
-
-from compass.db.api import database
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-from compass.utils import util
-
-
-SUPPORTED_FIELDS = ['id', 'name', 'alias', 'description']
-RESP_FIELDS = ['id', 'name', 'alias', 'description']
-
-
-class PermissionWrapper(object):
- def __init__(self, name, alias, description):
- self.name = name
- self.alias = alias
- self.description = description
-
- def to_dict(self):
- return {
- 'name': self.name,
- 'alias': self.alias,
- 'description': self.description
- }
-
-
-PERMISSION_LIST_PERMISSIONS = PermissionWrapper(
- 'list_permissions', 'list permissions', 'list all permissions'
-)
-PERMISSION_LIST_SWITCHES = PermissionWrapper(
- 'list_switches', 'list switches', 'list all switches'
-)
-PERMISSION_LIST_SWITCH_FILTERS = PermissionWrapper(
- 'list_switch_filters',
- 'list switch filters',
- 'list switch filters'
-)
-PERMISSION_ADD_SWITCH = PermissionWrapper(
- 'add_switch', 'add switch', 'add switch'
-)
-PERMISSION_UPDATE_SWITCH_FILTERS = PermissionWrapper(
- 'update_switch_filters',
- 'update switch filters',
- 'update switch filters'
-)
-PERMISSION_DEL_SWITCH = PermissionWrapper(
- 'delete_switch', 'delete switch', 'delete switch'
-)
-PERMISSION_LIST_SWITCH_MACHINES = PermissionWrapper(
- 'list_switch_machines', 'list switch machines', 'list switch machines'
-)
-PERMISSION_ADD_SWITCH_MACHINE = PermissionWrapper(
- 'add_switch_machine', 'add switch machine', 'add switch machine'
-)
-PERMISSION_DEL_SWITCH_MACHINE = PermissionWrapper(
- 'del_switch_machine', 'delete switch machine', 'del switch machine'
-)
-PERMISSION_UPDATE_SWITCH_MACHINES = PermissionWrapper(
- 'update_switch_machines',
- 'update switch machines',
- 'update switch machines'
-)
-PERMISSION_LIST_MACHINES = PermissionWrapper(
- 'list_machines', 'list machines', 'list machines'
-)
-PERMISSION_ADD_MACHINE = PermissionWrapper(
- 'add_machine', 'add machine', 'add machine'
-)
-PERMISSION_DEL_MACHINE = PermissionWrapper(
- 'delete_machine', 'delete machine', 'delete machine'
-)
-PERMISSION_LIST_ADAPTERS = PermissionWrapper(
- 'list_adapters', 'list adapters', 'list adapters'
-)
-PERMISSION_LIST_METADATAS = PermissionWrapper(
- 'list_metadatas', 'list metadatas', 'list metadatas'
-)
-PERMISSION_LIST_SUBNETS = PermissionWrapper(
- 'list_subnets', 'list subnets', 'list subnets'
-)
-PERMISSION_ADD_SUBNET = PermissionWrapper(
- 'add_subnet', 'add subnet', 'add subnet'
-)
-PERMISSION_DEL_SUBNET = PermissionWrapper(
- 'del_subnet', 'del subnet', 'del subnet'
-)
-PERMISSION_LIST_CLUSTERS = PermissionWrapper(
- 'list_clusters', 'list clusters', 'list clusters'
-)
-PERMISSION_ADD_CLUSTER = PermissionWrapper(
- 'add_cluster', 'add cluster', 'add cluster'
-)
-PERMISSION_DEL_CLUSTER = PermissionWrapper(
- 'del_cluster', 'del cluster', 'del cluster'
-)
-PERMISSION_LIST_CLUSTER_CONFIG = PermissionWrapper(
- 'list_cluster_config', 'list cluster config', 'list cluster config'
-)
-PERMISSION_ADD_CLUSTER_CONFIG = PermissionWrapper(
- 'add_cluster_config', 'add cluster config', 'add cluster config'
-)
-PERMISSION_DEL_CLUSTER_CONFIG = PermissionWrapper(
- 'del_cluster_config', 'del cluster config', 'del cluster config'
-)
-PERMISSION_UPDATE_CLUSTER_HOSTS = PermissionWrapper(
- 'update_cluster_hosts',
- 'update cluster hosts',
- 'update cluster hosts'
-)
-PERMISSION_DEL_CLUSTER_HOST = PermissionWrapper(
- 'del_clusterhost', 'delete clusterhost', 'delete clusterhost'
-)
-PERMISSION_REVIEW_CLUSTER = PermissionWrapper(
- 'review_cluster', 'review cluster', 'review cluster'
-)
-PERMISSION_DEPLOY_CLUSTER = PermissionWrapper(
- 'deploy_cluster', 'deploy cluster', 'deploy cluster'
-)
-PERMISSION_DEPLOY_HOST = PermissionWrapper(
- 'deploy_host', 'deploy host', 'deploy host'
-)
-PERMISSION_GET_CLUSTER_STATE = PermissionWrapper(
- 'get_cluster_state', 'get cluster state', 'get cluster state'
-)
-PERMISSION_UPDATE_CLUSTER_STATE = PermissionWrapper(
- 'update_cluster_state', 'update cluster state',
- 'update cluster state'
-)
-PERMISSION_LIST_HOSTS = PermissionWrapper(
- 'list_hosts', 'list hosts', 'list hosts'
-)
-PERMISSION_LIST_HOST_CLUSTERS = PermissionWrapper(
- 'list_host_clusters',
- 'list host clusters',
- 'list host clusters'
-)
-PERMISSION_UPDATE_HOST = PermissionWrapper(
- 'update_host', 'update host', 'update host'
-)
-PERMISSION_DEL_HOST = PermissionWrapper(
- 'del_host', 'del host', 'del host'
-)
-PERMISSION_LIST_HOST_CONFIG = PermissionWrapper(
- 'list_host_config', 'list host config', 'list host config'
-)
-PERMISSION_ADD_HOST_CONFIG = PermissionWrapper(
- 'add_host_config', 'add host config', 'add host config'
-)
-PERMISSION_DEL_HOST_CONFIG = PermissionWrapper(
- 'del_host_config', 'del host config', 'del host config'
-)
-PERMISSION_LIST_HOST_NETWORKS = PermissionWrapper(
- 'list_host_networks',
- 'list host networks',
- 'list host networks'
-)
-PERMISSION_ADD_HOST_NETWORK = PermissionWrapper(
- 'add_host_network', 'add host network', 'add host network'
-)
-PERMISSION_DEL_HOST_NETWORK = PermissionWrapper(
- 'del_host_network', 'del host network', 'del host network'
-)
-PERMISSION_GET_HOST_STATE = PermissionWrapper(
- 'get_host_state', 'get host state', 'get host state'
-)
-PERMISSION_UPDATE_HOST_STATE = PermissionWrapper(
- 'update_host_state', 'update host state', 'update host state'
-)
-PERMISSION_LIST_CLUSTERHOSTS = PermissionWrapper(
- 'list_clusterhosts', 'list cluster hosts', 'list cluster hosts'
-)
-PERMISSION_LIST_CLUSTERHOST_CONFIG = PermissionWrapper(
- 'list_clusterhost_config',
- 'list clusterhost config',
- 'list clusterhost config'
-)
-PERMISSION_ADD_CLUSTERHOST_CONFIG = PermissionWrapper(
- 'add_clusterhost_config',
- 'add clusterhost config',
- 'add clusterhost config'
-)
-PERMISSION_DEL_CLUSTERHOST_CONFIG = PermissionWrapper(
- 'del_clusterhost_config',
- 'del clusterhost config',
- 'del clusterhost config'
-)
-PERMISSION_GET_CLUSTERHOST_STATE = PermissionWrapper(
- 'get_clusterhost_state',
- 'get clusterhost state',
- 'get clusterhost state'
-)
-PERMISSION_UPDATE_CLUSTERHOST_STATE = PermissionWrapper(
- 'update_clusterhost_state',
- 'update clusterhost state',
- 'update clusterhost state'
-)
-PERMISSION_LIST_HEALTH_REPORT = PermissionWrapper(
- 'list_health_reports',
- 'list health check report',
- 'list health check report'
-)
-PERMISSION_GET_HEALTH_REPORT = PermissionWrapper(
- 'get_health_report',
- 'get health report',
- 'get health report'
-)
-PERMISSION_CHECK_CLUSTER_HEALTH = PermissionWrapper(
- 'start_check_cluster_health',
- 'start check cluster health',
- 'start check cluster health'
-)
-PERMISSION_SET_HEALTH_CHECK_ERROR = PermissionWrapper(
- 'set_error_state',
- 'set health check into error state',
- 'set health check into error state'
-)
-PERMISSION_DELETE_REPORT = PermissionWrapper(
- 'delete_reports',
- 'delete health reports',
- 'delete health reports'
-)
-PERMISSIONS = [
- PERMISSION_LIST_PERMISSIONS,
- PERMISSION_LIST_SWITCHES,
- PERMISSION_ADD_SWITCH,
- PERMISSION_DEL_SWITCH,
- PERMISSION_LIST_SWITCH_FILTERS,
- PERMISSION_UPDATE_SWITCH_FILTERS,
- PERMISSION_LIST_SWITCH_MACHINES,
- PERMISSION_ADD_SWITCH_MACHINE,
- PERMISSION_DEL_SWITCH_MACHINE,
- PERMISSION_UPDATE_SWITCH_MACHINES,
- PERMISSION_LIST_MACHINES,
- PERMISSION_ADD_MACHINE,
- PERMISSION_DEL_MACHINE,
- PERMISSION_LIST_ADAPTERS,
- PERMISSION_LIST_METADATAS,
- PERMISSION_LIST_SUBNETS,
- PERMISSION_ADD_SUBNET,
- PERMISSION_DEL_SUBNET,
- PERMISSION_LIST_CLUSTERS,
- PERMISSION_ADD_CLUSTER,
- PERMISSION_DEL_CLUSTER,
- PERMISSION_LIST_CLUSTER_CONFIG,
- PERMISSION_ADD_CLUSTER_CONFIG,
- PERMISSION_DEL_CLUSTER_CONFIG,
- PERMISSION_UPDATE_CLUSTER_HOSTS,
- PERMISSION_DEL_CLUSTER_HOST,
- PERMISSION_REVIEW_CLUSTER,
- PERMISSION_DEPLOY_CLUSTER,
- PERMISSION_GET_CLUSTER_STATE,
- PERMISSION_UPDATE_CLUSTER_STATE,
- PERMISSION_LIST_HOSTS,
- PERMISSION_LIST_HOST_CLUSTERS,
- PERMISSION_UPDATE_HOST,
- PERMISSION_DEL_HOST,
- PERMISSION_LIST_HOST_CONFIG,
- PERMISSION_ADD_HOST_CONFIG,
- PERMISSION_DEL_HOST_CONFIG,
- PERMISSION_LIST_HOST_NETWORKS,
- PERMISSION_ADD_HOST_NETWORK,
- PERMISSION_DEL_HOST_NETWORK,
- PERMISSION_GET_HOST_STATE,
- PERMISSION_UPDATE_HOST_STATE,
- PERMISSION_DEPLOY_HOST,
- PERMISSION_LIST_CLUSTERHOSTS,
- PERMISSION_LIST_CLUSTERHOST_CONFIG,
- PERMISSION_ADD_CLUSTERHOST_CONFIG,
- PERMISSION_DEL_CLUSTERHOST_CONFIG,
- PERMISSION_GET_CLUSTERHOST_STATE,
- PERMISSION_UPDATE_CLUSTERHOST_STATE,
- PERMISSION_LIST_HEALTH_REPORT,
- PERMISSION_GET_HEALTH_REPORT,
- PERMISSION_CHECK_CLUSTER_HEALTH,
- PERMISSION_SET_HEALTH_CHECK_ERROR,
- PERMISSION_DELETE_REPORT
-]
-
-
-@util.deprecated
-def list_permissions_internal(session, **filters):
- """internal functions used only by other db.api modules."""
- return utils.list_db_objects(session, models.Permission, **filters)
-
-
-@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_permissions(user=None, session=None, **filters):
- """list permissions."""
- return utils.list_db_objects(
- session, models.Permission, **filters
- )
-
-
-def _get_permission(permission_id, session=None, **kwargs):
- """Get permission object by the unique key of Permission table."""
- if isinstance(permission_id, (int, long)):
- return utils.get_db_object(
- session, models.Permission, id=permission_id, **kwargs)
- raise exception.InvalidParameter(
- 'permission id %s type is not int compatible' % permission_id
- )
-
-
-def get_permission_internal(permission_id, session=None, **kwargs):
- return _get_permission(permission_id, session=session, **kwargs)
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_permission(
- permission_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """get permissions."""
- return _get_permission(
- permission_id, session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-def add_permissions_internal(session=None):
- """internal functions used by other db.api modules only."""
- permissions = []
- for permission in PERMISSIONS:
- permissions.append(
- utils.add_db_object(
- session, models.Permission,
- True,
- permission.name,
- alias=permission.alias,
- description=permission.description
- )
- )
-
- return permissions
diff --git a/compass-tasks/db/api/switch.py b/compass-tasks/db/api/switch.py
deleted file mode 100644
index 647eec0..0000000
--- a/compass-tasks/db/api/switch.py
+++ /dev/null
@@ -1,1213 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Switch database operations."""
-import logging
-import netaddr
-import re
-
-from compass.db.api import database
-from compass.db.api import permission
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-from compass.utils import setting_wrapper as setting
-from compass.utils import util
-
-
-SUPPORTED_FIELDS = ['ip_int', 'vendor', 'state']
-SUPPORTED_FILTER_FIELDS = ['ip_int', 'vendor', 'state']
-SUPPORTED_SWITCH_MACHINES_FIELDS = [
- 'switch_ip_int', 'port', 'vlans', 'mac', 'tag', 'location',
- 'owner_id'
-]
-SUPPORTED_MACHINES_FIELDS = [
- 'port', 'vlans', 'mac', 'tag', 'location', 'owner_id'
-]
-SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS = [
- 'switch_ip_int', 'port', 'vlans', 'mac',
- 'tag', 'location', 'os_name'
-]
-SUPPORTED_MACHINES_HOSTS_FIELDS = [
- 'port', 'vlans', 'mac', 'tag', 'location',
- 'os_name'
-]
-IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
-ADDED_FIELDS = ['ip']
-OPTIONAL_ADDED_FIELDS = [
- 'credentials', 'vendor', 'state', 'err_msg', 'machine_filters'
-]
-UPDATED_FIELDS = [
- 'ip', 'credentials', 'vendor', 'state',
- 'err_msg', 'put_machine_filters'
-]
-PATCHED_FIELDS = ['patched_credentials', 'patched_machine_filters']
-UPDATED_FILTERS_FIELDS = ['put_machine_filters']
-PATCHED_FILTERS_FIELDS = ['patched_machine_filters']
-ADDED_MACHINES_FIELDS = ['mac']
-OPTIONAL_ADDED_MACHINES_FIELDS = [
- 'ipmi_credentials', 'tag', 'location', 'owner_id'
-]
-ADDED_SWITCH_MACHINES_FIELDS = ['port']
-OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS = ['vlans']
-UPDATED_MACHINES_FIELDS = [
- 'ipmi_credentials',
- 'tag', 'location'
-]
-UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans', 'owner_id']
-PATCHED_MACHINES_FIELDS = [
- 'patched_ipmi_credentials',
- 'patched_tag', 'patched_location'
-]
-PATCHED_SWITCH_MACHINES_FIELDS = ['patched_vlans']
-RESP_FIELDS = [
- 'id', 'ip', 'credentials', 'vendor', 'state', 'err_msg',
- 'filters', 'created_at', 'updated_at'
-]
-RESP_FILTERS_FIELDS = [
- 'id', 'ip', 'filters', 'created_at', 'updated_at'
-]
-RESP_ACTION_FIELDS = [
- 'status', 'details'
-]
-RESP_MACHINES_FIELDS = [
- 'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
- 'port', 'vlans', 'mac', 'owner_id',
- 'ipmi_credentials', 'tag', 'location',
- 'created_at', 'updated_at'
-]
-RESP_MACHINES_HOSTS_FIELDS = [
- 'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
- 'port', 'vlans', 'mac',
- 'ipmi_credentials', 'tag', 'location', 'ip',
- 'name', 'hostname', 'os_name', 'owner',
- 'os_installer', 'reinstall_os', 'os_installed',
- 'clusters', 'created_at', 'updated_at'
-]
-RESP_CLUSTER_FIELDS = [
- 'name', 'id'
-]
-
-
-def _check_machine_filters(machine_filters):
- """Check if machine filters format is acceptable."""
- logging.debug('check machine filters: %s', machine_filters)
- models.Switch.parse_filters(machine_filters)
-
-
-def _check_vlans(vlans):
- """Check vlans format is acceptable."""
- for vlan in vlans:
- if not isinstance(vlan, int):
- raise exception.InvalidParameter(
- 'vlan %s is not int' % vlan
- )
-
-
-@utils.supported_filters(
- ADDED_FIELDS,
- optional_support_keys=OPTIONAL_ADDED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(
- ip=utils.check_ip,
- credentials=utils.check_switch_credentials,
- machine_filters=_check_machine_filters
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def _add_switch(
- ip, exception_when_existing=True,
- machine_filters=setting.SWITCHES_DEFAULT_FILTERS,
- session=None, **kwargs
-):
- """Add switch by switch ip."""
- ip_int = long(netaddr.IPAddress(ip))
- return utils.add_db_object(
- session, models.Switch, exception_when_existing, ip_int,
- machine_filters=machine_filters, **kwargs
- )
-
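-# Illustrative sketch (hedged example, address made up): switches are keyed
-# by the integer form of their management IP, which is what
-# long(netaddr.IPAddress(ip)) computes in _add_switch() above.
-example_ip_int = long(netaddr.IPAddress('192.168.100.1'))
-# 192 * 256 ** 3 + 168 * 256 ** 2 + 100 * 256 + 1
-assert example_ip_int == 3232261121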
-
-def get_switch_internal(
- switch_id, session=None, **kwargs
-):
- """Get switch by switch id.
-
- Should only be used by other files under db/api
- """
- return _get_switch(switch_id, session=session, **kwargs)
-
-
-def _get_switch(switch_id, session=None, **kwargs):
- """Get Switch object switch id."""
- if isinstance(switch_id, (int, long)):
- return utils.get_db_object(
- session, models.Switch,
- id=switch_id, **kwargs
- )
- raise exception.InvalidParameter(
- 'switch id %s type is not int compatible' % switch_id)
-
-
-def _get_switch_by_ip(switch_ip, session=None, **kwargs):
- """Get switch by switch ip."""
- switch_ip_int = long(netaddr.IPAddress(switch_ip))
- return utils.get_db_object(
- session, models.Switch,
- ip_int=switch_ip_int, **kwargs
- )
-
-
-def _get_switch_machine(switch_id, machine_id, session=None, **kwargs):
- """Get switch machine by switch id and machine id."""
- switch = _get_switch(switch_id, session=session)
- from compass.db.api import machine as machine_api
- machine = machine_api.get_machine_internal(machine_id, session=session)
- return utils.get_db_object(
- session, models.SwitchMachine,
- switch_id=switch.id, machine_id=machine.id, **kwargs
- )
-
-
-def _get_switchmachine(switch_machine_id, session=None, **kwargs):
- """Get switch machine by switch_machine_id."""
- if not isinstance(switch_machine_id, (int, long)):
- raise exception.InvalidParameter(
- 'switch machine id %s type is not int compatible' % (
- switch_machine_id
- )
- )
- return utils.get_db_object(
- session, models.SwitchMachine,
- switch_machine_id=switch_machine_id, **kwargs
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCHES
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_switch(
- switch_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """get a switch by switch id."""
- return _get_switch(
- switch_id, session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCHES
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_switches(user=None, session=None, **filters):
- """List switches."""
- # TODO(xicheng): should discuss with weidong.
- # If we can deprecate the use of DEFAULT_SWITCH_IP,
- # The code will be simpler.
- # The UI should use /machines-hosts instead of
- # /switches-machines-hosts and can show multi switch ip/port
- # under one row of machine info.
- switches = utils.list_db_objects(
- session, models.Switch, **filters
- )
- if 'ip_int' in filters:
- return switches
- else:
- return [
- switch for switch in switches
- if switch.ip != setting.DEFAULT_SWITCH_IP
- ]
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_SWITCH
-)
-@utils.wrap_to_dict(RESP_FIELDS)
-def del_switch(switch_id, user=None, session=None, **kwargs):
- """Delete a switch.
-
- If the switch is not the default switch and a machine under this switch
- is only connected to this switch, the machine will be moved to the
- default switch. Otherwise we simply delete the switch machine. The
- purpose is to make sure no machine is left without a connection to any
- switch.
- """
- # TODO(xicheng): Simplify the logic if the default switch feature
- # can be deprecated.
- switch = _get_switch(switch_id, session=session)
- default_switch = _get_switch_by_ip(
- setting.DEFAULT_SWITCH_IP, session=session
- )
- if switch.id != default_switch.id:
- for switch_machine in switch.switch_machines:
- machine = switch_machine.machine
- if len(machine.switch_machines) <= 1:
- utils.add_db_object(
- session, models.SwitchMachine,
- False,
- default_switch.id, machine.id,
- port=switch_machine.port
- )
- return utils.del_db_object(session, switch)
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH
-)
-def add_switch(
- exception_when_existing=True, ip=None,
- user=None, session=None, **kwargs
-):
- """Create a switch."""
- return _add_switch(
- ip,
- exception_when_existing=exception_when_existing,
- session=session, **kwargs
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH
-)
-def add_switches(
- exception_when_existing=False,
- data=[], user=None, session=None
-):
- """Create switches."""
- # TODO(xicheng): simplify the batch api.
- switches = []
- fail_switches = []
- for switch_data in data:
- switch_object = _get_switch_by_ip(
- switch_data['ip'], session=session,
- exception_when_missing=False
- )
- if switch_object:
- logging.error('ip %s exists in switch %s' % (
- switch_data['ip'], switch_object.id
- ))
- fail_switches.append(switch_data)
- else:
- switches.append(
- _add_switch(
- exception_when_existing=exception_when_existing,
- session=session,
- **switch_data
- )
- )
- return {
- 'switches': switches,
- 'fail_switches': fail_switches
- }
-
-
-@utils.wrap_to_dict(RESP_FIELDS)
-def _update_switch(switch_id, session=None, **kwargs):
- """Update a switch."""
- switch = _get_switch(switch_id, session=session)
- return utils.update_db_object(session, switch, **kwargs)
-
-
-# replace machine_filters in kwargs to put_machine_filters,
-# which is used to tell db this is a put action for the field.
-@utils.replace_filters(
- machine_filters='put_machine_filters'
-)
-@utils.supported_filters(
- optional_support_keys=UPDATED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(
- credentials=utils.check_switch_credentials,
- put_machine_filters=_check_machine_filters
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH
-)
-def update_switch(switch_id, user=None, session=None, **kwargs):
- """Update fields of a switch."""
- return _update_switch(switch_id, session=session, **kwargs)
-
-
-# replace credentials to patched_credentials,
-# machine_filters to patched_machine_filters in kwargs.
-# This tells db these are patch actions on the above fields.
-@utils.replace_filters(
- credentials='patched_credentials',
- machine_filters='patched_machine_filters'
-)
-@utils.supported_filters(
- optional_support_keys=PATCHED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(
- patched_machine_filters=_check_machine_filters
-)
-@database.run_in_session()
-@utils.output_validates(
- credentials=utils.check_switch_credentials
-)
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH
-)
-def patch_switch(switch_id, user=None, session=None, **kwargs):
- """Patch fields of a switch."""
- return _update_switch(switch_id, session=session, **kwargs)
-
-
-@util.deprecated
-@utils.supported_filters(optional_support_keys=SUPPORTED_FILTER_FIELDS)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCH_FILTERS
-)
-@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
-def list_switch_filters(user=None, session=None, **filters):
- """List all switches' filters."""
- return utils.list_db_objects(
- session, models.Switch, **filters
- )
-
-
-@util.deprecated
-@utils.supported_filters()
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCH_FILTERS
-)
-@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
-def get_switch_filters(
- switch_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """get filters of a switch."""
- return _get_switch(
- switch_id, session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-@util.deprecated
-@utils.replace_filters(
- machine_filters='put_machine_filters'
-)
-@utils.supported_filters(
- optional_support_keys=UPDATED_FILTERS_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(put_machine_filters=_check_machine_filters)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_SWITCH_FILTERS
-)
-@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
-def update_switch_filters(switch_id, user=None, session=None, **kwargs):
- """Update filters of a switch."""
- switch = _get_switch(switch_id, session=session)
- return utils.update_db_object(session, switch, **kwargs)
-
-
-@util.deprecated
-@utils.replace_filters(
- machine_filters='patched_machine_filters'
-)
-@utils.supported_filters(
- optional_support_keys=PATCHED_FILTERS_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(patched_machine_filters=_check_machine_filters)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_SWITCH_FILTERS
-)
-@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
-def patch_switch_filter(switch_id, user=None, session=None, **kwargs):
- """Patch filters to a switch."""
- switch = _get_switch(switch_id, session=session)
- return utils.update_db_object(session, switch, **kwargs)
-
-
-@util.deprecated
-def get_switch_machines_internal(session, **filters):
- return utils.list_db_objects(
- session, models.SwitchMachine, **filters
- )
-
-
-def _filter_port(port_filter, obj):
- """filter switch machines by port.
-
- supported port_filter keys: [
- 'startswith', 'endswith', 'resp_lt',
- 'resp_le', 'resp_gt', 'resp_ge', 'resp_range'
- ]
-
- port_filter examples:
- {
- 'startswith': 'ae', 'endswith': '',
- 'resp_ge': 20, 'resp_le': 30,
- }
- """
- port_prefix = port_filter.get('startswith', '')
- port_suffix = port_filter.get('endswith', '')
- pattern = re.compile(r'%s(\d+)%s' % (port_prefix, port_suffix))
- match = pattern.match(obj)
- if not match:
- return False
- port_number = int(match.group(1))
- if (
- 'resp_lt' in port_filter and
- port_number >= port_filter['resp_lt']
- ):
- return False
- if (
- 'resp_le' in port_filter and
- port_number > port_filter['resp_le']
- ):
- return False
- if (
- 'resp_gt' in port_filter and
- port_number <= port_filter['resp_gt']
- ):
- return False
- if (
- 'resp_ge' in port_filter and
- port_number < port_filter['resp_ge']
- ):
- return False
- if 'resp_range' in port_filter:
- resp_range = port_filter['resp_range']
- if not isinstance(resp_range, list):
- resp_range = [resp_range]
- in_range = False
- for port_start, port_end in resp_range:
- if port_start <= port_number <= port_end:
- in_range = True
- break
- if not in_range:
- return False
- return True
-
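-# Illustrative sketch (hedged example, port names made up): a filter that
-# keeps ports 'ae20'..'ae30' and how _filter_port() evaluates it.
-example_port_filter = {'startswith': 'ae', 'resp_ge': 20, 'resp_le': 30}
-assert _filter_port(example_port_filter, 'ae25')      # prefix ok, in range
-assert not _filter_port(example_port_filter, 'ae31')  # 31 > resp_le
-assert not _filter_port(example_port_filter, 'xe25')  # prefix mismatch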
-
-def _filter_vlans(vlan_filter, obj):
- """Filter switch machines by vlan.
-
- supported keys in vlan_filter:
- ['resp_in']
- """
- vlans = set(obj)
- if 'resp_in' in vlan_filter:
- resp_vlans = set(vlan_filter['resp_in'])
- if not (vlans & resp_vlans):
- return False
- return True
-
-
-@utils.output_filters(
- port=_filter_port, vlans=_filter_vlans,
- tag=utils.general_filter_callback,
- location=utils.general_filter_callback
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def _filter_switch_machines(switch_machines):
- """Get filtered switch machines.
-
- The filters are defined in each switch.
- """
- return [
- switch_machine for switch_machine in switch_machines
- if not switch_machine.filtered
- ]
-
-
-@utils.output_filters(
- missing_ok=True,
- port=_filter_port, vlans=_filter_vlans,
- tag=utils.general_filter_callback,
- location=utils.general_filter_callback,
- os_name=utils.general_filter_callback,
-)
-@utils.wrap_to_dict(
- RESP_MACHINES_HOSTS_FIELDS,
- clusters=RESP_CLUSTER_FIELDS
-)
-def _filter_switch_machines_hosts(switch_machines):
- """Similar as _filter_switch_machines, but also return host info."""
- filtered_switch_machines = [
- switch_machine for switch_machine in switch_machines
- if not switch_machine.filtered
- ]
- switch_machines_hosts = []
- for switch_machine in filtered_switch_machines:
- machine = switch_machine.machine
- host = machine.host
- if host:
- switch_machine_host_dict = host.to_dict()
- else:
- switch_machine_host_dict = machine.to_dict()
- switch_machine_host_dict.update(
- switch_machine.to_dict()
- )
- switch_machines_hosts.append(switch_machine_host_dict)
- return switch_machines_hosts
-
-
-@utils.supported_filters(
- optional_support_keys=SUPPORTED_MACHINES_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCH_MACHINES
-)
-def list_switch_machines(
- switch_id, user=None, session=None, **filters
-):
- """Get switch machines of a switch."""
- switch = _get_switch(switch_id, session=session)
- switch_machines = utils.list_db_objects(
- session, models.SwitchMachine, switch_id=switch.id, **filters
- )
- if not user.is_admin and switch_machines:
- switch_machines = [
- m for m in switch_machines
- if m.machine.owner_id == user.id
- ]
- return _filter_switch_machines(switch_machines)
-
-
-# replace ip_int to switch_ip_int in kwargs
-@utils.replace_filters(
- ip_int='switch_ip_int'
-)
-@utils.supported_filters(
- optional_support_keys=SUPPORTED_SWITCH_MACHINES_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCH_MACHINES
-)
-def list_switchmachines(user=None, session=None, **filters):
- """List switch machines."""
- switch_machines = utils.list_db_objects(
- session, models.SwitchMachine, **filters
- )
- return _filter_switch_machines(
- switch_machines
- )
-
-
-@utils.supported_filters(
- optional_support_keys=SUPPORTED_MACHINES_HOSTS_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCH_MACHINES
-)
-def list_switch_machines_hosts(
- switch_id, user=None, session=None, **filters
-):
- """Get switch machines and possible hosts of a switch."""
- switch = _get_switch(switch_id, session=session)
- switch_machines = utils.list_db_objects(
- session, models.SwitchMachine, switch_id=switch.id, **filters
- )
- return _filter_switch_machines_hosts(
- switch_machines
- )
-
-
-# replace ip_int to switch_ip_int in kwargs
-@utils.replace_filters(
- ip_int='switch_ip_int'
-)
-@utils.supported_filters(
- optional_support_keys=SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCH_MACHINES
-)
-def list_switchmachines_hosts(user=None, session=None, **filters):
- """List switch machines hnd possible hosts."""
- switch_machines = utils.list_db_objects(
- session, models.SwitchMachine, **filters
- )
- if not user.is_admin and switch_machines:
- switch_machines = [
- m for m in switch_machines
- if m.machine.owner_id == user.id
- ]
- return _filter_switch_machines_hosts(
- switch_machines
- )
-
-
-@utils.supported_filters(
- ADDED_MACHINES_FIELDS,
- optional_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS,
- ignore_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS
-)
-@utils.input_validates(mac=utils.check_mac)
-def _add_machine_if_not_exist(mac=None, session=None, **kwargs):
- """Add machine if the mac does not exist in any machine."""
- return utils.add_db_object(
- session, models.Machine, False,
- mac, **kwargs)
-
-
-@utils.supported_filters(
- ADDED_SWITCH_MACHINES_FIELDS,
- optional_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS,
- ignore_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS
-)
-@utils.input_validates(vlans=_check_vlans)
-def _add_switch_machine_only(
- switch, machine, exception_when_existing=True,
- session=None, owner_id=None, port=None, **kwargs
-):
- """add a switch machine."""
- return utils.add_db_object(
- session, models.SwitchMachine,
- exception_when_existing,
- switch.id, machine.id, port=port,
- owner_id=owner_id,
- **kwargs
- )
-
-
-@utils.supported_filters(
- ADDED_MACHINES_FIELDS + ADDED_SWITCH_MACHINES_FIELDS,
- optional_support_keys=(
- OPTIONAL_ADDED_MACHINES_FIELDS +
- OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS
- ),
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def _add_switch_machine(
- switch_id, exception_when_existing=True,
- mac=None, port=None, session=None, owner_id=None, **kwargs
-):
- """Add switch machine.
-
- If underlying machine does not exist, also create the underlying
- machine.
- """
- switch = _get_switch(switch_id, session=session)
- machine = _add_machine_if_not_exist(
- mac=mac, session=session, owner_id=owner_id, **kwargs
- )
- return _add_switch_machine_only(
- switch, machine,
- exception_when_existing,
- port=port, session=session, **kwargs
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH_MACHINE
-)
-def add_switch_machine(
- switch_id, exception_when_existing=True,
- mac=None, user=None, session=None,
- owner_id=None, **kwargs
-):
- """Add switch machine to a switch."""
- return _add_switch_machine(
- switch_id,
- exception_when_existing=exception_when_existing,
- mac=mac, session=session, owner_id=owner_id, **kwargs
- )
-
-
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH_MACHINE
-)
-@utils.wrap_to_dict(
- [
- 'switches_machines',
- 'duplicate_switches_machines',
- 'fail_switches_machines'
- ],
- switches_machines=RESP_MACHINES_FIELDS,
- duplicate_switches_machines=RESP_MACHINES_FIELDS
-)
-def add_switch_machines(
- exception_when_existing=False,
- data=[], user=None, session=None, owner_id=None
-):
- """Add switch machines."""
- switch_machines = []
- duplicate_switch_machines = []
- failed_switch_machines = []
- switches_mapping = {}
- switch_machines_mapping = {}
- switch_ips = []
- for item_data in data:
- switch_ip = item_data['switch_ip']
- if switch_ip not in switches_mapping:
- switch_object = _get_switch_by_ip(
- switch_ip, session=session,
- exception_when_missing=False
- )
- if switch_object:
- switch_ips.append(switch_ip)
- switches_mapping[switch_ip] = switch_object
- else:
- logging.error(
- 'switch %s does not exist' % switch_ip
- )
- item_data.pop('switch_ip')
- failed_switch_machines.append(item_data)
- else:
- switch_object = switches_mapping[switch_ip]
- if switch_object:
- item_data.pop('switch_ip')
- switch_machines_mapping.setdefault(
- switch_object.id, []
- ).append(item_data)
-
- for switch_ip in switch_ips:
- switch_object = switches_mapping[switch_ip]
- switch_id = switch_object.id
- machines = switch_machines_mapping[switch_id]
- for machine in machines:
- mac = machine['mac']
- machine_object = _add_machine_if_not_exist(
- mac=mac, session=session
- )
- switch_machine_object = _get_switch_machine(
- switch_id, machine_object.id, session=session,
- exception_when_missing=False
- )
- if switch_machine_object:
- port = machine['port']
- switch_machine_id = switch_machine_object.switch_machine_id
- exist_port = switch_machine_object.port
- if exist_port != port:
- logging.error(
- 'switch machine %s exist port %s is '
- 'different from added port %s' % (
- switch_machine_id,
- exist_port, port
- )
- )
- failed_switch_machines.append(machine)
- else:
- logging.error(
- 'switch machine %s is duplicate, '
- 'will not be overridden' % switch_machine_id
- )
- duplicate_switch_machines.append(machine)
- else:
- del machine['mac']
- switch_machines.append(_add_switch_machine_only(
- switch_object, machine_object,
- exception_when_existing,
- session=session, owner_id=owner_id, **machine
- ))
- return {
- 'switches_machines': switch_machines,
- 'duplicate_switches_machines': duplicate_switch_machines,
- 'fail_switches_machines': failed_switch_machines
- }
-
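-# Illustrative sketch (hedged example; addresses, macs and the some_admin
-# user below are placeholders): the shape of the 'data' payload that
-# add_switch_machines() consumes. Entries whose switch_ip matches no existing
-# switch end up in 'fail_switches_machines'; macs re-added on an already
-# known port end up in 'duplicate_switches_machines'.
-example_data = [
-    {'switch_ip': '10.145.88.1', 'mac': '00:11:22:33:44:55', 'port': 'ae20'},
-    {'switch_ip': '10.145.88.1', 'mac': '00:11:22:33:44:66', 'port': 'ae21'},
-    {'switch_ip': '10.145.99.9', 'mac': '00:11:22:33:44:77', 'port': 'ae22'},
-]
-# add_switch_machines(data=example_data, user=some_admin) returns a dict with
-# 'switches_machines', 'duplicate_switches_machines' and
-# 'fail_switches_machines' lists.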
-
-@utils.supported_filters(optional_support_keys=['find_machines'])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_SWITCH_MACHINES
-)
-@utils.wrap_to_dict(RESP_ACTION_FIELDS)
-def poll_switch(switch_id, user=None, session=None, **kwargs):
- """poll switch to get machines."""
- from compass.tasks import client as celery_client
- switch = _get_switch(switch_id, session=session)
- celery_client.celery.send_task(
- 'compass.tasks.pollswitch',
- (user.email, switch.ip, switch.credentials),
- queue=user.email,
- exchange=user.email,
- routing_key=user.email
- )
- return {
- 'status': 'action %s sent' % kwargs,
- 'details': {
- }
- }
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCH_MACHINES
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def get_switch_machine(
- switch_id, machine_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """get a switch machine by switch id and machine id."""
- return _get_switch_machine(
- switch_id, machine_id, session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-@utils.supported_filters([])
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_LIST_SWITCH_MACHINES
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def get_switchmachine(
- switch_machine_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """get a switch machine by switch_machine_id."""
- return _get_switchmachine(
- switch_machine_id, session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-@utils.supported_filters(
- optional_support_keys=(
- UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS
- ),
- ignore_support_keys=(
- UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
- )
-)
-def _update_machine_if_necessary(
- machine, session=None, **kwargs
-):
- """Update machine is there is something to update."""
- utils.update_db_object(
- session, machine, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=(
- UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
- ),
- ignore_support_keys=(
- UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS
- )
-)
-def _update_switch_machine_only(switch_machine, session=None, **kwargs):
- """Update switch machine."""
- return utils.update_db_object(
- session, switch_machine, **kwargs
- )
-
-
-def _update_switch_machine(
- switch_machine, session=None, **kwargs
-):
- """Update switch machine.
-
- If some attributes of the underlying machine need to be updated,
- also update them on the underlying machine.
- """
- _update_machine_if_necessary(
- switch_machine.machine, session=session, **kwargs
- )
- return _update_switch_machine_only(
- switch_machine, session=session, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=(
- UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS
- ),
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(vlans=_check_vlans)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH_MACHINE
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def update_switch_machine(
- switch_id, machine_id, user=None,
- session=None, **kwargs
-):
- """Update switch machine by switch id and machine id."""
- switch_machine = _get_switch_machine(
- switch_id, machine_id, session=session
- )
- return _update_switch_machine(
- switch_machine,
- session=session, **kwargs
- )
-
-
-@utils.supported_filters(
- optional_support_keys=(
- UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS
- ),
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(vlans=_check_vlans)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH_MACHINE
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def update_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
- """Update switch machine by switch_machine_id."""
- switch_machine = _get_switchmachine(
- switch_machine_id, session=session
- )
- return _update_switch_machine(
- switch_machine,
- session=session, **kwargs
- )
-
-
-# replace [vlans, ipmi_credentials, tag, location] to
-# [patched_vlans, patched_ipmi_credentials, patched_tag,
-# patched_location] in kwargs. It tells db these fields will
-# be patched.
-@utils.replace_filters(
- vlans='patched_vlans',
- ipmi_credentials='patched_ipmi_credentials',
- tag='patched_tag',
- location='patched_location'
-)
-@utils.supported_filters(
- optional_support_keys=(
- PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
- ),
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(patched_vlans=_check_vlans)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH_MACHINE
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def patch_switch_machine(
- switch_id, machine_id, user=None,
- session=None, **kwargs
-):
- """Patch switch machine by switch_id and machine_id."""
- switch_machine = _get_switch_machine(
- switch_id, machine_id, session=session
- )
- return _update_switch_machine(
- switch_machine,
- session=session, **kwargs
- )
-
-
-# replace [vlans, ipmi_credentials, tag, location] to
-# [patched_vlans, patched_ipmi_credentials, patched_tag,
-# patched_location] in kwargs. It tells db these fields will
-# be patched.
-@utils.replace_filters(
- vlans='patched_vlans',
- ipmi_credentials='patched_ipmi_credentials',
- tag='patched_tag',
- location='patched_location'
-)
-@utils.supported_filters(
- optional_support_keys=(
- PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
- ),
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(patched_vlans=_check_vlans)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_ADD_SWITCH_MACHINE
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def patch_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
- """Patch switch machine by switch_machine_id."""
- switch_machine = _get_switchmachine(
- switch_machine_id, session=session
- )
- return _update_switch_machine(
- switch_machine,
- session=session, **kwargs
- )
-
-
-def _del_switch_machine(
- switch_machine, session=None
-):
- """Delete switch machine.
-
- If this is the last switch machine associated with the underlying
- machine, add a switch machine record to the default switch to keep
- the machine searchable.
- """
- default_switch = _get_switch_by_ip(
- setting.DEFAULT_SWITCH_IP, session=session
- )
- machine = switch_machine.machine
- if len(machine.switch_machines) <= 1:
- utils.add_db_object(
- session, models.SwitchMachine,
- False,
- default_switch.id, machine.id,
- port=switch_machine.port
- )
- return utils.del_db_object(session, switch_machine)
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_SWITCH_MACHINE
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def del_switch_machine(
- switch_id, machine_id, user=None,
- session=None, **kwargs
-):
- """Delete switch machine by switch id and machine id."""
- switch_machine = _get_switch_machine(
- switch_id, machine_id, session=session
- )
- return _del_switch_machine(switch_machine, session=session)
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_DEL_SWITCH_MACHINE
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def del_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
- """Delete switch machine by switch_machine_id."""
- switch_machine = _get_switchmachine(
- switch_machine_id, session=session
- )
- return _del_switch_machine(switch_machine, session=session)
-
-
-@utils.supported_filters(
- ['machine_id'],
- optional_support_keys=UPDATED_SWITCH_MACHINES_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-def _add_machine_to_switch(
- switch_id, machine_id, session=None, **kwargs
-):
- """Add machine to switch."""
- switch = _get_switch(switch_id, session=session)
- from compass.db.api import machine as machine_api
- machine = machine_api.get_machine_internal(
- machine_id, session=session
- )
- _add_switch_machine_only(
- switch, machine, False,
- owner_id=machine.owner_id, **kwargs
- )
-
-
-def _add_machines(switch, machines, session=None):
- """Add machines to switch.
-
- Args:
- machines: list of dicts, each containing the attributes
- needed to add a machine to the switch.
-
- machines example:
- [{'machine_id': 1, 'port': 'ae20'}]
- """
- for machine in machines:
- _add_machine_to_switch(
- switch.id, session=session, **machine
- )
-
-
-def _remove_machines(switch, machines, session=None):
- """Remove machines from switch.
-
- Args:
- machines: list of machine id.
-
- machines example:
- [1,2]
- """
- utils.del_db_objects(
- session, models.SwitchMachine,
- switch_id=switch.id, machine_id=machines
- )
-
-
-def _set_machines(switch, machines, session=None):
- """Reset machines to a switch.
-
- Args:
- machines: list of dicts, each containing the attributes
- needed to add a machine to the switch.
-
- machines example:
- [{'machine_id': 1, 'port': 'ae20'}]
- """
- utils.del_db_objects(
- session, models.SwitchMachine,
- switch_id=switch.id
- )
- for switch_machine in machines:
- _add_machine_to_switch(
- switch.id, session=session, **switch_machine
- )
-
-
-@utils.supported_filters(
- optional_support_keys=[
- 'add_machines', 'remove_machines', 'set_machines'
- ]
-)
-@database.run_in_session()
-@user_api.check_user_permission(
- permission.PERMISSION_UPDATE_SWITCH_MACHINES
-)
-@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
-def update_switch_machines(
- switch_id, add_machines=[], remove_machines=[],
- set_machines=None, user=None, session=None, **kwargs
-):
- """update switch's machines"""
- switch = _get_switch(switch_id, session=session)
- if remove_machines:
- _remove_machines(
- switch, remove_machines, session=session
- )
- if add_machines:
- _add_machines(
- switch, add_machines, session=session
- )
- if set_machines is not None:
- _set_machines(
- switch, set_machines, session=session
- )
- return switch.switch_machines
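For reference, a minimal sketch of the payload shapes update_switch_machines accepts, based on the docstrings above; the machine ids and ports below are made-up values:

add_machines = [{'machine_id': 1, 'port': 'ae20'}, {'machine_id': 2, 'port': 'ae21'}]
remove_machines = [3, 4]                             # plain machine ids
set_machines = [{'machine_id': 5, 'port': 'ae22'}]   # replaces all existing entries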
diff --git a/compass-tasks/db/api/user.py b/compass-tasks/db/api/user.py
deleted file mode 100644
index db039eb..0000000
--- a/compass-tasks/db/api/user.py
+++ /dev/null
@@ -1,553 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""User database operations."""
-import datetime
-import functools
-import logging
-import re
-
-from flask.ext.login import UserMixin
-
-from compass.db.api import database
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-
-from compass.utils import setting_wrapper as setting
-from compass.utils import util
-
-
-SUPPORTED_FIELDS = ['email', 'is_admin', 'active']
-PERMISSION_SUPPORTED_FIELDS = ['name']
-SELF_UPDATED_FIELDS = ['email', 'firstname', 'lastname', 'password']
-ADMIN_UPDATED_FIELDS = ['is_admin', 'active']
-IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
-UPDATED_FIELDS = [
- 'email', 'firstname', 'lastname', 'password', 'is_admin', 'active'
-]
-ADDED_FIELDS = ['email', 'password']
-OPTIONAL_ADDED_FIELDS = ['is_admin', 'active']
-PERMISSION_ADDED_FIELDS = ['permission_id']
-RESP_FIELDS = [
- 'id', 'email', 'is_admin', 'active', 'firstname',
- 'lastname', 'created_at', 'updated_at'
-]
-RESP_TOKEN_FIELDS = [
- 'id', 'user_id', 'token', 'expire_timestamp'
-]
-PERMISSION_RESP_FIELDS = [
- 'id', 'user_id', 'permission_id', 'name', 'alias', 'description',
- 'created_at', 'updated_at'
-]
-
-
-def _check_email(email):
- """Check email is email format."""
- if '@' not in email:
- raise exception.InvalidParameter(
- 'there is no @ in email address %s.' % email
- )
-
-
-def _check_user_permission(user, permission, session=None):
- """Check user has permission."""
- if not user:
- logging.info('empty user means the call is from internal')
- return
- if user.is_admin:
- return
-
- user_permission = utils.get_db_object(
- session, models.UserPermission,
- False, user_id=user.id, name=permission.name
- )
- if not user_permission:
- raise exception.Forbidden(
- 'user %s does not have permission %s' % (
- user.email, permission.name
- )
- )
-
-
-def check_user_permission(permission):
- """Decorator to check user having permission."""
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- user = kwargs.get('user')
- if user is not None:
- session = kwargs.get('session')
- if session is None:
- raise exception.DatabaseException(
- 'wrapper check_user_permission does not run in session'
- )
- _check_user_permission(user, permission, session=session)
- return func(*args, **kwargs)
- else:
- return func(*args, **kwargs)
- return wrapper
- return decorator
-
-
-def check_user_admin():
- """Decorator to check user is admin."""
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- user = kwargs.get('user')
- if user is not None:
- if not user.is_admin:
- raise exception.Forbidden(
- 'User %s is not admin.' % (
- user.email
- )
- )
- return func(*args, **kwargs)
- else:
- return func(*args, **kwargs)
- return wrapper
- return decorator
-
-
-def check_user_admin_or_owner():
- """Decorator to check user is admin or the owner of the resource."""
- def decorator(func):
- @functools.wraps(func)
- def wrapper(user_id, *args, **kwargs):
- user = kwargs.get('user')
- if user is not None:
- session = kwargs.get('session')
- if session is None:
- raise exception.DatabaseException(
- 'wrapper check_user_admin_or_owner is '
- 'not called in session'
- )
- check_user = _get_user(user_id, session=session)
- if not user.is_admin and user.id != check_user.id:
- raise exception.Forbidden(
- 'User %s is not admin or the owner of user %s.' % (
- user.email, check_user.email
- )
- )
-
- return func(
- user_id, *args, **kwargs
- )
- else:
- return func(
- user_id, *args, **kwargs
- )
- return wrapper
- return decorator
-
-
-def _add_user_permissions(user, session=None, **permission_filters):
- """add permissions to a user."""
- from compass.db.api import permission as permission_api
- for api_permission in permission_api.list_permissions(
- session=session, **permission_filters
- ):
- utils.add_db_object(
- session, models.UserPermission, False,
- user.id, api_permission['id']
- )
-
-
-def _remove_user_permissions(user, session=None, **permission_filters):
- """remove permissions from a user."""
- from compass.db.api import permission as permission_api
- permission_ids = [
- api_permission['id']
- for api_permission in permission_api.list_permissions(
- session=session, **permission_filters
- )
- ]
- utils.del_db_objects(
- session, models.UserPermission,
- user_id=user.id, permission_id=permission_ids
- )
-
-
-def _set_user_permissions(user, session=None, **permission_filters):
- """set permissions to a user."""
- utils.del_db_objects(
- session, models.UserPermission,
- user_id=user.id
- )
- _add_user_permissions(user, session=session, **permission_filters)
-
-
-class UserWrapper(UserMixin):
- """Wrapper class provided to flask."""
-
- def __init__(
- self, id, email, crypted_password,
- active=True, is_admin=False,
- expire_timestamp=None, token='', **kwargs
- ):
- self.id = id
- self.email = email
- self.password = crypted_password
- self.active = active
- self.is_admin = is_admin
- self.expire_timestamp = expire_timestamp
- if not token:
- self.token = self.get_auth_token()
- else:
- self.token = token
- super(UserWrapper, self).__init__()
-
- def authenticate(self, password):
- if not util.encrypt(password, self.password) == self.password:
- raise exception.Unauthorized('%s password mismatch' % self.email)
-
- def get_auth_token(self):
- return util.encrypt(self.email)
-
- def is_active(self):
- return self.active
-
- def get_id(self):
- return self.token
-
- def is_authenticated(self):
- current_time = datetime.datetime.now()
- return (
- not self.expire_timestamp or
- current_time < self.expire_timestamp
- )
-
- def __str__(self):
- return '%s[email:%s,password:%s]' % (
- self.__class__.__name__, self.email, self.password)
-
-
-@database.run_in_session()
-def get_user_object(email, session=None, **kwargs):
- """get user and convert to UserWrapper object."""
- user = utils.get_db_object(
- session, models.User, False, email=email
- )
- if not user:
- raise exception.Unauthorized(
- '%s unauthorized' % email
- )
- user_dict = user.to_dict()
- user_dict.update(kwargs)
- return UserWrapper(**user_dict)
-
-
-@database.run_in_session(exception_when_in_session=False)
-def get_user_object_from_token(token, session=None):
- """Get user from token and convert to UserWrapper object.
-
- .. note::
- get_user_object_from_token may be called in session.
- """
- expire_timestamp = {
- 'ge': datetime.datetime.now()
- }
- user_token = utils.get_db_object(
- session, models.UserToken, False,
- token=token, expire_timestamp=expire_timestamp
- )
- if not user_token:
- raise exception.Unauthorized(
- 'invalid user token: %s' % token
- )
- user_dict = _get_user(
- user_token.user_id, session=session
- ).to_dict()
- user_dict['token'] = token
- expire_timestamp = user_token.expire_timestamp
- user_dict['expire_timestamp'] = expire_timestamp
- return UserWrapper(**user_dict)
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
-def record_user_token(
- token, expire_timestamp, user=None, session=None
-):
- """record user token in database."""
- user_token = utils.get_db_object(
- session, models.UserToken, False,
- user_id=user.id, token=token
- )
- if not user_token:
- return utils.add_db_object(
- session, models.UserToken, True,
- token, user_id=user.id,
- expire_timestamp=expire_timestamp
- )
- elif expire_timestamp > user_token.expire_timestamp:
- return utils.update_db_object(
- session, user_token, expire_timestamp=expire_timestamp
- )
- return user_token
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
-def clean_user_token(token, user=None, session=None):
- """clean user token in database."""
- return utils.del_db_objects(
- session, models.UserToken,
- token=token, user_id=user.id
- )
-
-
-def _get_user(user_id, session=None, **kwargs):
- """Get user object by user id."""
- if isinstance(user_id, (int, long)):
- return utils.get_db_object(
- session, models.User, id=user_id, **kwargs
- )
- raise exception.InvalidParameter(
- 'user id %s type is not int compatible' % user_id
- )
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@check_user_admin_or_owner()
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_user(
- user_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """get a user."""
- return _get_user(
- user_id, session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_FIELDS)
-def get_current_user(
- exception_when_missing=True, user=None,
- session=None, **kwargs
-):
- """get current user."""
- return _get_user(
- user.id, session=session,
- exception_when_missing=exception_when_missing
- )
-
-
-@utils.supported_filters(
- optional_support_keys=SUPPORTED_FIELDS
-)
-@database.run_in_session()
-@check_user_admin()
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_users(user=None, session=None, **filters):
- """List all users."""
- return utils.list_db_objects(
- session, models.User, **filters
- )
-
-
-@utils.input_validates(email=_check_email)
-@utils.supported_filters(
- ADDED_FIELDS,
- optional_support_keys=OPTIONAL_ADDED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@check_user_admin()
-@utils.wrap_to_dict(RESP_FIELDS)
-def add_user(
- exception_when_existing=True, user=None,
- session=None, email=None, **kwargs
-):
- """Create a user and return created user object."""
- add_user = utils.add_db_object(
- session, models.User,
- exception_when_existing, email,
- **kwargs)
- _add_user_permissions(
- add_user,
- session=session,
- name=setting.COMPASS_DEFAULT_PERMISSIONS
- )
- return add_user
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@check_user_admin()
-@utils.wrap_to_dict(RESP_FIELDS)
-def del_user(user_id, user=None, session=None, **kwargs):
- """delete a user and return the deleted user object."""
- del_user = _get_user(user_id, session=session)
- return utils.del_db_object(session, del_user)
-
-
-@utils.supported_filters(
- optional_support_keys=UPDATED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@utils.input_validates(email=_check_email)
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_FIELDS)
-def update_user(user_id, user=None, session=None, **kwargs):
- """Update a user and return the updated user object."""
- update_user = _get_user(
- user_id, session=session,
- )
- allowed_fields = set()
- if user.is_admin:
- allowed_fields |= set(ADMIN_UPDATED_FIELDS)
- if user.id == update_user.id:
- allowed_fields |= set(SELF_UPDATED_FIELDS)
- unsupported_fields = set(kwargs) - allowed_fields
- if unsupported_fields:
- # The user is not allowed to update these fields.
- raise exception.Forbidden(
- 'User %s has no permission to update user %s fields %s.' % (
- user.email, update_user.email, unsupported_fields
- )
- )
- return utils.update_db_object(session, update_user, **kwargs)
-
-
-@utils.supported_filters(optional_support_keys=PERMISSION_SUPPORTED_FIELDS)
-@database.run_in_session()
-@check_user_admin_or_owner()
-@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
-def get_permissions(
- user_id, user=None, exception_when_missing=True,
- session=None, **kwargs
-):
- """List permissions of a user."""
- get_user = _get_user(
- user_id, session=session,
- exception_when_missing=exception_when_missing
- )
- return utils.list_db_objects(
- session, models.UserPermission, user_id=get_user.id, **kwargs
- )
-
-
-def _get_permission(user_id, permission_id, session=None, **kwargs):
- """Get user permission by user id and permission id."""
- user = _get_user(user_id, session=session)
- from compass.db.api import permission as permission_api
- permission = permission_api.get_permission_internal(
- permission_id, session=session
- )
- return utils.get_db_object(
- session, models.UserPermission,
- user_id=user.id, permission_id=permission.id,
- **kwargs
- )
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@check_user_admin_or_owner()
-@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
-def get_permission(
- user_id, permission_id, exception_when_missing=True,
- user=None, session=None, **kwargs
-):
- """Get a permission of a user."""
- return _get_permission(
- user_id, permission_id,
- exception_when_missing=exception_when_missing,
- session=session,
- **kwargs
- )
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@check_user_admin_or_owner()
-@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
-def del_permission(user_id, permission_id, user=None, session=None, **kwargs):
- """Delete a permission from a user."""
- user_permission = _get_permission(
- user_id, permission_id,
- session=session, **kwargs
- )
- return utils.del_db_object(session, user_permission)
-
-
-@utils.supported_filters(
- PERMISSION_ADDED_FIELDS,
- ignore_support_keys=IGNORE_FIELDS
-)
-@database.run_in_session()
-@check_user_admin()
-@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
-def add_permission(
- user_id, permission_id=None, exception_when_existing=True,
- user=None, session=None
-):
- """Add a permission to a user."""
- get_user = _get_user(user_id, session=session)
- from compass.db.api import permission as permission_api
- get_permission = permission_api.get_permission_internal(
- permission_id, session=session
- )
- return utils.add_db_object(
- session, models.UserPermission, exception_when_existing,
- get_user.id, get_permission.id
- )
-
-
-def _get_permission_filters(permission_ids):
- """Helper function to filter permissions."""
- if permission_ids == 'all':
- return {}
- else:
- return {'id': permission_ids}
-
-
-@utils.supported_filters(
- optional_support_keys=[
- 'add_permissions', 'remove_permissions', 'set_permissions'
- ]
-)
-@database.run_in_session()
-@check_user_admin()
-@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
-def update_permissions(
- user_id, add_permissions=[], remove_permissions=[],
- set_permissions=None, user=None, session=None, **kwargs
-):
- """update user permissions."""
- update_user = _get_user(user_id, session=session)
- if remove_permissions:
- _remove_user_permissions(
- update_user, session=session,
- **_get_permission_filters(remove_permissions)
- )
- if add_permissions:
- _add_user_permissions(
- update_user, session=session,
- **_get_permission_filters(add_permissions)
- )
- if set_permissions is not None:
- _set_user_permissions(
- update_user, session=session,
- **_get_permission_filters(set_permissions)
- )
- return update_user.user_permissions
diff --git a/compass-tasks/db/api/user_log.py b/compass-tasks/db/api/user_log.py
deleted file mode 100644
index 70de9db..0000000
--- a/compass-tasks/db/api/user_log.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""UserLog database operations."""
-import logging
-
-from compass.db.api import database
-from compass.db.api import user as user_api
-from compass.db.api import utils
-from compass.db import exception
-from compass.db import models
-
-
-SUPPORTED_FIELDS = ['user_email', 'timestamp']
-USER_SUPPORTED_FIELDS = ['timestamp']
-RESP_FIELDS = ['user_id', 'action', 'timestamp']
-
-
-@database.run_in_session()
-def log_user_action(user_id, action, session=None):
- """Log user action."""
- utils.add_db_object(
- session, models.UserLog, True, user_id=user_id, action=action
- )
-
-
-@utils.supported_filters(optional_support_keys=USER_SUPPORTED_FIELDS)
-@database.run_in_session()
-@user_api.check_user_admin_or_owner()
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_user_actions(user_id, user=None, session=None, **filters):
- """list user actions of a user."""
- list_user = user_api.get_user(user_id, user=user, session=session)
- return utils.list_db_objects(
- session, models.UserLog, order_by=['timestamp'],
- user_id=list_user['id'], **filters
- )
-
-
-@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
-@user_api.check_user_admin()
-@database.run_in_session()
-@utils.wrap_to_dict(RESP_FIELDS)
-def list_actions(user=None, session=None, **filters):
- """list actions of all users."""
- return utils.list_db_objects(
- session, models.UserLog, order_by=['timestamp'], **filters
- )
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@user_api.check_user_admin_or_owner()
-@utils.wrap_to_dict(RESP_FIELDS)
-def del_user_actions(user_id, user=None, session=None, **filters):
- """delete actions of a user."""
- del_user = user_api.get_user(user_id, user=user, session=session)
- return utils.del_db_objects(
- session, models.UserLog, user_id=del_user['id'], **filters
- )
-
-
-@utils.supported_filters()
-@database.run_in_session()
-@user_api.check_user_admin()
-@utils.wrap_to_dict(RESP_FIELDS)
-def del_actions(user=None, session=None, **filters):
- """delete actions of all users."""
- return utils.del_db_objects(
- session, models.UserLog, **filters
- )
diff --git a/compass-tasks/db/api/utils.py b/compass-tasks/db/api/utils.py
deleted file mode 100644
index a44f26e..0000000
--- a/compass-tasks/db/api/utils.py
+++ /dev/null
@@ -1,1286 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utils for database usage."""
-
-import functools
-import inspect
-import logging
-import netaddr
-import re
-
-from inspect import isfunction
-from sqlalchemy import and_
-from sqlalchemy import or_
-
-from compass.db import exception
-from compass.db import models
-from compass.utils import util
-
-
-def model_query(session, model):
- """model query.
-
- Return sqlalchemy query object.
- """
- if not issubclass(model, models.BASE):
- raise exception.DatabaseException("model should be sublass of BASE!")
-
- return session.query(model)
-
-
-def _default_list_condition_func(col_attr, value, condition_func):
- """The default condition func for a list of data.
-
- Given the condition func for a single item of data, this function
- wraps the condition_func and returns another condition func that
- uses or_ to merge the conditions of each single item, so that a
- list of data items can be handled.
-
- Args:
- col_attr: the column name
- value: the column value to be compared.
- condition_func: the sqlalchemy condition function, e.g. ==
-
- Examples:
- col_attr is name, value is ['a', 'b', 'c'] and
- condition_func is ==, the returned condition is
- name == 'a' or name == 'b' or name == 'c'
- """
- conditions = []
- for sub_value in value:
- condition = condition_func(col_attr, sub_value)
- if condition is not None:
- conditions.append(condition)
- if conditions:
- return or_(*conditions)
- else:
- return None
-
-
-def _one_item_list_condition_func(col_attr, value, condition_func):
- """The wrapper condition func to deal with one item data list.
-
- For simplification, it is used to reduce generating too complex
- sql conditions.
- """
- if value:
- return condition_func(col_attr, value[0])
- else:
- return None
-
-
-def _model_condition_func(
- col_attr, value,
- item_condition_func,
- list_condition_func=_default_list_condition_func
-):
- """Return sql condition based on value type."""
- if isinstance(value, list):
- if not value:
- return None
- if len(value) == 1:
- return item_condition_func(col_attr, value)
- return list_condition_func(
- col_attr, value, item_condition_func
- )
- else:
- return item_condition_func(col_attr, value)
-
-
-def _between_condition(col_attr, value):
- """Return sql range condition."""
- if value[0] is not None and value[1] is not None:
- return col_attr.between(value[0], value[1])
- if value[0] is not None:
- return col_attr >= value[0]
- if value[1] is not None:
- return col_attr <= value[1]
- return None
-
-
-def model_order_by(query, model, order_by):
- """append order by into sql query model."""
- if not order_by:
- return query
- order_by_cols = []
- for key in order_by:
- if isinstance(key, tuple):
- key, is_desc = key
- else:
- is_desc = False
- if isinstance(key, basestring):
- if hasattr(model, key):
- col_attr = getattr(model, key)
- else:
- continue
- else:
- col_attr = key
- if is_desc:
- order_by_cols.append(col_attr.desc())
- else:
- order_by_cols.append(col_attr)
- return query.order_by(*order_by_cols)
-
-
-def _model_condition(col_attr, value):
- """Generate condition for one column.
-
- Example for col_attr is name:
- value is 'a': name == 'a'
- value is ['a']: name == 'a'
- value is ['a', 'b']: name == 'a' or name == 'b'
- value is {'eq': 'a'}: name == 'a'
- value is {'lt': 'a'}: name < 'a'
- value is {'le': 'a'}: name <= 'a'
- value is {'gt': 'a'}: name > 'a'
- value is {'ge': 'a'}: name >= 'a'
- value is {'ne': 'a'}: name != 'a'
- value is {'in': ['a', 'b']}: name in ['a', 'b']
- value is {'notin': ['a', 'b']}: name not in ['a', 'b']
- value is {'startswith': 'abc'}: name like 'abc%'
- value is {'endswith': 'abc'}: name like '%abc'
- value is {'like': 'abc'}: name like '%abc%'
- value is {'between': ('a', 'c')}: name >= 'a' and name <= 'c'
- value is [{'lt': 'a'}]: name < 'a'
- value is [{'lt': 'a'}, {'gt': 'c'}]: name < 'a' or name > 'c'
- value is {'lt': 'c', 'gt': 'a'}: name > 'a' and name < 'c'
-
- If value is a list, the conditions of its items are combined
- with or.
- If value is a dict with multiple keys, the conditions of the
- keys are combined with and.
- Otherwise the condition compares the column with the value.
- """
- if isinstance(value, list):
- basetype_values = []
- composite_values = []
- for item in value:
- if isinstance(item, (list, dict)):
- composite_values.append(item)
- else:
- basetype_values.append(item)
- conditions = []
- if basetype_values:
- if len(basetype_values) == 1:
- condition = (col_attr == basetype_values[0])
- else:
- condition = col_attr.in_(basetype_values)
- conditions.append(condition)
- for composite_value in composite_values:
- condition = _model_condition(col_attr, composite_value)
- if condition is not None:
- conditions.append(condition)
- if not conditions:
- return None
- if len(conditions) == 1:
- return conditions[0]
- return or_(*conditions)
- elif isinstance(value, dict):
- conditions = []
- if 'eq' in value:
- conditions.append(_model_condition_func(
- col_attr, value['eq'],
- lambda attr, data: attr == data,
- lambda attr, data, item_condition_func: attr.in_(data)
- ))
- if 'lt' in value:
- conditions.append(_model_condition_func(
- col_attr, value['lt'],
- lambda attr, data: attr < data,
- _one_item_list_condition_func
- ))
- if 'gt' in value:
- conditions.append(_model_condition_func(
- col_attr, value['gt'],
- lambda attr, data: attr > data,
- _one_item_list_condition_func
- ))
- if 'le' in value:
- conditions.append(_model_condition_func(
- col_attr, value['le'],
- lambda attr, data: attr <= data,
- _one_item_list_condition_func
- ))
- if 'ge' in value:
- conditions.append(_model_condition_func(
- col_attr, value['ge'],
- lambda attr, data: attr >= data,
- _one_item_list_condition_func
- ))
- if 'ne' in value:
- conditions.append(_model_condition_func(
- col_attr, value['ne'],
- lambda attr, data: attr != data,
- lambda attr, data, item_condition_func: attr.notin_(data)
- ))
- if 'in' in value:
- conditions.append(col_attr.in_(value['in']))
- if 'notin' in value:
- conditions.append(col_attr.notin_(value['notin']))
- if 'startswith' in value:
- conditions.append(_model_condition_func(
- col_attr, value['startswith'],
- lambda attr, data: attr.like('%s%%' % data)
- ))
- if 'endswith' in value:
- conditions.append(_model_condition_func(
- col_attr, value['endswith'],
- lambda attr, data: attr.like('%%%s' % data)
- ))
- if 'like' in value:
- conditions.append(_model_condition_func(
- col_attr, value['like'],
- lambda attr, data: attr.like('%%%s%%' % data)
- ))
- if 'between' in value:
- conditions.append(_model_condition_func(
- col_attr, value['between'],
- _between_condition
- ))
- conditions = [
- condition
- for condition in conditions
- if condition is not None
- ]
- if not conditions:
- return None
- if len(conditions) == 1:
- return conditions[0]
- return and_(*conditions)
- else:
- condition = (col_attr == value)
- return condition
-
-
-def model_filter(query, model, **filters):
- """Append conditons to query for each possible column."""
- for key, value in filters.items():
- if isinstance(key, basestring):
- if hasattr(model, key):
- col_attr = getattr(model, key)
- else:
- continue
- else:
- col_attr = key
- condition = _model_condition(col_attr, value)
- if condition is not None:
- query = query.filter(condition)
- return query
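As a standalone illustration of the filter-value semantics documented in _model_condition and applied by model_filter, the equivalent plain SQLAlchemy expressions are sketched below; the toy table, column, and values are assumptions made only for this example:

from sqlalchemy import Column, Integer, MetaData, String, Table, and_, or_

metadata = MetaData()
switches = Table(
    'switches', metadata,
    Column('id', Integer, primary_key=True),
    Column('ip', String(80)),
)
col = switches.c.ip

print(col == '10.0.0.1')                         # value '10.0.0.1'
print(col.in_(['10.0.0.1', '10.0.0.2']))         # value ['10.0.0.1', '10.0.0.2']
print(and_(col > '10.0.0.1', col < '10.0.0.9'))  # value {'gt': '10.0.0.1', 'lt': '10.0.0.9'}
print(or_(col < '10.0.0.1', col > '10.0.0.9'))   # value [{'lt': '10.0.0.1'}, {'gt': '10.0.0.9'}]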
-
-
-def replace_output(**output_mapping):
- """Decorator to recursively relace output by output mapping.
-
- The replacement detail is described in _replace_output.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- return _replace_output(
- func(*args, **kwargs), **output_mapping
- )
- return wrapper
- return decorator
-
-
-def _replace_output(data, **output_mapping):
- """Helper to replace output data.
-
- Example:
- data = {'a': 'hello'}
- output_mapping = {'a': 'b'}
- returns: {'b': 'hello'}
-
- data = {'a': {'b': 'hello'}}
- output_mapping = {'a': 'b'}
- returns: {'b': {'b': 'hello'}}
-
- data = {'a': {'b': 'hello'}}
- output_mapping = {'a': {'b': 'c'}}
- returns: {'a': {'c': 'hello'}}
-
- data = [{'a': 'hello'}, {'a': 'hi'}]
- output_mapping = {'a': 'b'}
- returns: [{'b': 'hello'}, {'b': 'hi'}]
- """
- if isinstance(data, list):
- return [
- _replace_output(item, **output_mapping)
- for item in data
- ]
- if not isinstance(data, dict):
- raise exception.InvalidResponse(
- '%s type is not dict' % data
- )
- info = {}
- for key, value in data.items():
- if key in output_mapping:
- output_key = output_mapping[key]
- if isinstance(output_key, basestring):
- info[output_key] = value
- else:
- info[key] = (
- _replace_output(value, **output_key)
- )
- else:
- info[key] = value
- return info
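A rough usage sketch for replace_output, assuming a checkout from before this removal where compass.db.api.utils is still importable; fake_list_machines and its return value are made up for illustration:

from compass.db.api import utils as db_utils

@db_utils.replace_output(mac='mac_address', switch_ip='ip')
def fake_list_machines():
    # plain dicts stand in for db rows in this sketch
    return [{'mac': '00:01:02:03:04:05', 'switch_ip': '10.0.0.1'}]

print(fake_list_machines())
# [{'mac_address': '00:01:02:03:04:05', 'ip': '10.0.0.1'}]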
-
-
-def get_wrapped_func(func):
- """Get wrapped function instance.
-
- Example:
- @dec1
- @dec2
- myfunc(*args, **kwargs)
-
- get_wrapped_func(myfunc) returns the innermost wrapped function
- object, with the following attributes:
- __name__: 'myfunc'
- args: args
- kwargs: kwargs
- without unwrapping, myfunc is the outermost wrapper object with
- attributes like:
- __name__: partial object ...
- args: ...
- kwargs: ...
- """
- if func.func_closure:
- for closure in func.func_closure:
- if isfunction(closure.cell_contents):
- return get_wrapped_func(closure.cell_contents)
- return func
- else:
- return func
-
-
-def wrap_to_dict(support_keys=[], **filters):
- """Decrator to convert returned object to dict.
-
- The details is decribed in _wrapper_dict.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- return _wrapper_dict(
- func(*args, **kwargs), support_keys, **filters
- )
- return wrapper
- return decorator
-
-
-def _wrapper_dict(data, support_keys, **filters):
- """Helper for warpping db object into dictionary.
-
- If data is list, convert it to a list of dict
- If data is Base model, convert it to dict
- for the data as a dict, filter it with the supported keys.
- For each filter_key, filter_value in filters, also filter
- data[filter_key] by filter_value recursively if it exists.
-
- Example:
- data is models.Switch, it will be converted to
- {
- 'id': 1, 'ip': '10.0.0.1', 'ip_int': 123456,
- 'credentials': {'version': 2, 'password': 'abc'}
- }
- Then if support_keys are ['id', 'ip', 'credentials'],
- it will be filtered to {
- 'id': 1, 'ip': '10.0.0.1',
- 'credentials': {'version': 2, 'password': 'abc'}
- }
- Then if filters is {'credentials': ['version']},
- it will be filtered to {
- 'id': 1, 'ip': '10.0.0.1',
- 'credentials': {'version': 2}
- }
- """
- logging.debug(
- 'wrap dict %s by support_keys=%s filters=%s',
- data, support_keys, filters
- )
- if isinstance(data, list):
- return [
- _wrapper_dict(item, support_keys, **filters)
- for item in data
- ]
- if isinstance(data, models.HelperMixin):
- data = data.to_dict()
- if not isinstance(data, dict):
- raise exception.InvalidResponse(
- 'response %s type is not dict' % data
- )
- info = {}
- try:
- for key in support_keys:
- if key in data and data[key] is not None:
- if key in filters:
- filter_keys = filters[key]
- if isinstance(filter_keys, dict):
- info[key] = _wrapper_dict(
- data[key], filter_keys.keys(),
- **filter_keys
- )
- else:
- info[key] = _wrapper_dict(
- data[key], filter_keys
- )
- else:
- info[key] = data[key]
- return info
- except Exception as error:
- logging.exception(error)
- raise error
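A hedged usage sketch for wrap_to_dict under the same assumption that compass.db.api.utils is importable; fake_get_switch and its values are invented for the example:

from compass.db.api import utils as db_utils

@db_utils.wrap_to_dict(['id', 'ip', 'credentials'], credentials=['version'])
def fake_get_switch():
    return {
        'id': 1, 'ip': '10.0.0.1', 'ip_int': 167772161,
        'credentials': {'version': '2c', 'password': 'abc'},
    }

print(fake_get_switch())
# {'id': 1, 'ip': '10.0.0.1', 'credentials': {'version': '2c'}}  (key order may vary)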
-
-
-def replace_filters(**kwarg_mapping):
- """Decorator to replace kwargs.
-
- Examples:
- kwargs: {'a': 'b'}, kwarg_mapping: {'a': 'c'}
- replaced kwargs to decorated func:
- {'c': 'b'}
-
- replace_filters is used to replace caller's input
- to make it understandable by models.py.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- replaced_kwargs = {}
- for key, value in kwargs.items():
- if key in kwarg_mapping:
- replaced_kwargs[kwarg_mapping[key]] = value
- else:
- replaced_kwargs[key] = value
- return func(*args, **replaced_kwargs)
- return wrapper
- return decorator
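Similarly, a minimal sketch of replace_filters with a made-up fake_patch function; this mirrors how the patch_* APIs above map vlans to patched_vlans:

from compass.db.api import utils as db_utils

@db_utils.replace_filters(vlans='patched_vlans')
def fake_patch(**kwargs):
    return kwargs

print(fake_patch(vlans=[88], tag='rack1'))
# {'patched_vlans': [88], 'tag': 'rack1'}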
-
-
-def supported_filters(
- support_keys=[],
- optional_support_keys=[],
- ignore_support_keys=[],
-):
- """Decorator to check kwargs keys.
-
- Keys that appear in both kwargs and ignore_support_keys are removed.
- If any unsupported keys are found, an InvalidParameter
- exception is raised.
-
- Args:
- support_keys: keys that must exist.
- optional_support_keys: keys that may exist.
- ignore_support_keys: keys should be ignored.
-
- Assumption: positional args without default values are expected
- to be provided. Whether or not they are listed in support_keys,
- we make sure they appear when the decorated function is called.
- Both args and kwargs are checked to determine whether a key
- is present.
-
- Examples:
- decorated func: func(a, b, c=3, d=4, **kwargs)
-
- support_keys=['e'] and call func(e=5):
- raises: InvalidParameter: missing declared arg
- support_keys=['e'] and call func(1,2,3,4,5,e=6):
- raises: InvalidParameter: caller sending more args
- support_keys=['e'] and call func(1,2):
- raises: InvalidParameter: supported keys ['e'] missing
- support_keys=['d', 'e'] and call func(1,2,e=3):
- raises: InvalidParameter: supported keys ['d'] missing
- support_keys=['d', 'e'] and call func(1,2,d=4, e=3):
- passed
- support_keys=['d'], optional_support_keys=['e']
- and call func(1,2, d=3):
- passed
- support_keys=['d'], optional_support_keys=['e']
- and call func(1,2, d=3, e=4, f=5):
- raises: InvalidParameter: unsupported keys ['f']
- support_keys=['d'], optional_support_keys=['e'],
- ignore_support_keys=['f']
- and call func(1,2, d=3, e=4, f=5):
- passed to decorated keys: func(1,2, d=3, e=4)
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **filters):
- wrapped_func = get_wrapped_func(func)
- argspec = inspect.getargspec(wrapped_func)
- wrapped_args = argspec.args
- args_defaults = argspec.defaults
- # wrapped_must_args are positional args caller must pass in.
- if args_defaults:
- wrapped_must_args = wrapped_args[:-len(args_defaults)]
- else:
- wrapped_must_args = wrapped_args[:]
- # make sure any positional args without default value in
- # decorated function should appear in args or filters.
- if len(args) < len(wrapped_must_args):
- remain_args = wrapped_must_args[len(args):]
- for remain_arg in remain_args:
- if remain_arg not in filters:
- raise exception.InvalidParameter(
- 'function missing declared arg %s '
- 'while caller sends args %s' % (
- remain_arg, args
- )
- )
- # make sure args should be no more than positional args
- # declared in decorated function.
- if len(args) > len(wrapped_args):
- raise exception.InvalidParameter(
- 'function definition args %s while the caller '
- 'sends args %s' % (
- wrapped_args, args
- )
- )
- # exist_args are positional args caller has given.
- exist_args = dict(zip(wrapped_args, args)).keys()
- must_support_keys = set(support_keys)
- all_support_keys = must_support_keys | set(optional_support_keys)
- wrapped_supported_keys = set(filters) | set(exist_args)
- unsupported_keys = (
- set(filters) - set(wrapped_args) -
- all_support_keys - set(ignore_support_keys)
- )
- # unsupported_keys are the keys that are not in support_keys,
- # optional_support_keys, ignore_support_keys and are not passed in
- # by positional args. It means the decorated function may
- # not understand these parameters.
- if unsupported_keys:
- raise exception.InvalidParameter(
- 'filter keys %s are not supported for %s' % (
- list(unsupported_keys), wrapped_func
- )
- )
- # missing_keys are the keys that must exist but missing in
- # both positional args or kwargs.
- missing_keys = must_support_keys - wrapped_supported_keys
- if missing_keys:
- raise exception.InvalidParameter(
- 'filter keys %s not found for %s' % (
- list(missing_keys), wrapped_func
- )
- )
- # We filter kwargs to eliminate ignore_support_keys in kwargs
- # passed to decorated function.
- filtered_filters = dict([
- (key, value)
- for key, value in filters.items()
- if key not in ignore_support_keys
- ])
- return func(*args, **filtered_filters)
- return wrapper
- return decorator
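A usage sketch for supported_filters, again assuming a pre-removal checkout and the Python 2 environment this module targets; fake_update_switch and the key names are illustrative only:

from compass.db.api import utils as db_utils
from compass.db import exception

@db_utils.supported_filters(
    support_keys=['name'],
    optional_support_keys=['vlans'],
    ignore_support_keys=['created_at'],
)
def fake_update_switch(switch_id, **kwargs):
    return switch_id, kwargs

print(fake_update_switch(1, name='sw1', created_at='now'))
# (1, {'name': 'sw1'})  -- created_at is silently dropped
try:
    fake_update_switch(1, name='sw1', port='ae20')
except exception.InvalidParameter as error:
    print(error)  # port is not among the supported or ignored keys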
-
-
-def input_filters(
- **filters
-):
- """Decorator to filter kwargs.
-
- For each key in kwargs, if the key exists in filters
- and calling filters[key] returns False, the key
- is removed from kwargs.
-
- The function definition of filters[key] is
- func(value, *args, **kwargs) compared with decorated
- function func(*args, **kwargs)
-
- The function is used to filter kwargs in case some
- kwargs should be removed conditionally, depending on the
- related filters.
-
- Examples:
- filters={'a': func(value, *args, **kwargs)}
- @input_filters(**filters)
- decorated_func(*args, **kwargs)
- If func returns False,
- then calling decorated_func(a=1, b=2)
- actually calls the decorated func with only
- b=2; a=1 is removed since it does not pass the filter.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- filtered_kwargs = {}
- for key, value in kwargs.items():
- if key in filters:
- if filters[key](value, *args, **kwargs):
- filtered_kwargs[key] = value
- else:
- logging.debug(
- 'ignore filtered input key %s' % key
- )
- else:
- filtered_kwargs[key] = value
- return func(*args, **filtered_kwargs)
- return wrapper
- return decorator
-
-
-def _obj_equal_or_subset(check, obj):
- """Used by output filter to check if obj is in check."""
- if check == obj:
- return True
- if not issubclass(obj.__class__, check.__class__):
- return False
- if isinstance(obj, dict):
- return _dict_equal_or_subset(check, obj)
- elif isinstance(obj, list):
- return _list_equal_or_subset(check, obj)
- else:
- return False
-
-
-def _list_equal_or_subset(check_list, obj_list):
- """Used by output filter to check if obj_list is in check_list"""
- if not isinstance(check_list, list):
- return False
- return set(check_list).issubset(set(obj_list))
-
-
-def _dict_equal_or_subset(check_dict, obj_dict):
- """Used by output filter to check if obj_dict in check_dict."""
- if not isinstance(check_dict, dict):
- return False
- for key, value in check_dict.items():
- if (
- key not in obj_dict or
- not _obj_equal_or_subset(check_dict[key], obj_dict[key])
- ):
- return False
- return True
-
-
-def general_filter_callback(general_filter, obj):
- """General filter function to filter output.
-
- Since some fields stored in the database are json encoded and
- we sometimes want to do a deep match on a json encoded field
- when filtering, output_filters and general_filter_callback are
- introduced to deal with this kind of case.
-
- The key 'resp_eq' gets special treatment: it checks whether
- general_filter['resp_eq'] equals or is recursively a subset of obj.
-
-
- Example:
- obj: 'b'
- general_filter: {}
- returns: True
-
- obj: 'b'
- general_filter: {'resp_in': ['a', 'b']}
- returns: True
-
- obj: 'b'
- general_filter: {'resp_in': ['a']}
- returns: False
-
- obj: 'b'
- general_filter: {'resp_eq': 'b'}
- returns: True
-
- obj: 'b'
- general_filter: {'resp_eq': 'a'}
- returns: False
-
- obj: 'b'
- general_filter: {'resp_range': ('a', 'c')}
- returns: True
-
- obj: 'd'
- general_filter: {'resp_range': ('a', 'c')}
- returns: False
-
- If there are multiple keys in the dict, the output is filtered
- by the and relationship of the checks.
-
- If the general_filter is a list, the output is filtered
- by the or relationship of its items.
-
- Supported general filters: [
- 'resp_eq', 'resp_in', 'resp_lt',
- 'resp_le', 'resp_gt', 'resp_ge',
- 'resp_match', 'resp_range'
- ]
- """
- if isinstance(general_filter, list):
- if not general_filter:
- return True
- return any([
- general_filter_callback(item, obj)
- for item in general_filter
- ])
- elif isinstance(general_filter, dict):
- if 'resp_eq' in general_filter:
- if not _obj_equal_or_subset(
- general_filter['resp_eq'], obj
- ):
- return False
- if 'resp_in' in general_filter:
- in_filters = general_filter['resp_in']
- if not any([
- _obj_equal_or_subset(in_filter, obj)
- for in_filter in in_filters
- ]):
- return False
- if 'resp_lt' in general_filter:
- if obj >= general_filter['resp_lt']:
- return False
- if 'resp_le' in general_filter:
- if obj > general_filter['resp_le']:
- return False
- if 'resp_gt' in general_filter:
- if obj <= general_filter['resp_gt']:
- return False
- if 'resp_ge' in general_filter:
- if obj < general_filter['resp_ge']:
- return False
- if 'resp_match' in general_filter:
- if not re.match(general_filter['resp_match'], obj):
- return False
- if 'resp_range' in general_filter:
- resp_range = general_filter['resp_range']
- if not isinstance(resp_range, list):
- resp_range = [resp_range]
- in_range = False
- for range_start, range_end in resp_range:
- if range_start <= obj <= range_end:
- in_range = True
- if not in_range:
- return False
- return True
- else:
- return True
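A few illustrative calls to general_filter_callback (same importability assumption as above; the values are arbitrary):

from compass.db.api import utils as db_utils

print(db_utils.general_filter_callback({'resp_in': ['a', 'b']}, 'b'))     # True
print(db_utils.general_filter_callback({'resp_range': ('a', 'c')}, 'd'))  # False
print(db_utils.general_filter_callback(
    [{'resp_eq': 'a'}, {'resp_match': r'b.*'}], 'bcd'
))                                                                         # True (list means or)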
-
-
-def filter_output(filter_callbacks, kwargs, obj, missing_ok=False):
- """Filter ouput.
-
- For each key in filter_callbacks, if it exists in kwargs,
- kwargs[key] tells what we need to filter. If the call of
- filter_callbacks[key] returns False, it tells the obj should be
- filtered out of output.
- """
- for callback_key, callback_value in filter_callbacks.items():
- if callback_key not in kwargs:
- continue
- if callback_key not in obj:
- if missing_ok:
- continue
- else:
- raise exception.InvalidResponse(
- '%s is not in %s' % (callback_key, obj)
- )
- if not callback_value(
- kwargs[callback_key], obj[callback_key]
- ):
- return False
- return True
-
-
-def output_filters(missing_ok=False, **filter_callbacks):
- """Decorator to filter output list.
-
- Each filter_callback should have the definition like:
- func({'resp_eq': 'a'}, 'a')
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- filtered_obj_list = []
- obj_list = func(*args, **kwargs)
- for obj in obj_list:
- if filter_output(
- filter_callbacks, kwargs, obj, missing_ok
- ):
- filtered_obj_list.append(obj)
- return filtered_obj_list
- return wrapper
- return decorator
-
-
-def _input_validates(args_validators, kwargs_validators, *args, **kwargs):
- """Used by input_validators to validate inputs."""
- for i, value in enumerate(args):
- if i < len(args_validators) and args_validators[i]:
- args_validators[i](value)
- for key, value in kwargs.items():
- if kwargs_validators.get(key):
- kwargs_validators[key](value)
-
-
-def input_validates(*args_validators, **kwargs_validators):
- """Decorator to validate input.
-
- Each validator should have definition like:
- func('00:01:02:03:04:05')
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- _input_validates(
- args_validators, kwargs_validators,
- *args, **kwargs
- )
- return func(*args, **kwargs)
- return wrapper
- return decorator
-
-
-def _input_validates_with_args(
- args_validators, kwargs_validators, *args, **kwargs
-):
- """Validate input with validators.
-
- Each validator takes the arguments of the decorated function
- as its arguments. The function definition is like:
- func(value, *args, **kwargs) compared with the decorated
- function func(*args, **kwargs).
- """
- for i, value in enumerate(args):
- if i < len(args_validators) and args_validators[i]:
- args_validators[i](value, *args, **kwargs)
- for key, value in kwargs.items():
- if kwargs_validators.get(key):
- kwargs_validators[key](value, *args, **kwargs)
-
-
-def input_validates_with_args(
- *args_validators, **kwargs_validators
-):
- """Decorator to validate input."""
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- _input_validates_with_args(
- args_validators, kwargs_validators,
- *args, **kwargs
- )
- return func(*args, **kwargs)
- return wrapper
- return decorator
-
-
-def _output_validates_with_args(
- kwargs_validators, obj, *args, **kwargs
-):
- """Validate output with validators.
-
- Each validator takes the arguments of the decorated function
- as its arguments. The function definition is like:
- func(value, *args, **kwargs) compared with the decorated
- function func(*args, **kwargs).
- """
- if isinstance(obj, list):
- for item in obj:
- _output_validates_with_args(
- kwargs_validators, item, *args, **kwargs
- )
- return
- if isinstance(obj, models.HelperMixin):
- obj = obj.to_dict()
- if not isinstance(obj, dict):
- raise exception.InvalidResponse(
- 'response %s type is not dict' % str(obj)
- )
- try:
- for key, value in obj.items():
- if key in kwargs_validators:
- kwargs_validators[key](value, *args, **kwargs)
- except Exception as error:
- logging.exception(error)
- raise error
-
-
-def output_validates_with_args(**kwargs_validators):
- """Decorator to validate output.
-
- The validator can take the arguments of the decorated
- function as its arguments.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- obj = func(*args, **kwargs)
- if isinstance(obj, list):
- for obj_item in obj:
- _output_validates_with_args(
- kwargs_validators, obj_item,
- *args, **kwargs
- )
- else:
- _output_validates_with_args(
- kwargs_validators, obj,
- *args, **kwargs
- )
- return obj
- return wrapper
- return decorator
-
-
-def _output_validates(kwargs_validators, obj):
- """Validate output.
-
- Each validator has following signature:
- func(value)
- """
- if isinstance(obj, list):
- for item in obj:
- _output_validates(kwargs_validators, item)
- return
- if isinstance(obj, models.HelperMixin):
- obj = obj.to_dict()
- if not isinstance(obj, dict):
- raise exception.InvalidResponse(
- 'response %s type is not dict' % str(obj)
- )
- try:
- for key, value in obj.items():
- if key in kwargs_validators:
- kwargs_validators[key](value)
- except Exception as error:
- logging.exception(error)
- raise error
-
-
-def output_validates(**kwargs_validators):
- """Decorator to validate output."""
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- obj = func(*args, **kwargs)
- if isinstance(obj, list):
- for obj_item in obj:
- _output_validates(kwargs_validators, obj_item)
- else:
- _output_validates(kwargs_validators, obj)
- return obj
- return wrapper
- return decorator
-
-
-def get_db_object(session, table, exception_when_missing=True, **kwargs):
- """Get db object.
-
- If exception_when_missing is False and the db object cannot be found,
- return None instead of raising an exception.
- """
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s get db object %s from table %s',
- id(session), kwargs, table.__name__)
- db_object = model_filter(
- model_query(session, table), table, **kwargs
- ).first()
- logging.debug(
- 'session %s got db object %s', id(session), db_object
- )
- if db_object:
- return db_object
-
- if not exception_when_missing:
- return None
-
- raise exception.RecordNotExists(
- 'Cannot find the record in table %s: %s' % (
- table.__name__, kwargs
- )
- )
-
-
-def add_db_object(session, table, exception_when_existing=True,
- *args, **kwargs):
- """Create db object.
-
- If exception_when_existing is False and the db object already exists,
- the existing db object is updated instead of raising an exception.
- """
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s add object %s attributes %s to table %s',
- id(session), args, kwargs, table.__name__)
- argspec = inspect.getargspec(table.__init__)
- arg_names = argspec.args[1:]
- arg_defaults = argspec.defaults
- if not arg_defaults:
- arg_defaults = []
- if not (
- len(arg_names) - len(arg_defaults) <= len(args) <= len(arg_names)
- ):
- raise exception.InvalidParameter(
- 'arg names %s does not match arg values %s' % (
- arg_names, args)
- )
- db_keys = dict(zip(arg_names, args))
- if db_keys:
- db_object = session.query(table).filter_by(**db_keys).first()
- else:
- db_object = None
-
- new_object = False
- if db_object:
- logging.debug(
- 'got db object %s: %s', db_keys, db_object
- )
- if exception_when_existing:
- raise exception.DuplicatedRecord(
- '%s exists in table %s' % (db_keys, table.__name__)
- )
- else:
- db_object = table(**db_keys)
- new_object = True
-
- for key, value in kwargs.items():
- setattr(db_object, key, value)
-
- if new_object:
- session.add(db_object)
- session.flush()
- db_object.initialize()
- db_object.validate()
- logging.debug(
- 'session %s db object %s added', id(session), db_object
- )
- return db_object
-
-
-def list_db_objects(session, table, order_by=[], **filters):
- """List db objects.
-
- If order_by is given, the db objects are sorted by the given keys.
- """
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s list db objects by filters %s in table %s',
- id(session), filters, table.__name__
- )
- db_objects = model_order_by(
- model_filter(
- model_query(session, table),
- table,
- **filters
- ),
- table,
- order_by
- ).all()
- logging.debug(
- 'session %s got listed db objects: %s',
- id(session), db_objects
- )
- return db_objects
-
-
-def del_db_objects(session, table, **filters):
- """delete db objects."""
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s delete db objects by filters %s in table %s',
- id(session), filters, table.__name__
- )
- query = model_filter(
- model_query(session, table), table, **filters
- )
- db_objects = query.all()
- query.delete(synchronize_session=False)
- logging.debug(
- 'session %s db objects %s deleted', id(session), db_objects
- )
- return db_objects
-
-
-def update_db_objects(session, table, updates={}, **filters):
- """Update db objects."""
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s update db objects by filters %s in table %s',
- id(session), filters, table.__name__)
- db_objects = model_filter(
- model_query(session, table), table, **filters
- ).all()
- for db_object in db_objects:
- logging.debug('update db object %s: %s', db_object, updates)
- update_db_object(session, db_object, **updates)
- logging.debug(
- 'session %s db objects %s updated',
- id(session), db_objects
- )
- return db_objects
-
-
-def update_db_object(session, db_object, **kwargs):
- """Update db object."""
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s update db object %s by value %s',
- id(session), db_object, kwargs
- )
- for key, value in kwargs.items():
- setattr(db_object, key, value)
- session.flush()
- db_object.update()
- db_object.validate()
- logging.debug(
- 'session %s db object %s updated',
- id(session), db_object
- )
- return db_object
-
-
-def del_db_object(session, db_object):
- """Delete db object."""
- if not session:
- raise exception.DatabaseException('session param is None')
- with session.begin(subtransactions=True):
- logging.debug(
- 'session %s delete db object %s',
- id(session), db_object
- )
- session.delete(db_object)
- logging.debug(
- 'session %s db object %s deleted',
- id(session), db_object
- )
- return db_object
-
-
-def check_ip(ip):
- """Check ip is ip address formatted."""
- try:
- netaddr.IPAddress(ip)
- except Exception as error:
- logging.exception(error)
- raise exception.InvalidParameter(
- 'ip address %s format is incorrect' % ip
- )
-
-
-def check_mac(mac):
- """Check mac is mac address formatted."""
- try:
- netaddr.EUI(mac)
- except Exception as error:
- logging.exception(error)
- raise exception.InvalidParameter(
- 'invalid mac address %s' % mac
- )
-
-
-NAME_PATTERN = re.compile(r'[a-zA-Z0-9][a-zA-Z0-9_-]*')
-
-
-def check_name(name):
- """Check name meeting name format requirement."""
- if not NAME_PATTERN.match(name):
- raise exception.InvalidParameter(
- 'name %s does not match the pattern %s' % (
- name, NAME_PATTERN.pattern
- )
- )
-
-
-def _check_ipmi_credentials_ip(ip):
- check_ip(ip)
-
-
-def check_ipmi_credentials(ipmi_credentials):
- """Check ipmi credentials format is correct."""
- if not ipmi_credentials:
- return
- if not isinstance(ipmi_credentials, dict):
- raise exception.InvalidParameter(
- 'invalid ipmi credentials %s' % ipmi_credentials
-
- )
- for key in ipmi_credentials:
- if key not in ['ip', 'username', 'password']:
- raise exception.InvalidParameter(
- 'unrecognized field %s in ipmi credentials %s' % (
- key, ipmi_credentials
- )
- )
- for key in ['ip', 'username', 'password']:
- if key not in ipmi_credentials:
- raise exception.InvalidParameter(
- 'no field %s in ipmi credentials %s' % (
- key, ipmi_credentials
- )
- )
- check_ipmi_credential_field = '_check_ipmi_credentials_%s' % key
- this_module = globals()
- if check_ipmi_credential_field in this_module:
- this_module[check_ipmi_credential_field](
- ipmi_credentials[key]
- )
- else:
- logging.debug(
- 'function %s is not defined', check_ipmi_credential_field
- )
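A small sketch of what check_ipmi_credentials accepts and rejects (importability assumption as above; the address and credentials are placeholders):

from compass.db.api import utils as db_utils
from compass.db import exception

db_utils.check_ipmi_credentials({
    'ip': '192.168.1.100', 'username': 'admin', 'password': 'admin',
})  # passes silently
try:
    db_utils.check_ipmi_credentials({'ip': '192.168.1.100'})
except exception.InvalidParameter as error:
    print(error)  # username and password fields are required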
-
-
-def _check_switch_credentials_version(version):
- if version not in ['1', '2c', '3']:
- raise exception.InvalidParameter(
- 'unknown snmp version %s' % version
- )
-
-
-def check_switch_credentials(credentials):
- """Check switch credentials format is correct."""
- if not credentials:
- return
- if not isinstance(credentials, dict):
- raise exception.InvalidParameter(
- 'credentials %s is not dict' % credentials
- )
- for key in credentials:
- if key not in ['version', 'community']:
- raise exception.InvalidParameter(
- 'unrecognized key %s in credentials %s' % (key, credentials)
- )
- for key in ['version', 'community']:
- if key not in credentials:
- raise exception.InvalidParameter(
- 'there is no %s field in credentials %s' % (key, credentials)
- )
-
- key_check_func_name = '_check_switch_credentials_%s' % key
- this_module = globals()
- if key_check_func_name in this_module:
- this_module[key_check_func_name](
- credentials[key]
- )
- else:
- logging.debug(
- 'function %s is not defined',
- key_check_func_name
- )
diff --git a/compass-tasks/db/callback.py b/compass-tasks/db/callback.py
deleted file mode 100644
index 35798bc..0000000
--- a/compass-tasks/db/callback.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Metadata Callback methods."""
-import logging
-import netaddr
-import random
-import re
-import socket
-
-from compass.db import exception
-from compass.utils import setting_wrapper as setting
-from compass.utils import util
-
-
-CALLBACK_GLOBALS = globals()
-CALLBACK_LOCALS = locals()
-CALLBACK_CONFIGS = util.load_configs(
- setting.CALLBACK_DIR,
- config_name_suffix='.py',
- env_globals=CALLBACK_GLOBALS,
- env_locals=CALLBACK_LOCALS
-)
-for callback_config in CALLBACK_CONFIGS:
- CALLBACK_LOCALS.update(callback_config)
-
-
-def default_proxy(name, **kwargs):
- return setting.COMPASS_SUPPORTED_PROXY
-
-
-def proxy_options(name, **kwargs):
- return [setting.COMPASS_SUPPORTED_PROXY]
-
-
-def default_noproxy(name, **kwargs):
- return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
-
-
-def noproxy_options(name, **kwargs):
- return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
-
-
-def default_ntp_server(name, **kwargs):
- return setting.COMPASS_SUPPORTED_NTP_SERVER
-
-
-def ntp_server_options(name, **kwargs):
- return setting.COMPASS_SUPPORTED_NTP_SERVER
-
-
-def default_dns_servers(name, **kwargs):
- return setting.COMPASS_SUPPORTED_DNS_SERVERS
-
-
-def dns_servers_options(name, **kwargs):
- return setting.COMPASS_SUPPORTED_DNS_SERVERS
-
-
-def default_domain(name, **kwargs):
- if setting.COMPASS_SUPPORTED_DOMAINS:
- return setting.COMPASS_SUPPORTED_DOMAINS[0]
- else:
- return None
-
-
-def domain_options(name, **kwargs):
- return setting.COMPASS_SUPPORTED_DOMAINS
-
-
-def default_search_path(name, **kwargs):
- return setting.COMPASS_SUPPORTED_DOMAINS
-
-
-def search_path_options(name, **kwargs):
- return setting.COMPASS_SUPPORTED_DOMAINS
-
-
-def default_gateway(name, **kwargs):
- return setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY
-
-
-def default_gateway_options(name, **kwargs):
- return [setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY]
-
-
-def default_localrepo(name, **kwargs):
- return setting.COMPASS_SUPPORTED_LOCAL_REPO
-
-
-def default_localrepo_options(name, **kwargs):
- return [setting.COMPASS_SUPPORTED_LOCAL_REPO]
-
-
-def autofill_callback_default(name, config, **kwargs):
- if config is None:
- if (
- 'autofill_types' not in kwargs or
- not (set(kwargs['autofill_types']) & set(kwargs))
- ):
- return None
- if 'default_value' not in kwargs:
- return None
- return kwargs['default_value']
- return config
-
-
-def autofill_callback_random_option(name, config, **kwargs):
- if config is None:
- if (
- 'autofill_types' not in kwargs or
- not (set(kwargs['autofill_types']) & set(kwargs))
- ):
- return None
- if 'options' not in kwargs or not kwargs['options']:
- return None
- return random.choice(kwargs['options'])
- return config
-
-
-def autofill_no_proxy(name, config, **kwargs):
- logging.debug(
- 'autofill %s config %s by params %s',
- name, config, kwargs
- )
- if 'cluster' in kwargs:
- if config is None:
- config = []
- if 'default_value' in kwargs:
- for default_no_proxy in kwargs['default_value']:
- if default_no_proxy and default_no_proxy not in config:
- config.append(default_no_proxy)
- cluster = kwargs['cluster']
- for clusterhost in cluster.clusterhosts:
- host = clusterhost.host
- hostname = host.name
- if hostname not in config:
- config.append(hostname)
- for host_network in host.host_networks:
- if host_network.is_mgmt:
- ip = host_network.ip
- if ip not in config:
- config.append(ip)
- if not config:
- return config
- return [no_proxy for no_proxy in config if no_proxy]
-
-
-def autofill_network_mapping(name, config, **kwargs):
- logging.debug(
- 'autofill %s config %s by params %s',
- name, config, kwargs
- )
- if not config:
- return config
- if isinstance(config, basestring):
- config = {
- 'interface': config,
- 'subnet': None
- }
- if not isinstance(config, dict):
- return config
- if 'interface' not in config:
- return config
- subnet = None
- interface = config['interface']
- if 'cluster' in kwargs:
- cluster = kwargs['cluster']
- for clusterhost in cluster.clusterhosts:
- host = clusterhost.host
- for host_network in host.host_networks:
- if host_network.interface == interface:
- subnet = host_network.subnet.subnet
- elif 'clusterhost' in kwargs:
- clusterhost = kwargs['clusterhost']
- host = clusterhost.host
- for host_network in host.host_networks:
- if host_network.interface == interface:
- subnet = host_network.subnet.subnet
- if not subnet:
- raise exception.InvalidParameter(
- 'interface %s not found in host(s)' % interface
- )
- if 'subnet' not in config or not config['subnet']:
- config['subnet'] = subnet
- else:
- if config['subnet'] != subnet:
- raise exception.InvalidParameter(
- 'subnet %s in config is not equal to subnet %s in hosts' % (
- config['subnet'], subnet
- )
- )
- return config
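Each callback above follows the same contract: it receives the metadata item name, the currently configured value (or None), and keyword context such as options or cluster, and returns the value to store. A simplified, self-contained sketch of that contract (the autofill_types gating is omitted, and the callback name and option values are invented for illustration):

import random


def autofill_random_option(name, config, **kwargs):
    if config is not None:
        return config                      # keep an explicitly set value
    options = kwargs.get('options') or []
    return random.choice(options) if options else None


assert autofill_random_option('ntp_server', '10.0.0.5') == '10.0.0.5'
picked = autofill_random_option('ntp_server', None, options=['a', 'b'])
assert picked in ['a', 'b']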
diff --git a/compass-tasks/db/config_validation/__init__.py b/compass-tasks/db/config_validation/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/compass-tasks/db/config_validation/__init__.py
+++ /dev/null
diff --git a/compass-tasks/db/config_validation/default_validator.py b/compass-tasks/db/config_validation/default_validator.py
deleted file mode 100644
index 224447f..0000000
--- a/compass-tasks/db/config_validation/default_validator.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Default config validation function."""
-
-from sqlalchemy import or_
-
-from compass.db.models import OSConfigField
-from compass.db.models import OSConfigMetadata
-from compass.db import validator
-
-MAPPER = {
- "os_id": {
- "metaTable": OSConfigMetadata,
- "metaFieldTable": OSConfigField
- }
- # "adapter_id": {
- # "metaTable": AdapterConfigMetadata,
- # "metaFieldTable": AdapterConfigField
- # }
-}
-
-
-def validate_config(session, config, id_name, id_value, patch=True):
- """Validates config.
-
- Validates the given config value according to the config
-    metadata of the associated os_id or adapter_id. Returns
- a tuple (status, message).
- """
- if id_name not in MAPPER.keys():
- return (False, "Invalid id type %s" % id_name)
-
- meta_table = MAPPER[id_name]['metaTable']
- metafield_table = MAPPER[id_name]['metaFieldTable']
- with session.begin(subtransactions=True):
-        name_col = getattr(meta_table, 'name')
- id_col = getattr(meta_table, id_name)
-
- return _validate_config_helper(session, config,
- name_col, id_col, id_value,
- meta_table, metafield_table,
- patch)
-
-
-def _validate_config_helper(session, config,
- name_col, id_col, id_value,
- meta_table, metafield_table, patch=True):
-
- with session.begin(subtransactions=True):
- for elem in config:
-
- obj = session.query(meta_table).filter(name_col == elem)\
-                         .filter(or_(id_col.is_(None),
- id_col == id_value)).first()
-
- if not obj and "_type" not in config[elem]:
- return (False, "Invalid metadata '%s'!" % elem)
-
- if "_type" in config[elem]:
- # Metadata is a variable
- metadata_name = config[elem]['_type']
- obj = session.query(meta_table).filter_by(name=metadata_name)\
- .first()
-
- if not obj:
- err_msg = ("Invalid metatdata '%s' or missing '_type'"
- "to indicate this is a variable metatdata."
- % elem)
- return (False, err_msg)
-
- # TODO(Grace): validate metadata here
- del config[elem]['_type']
-
- fields = obj.fields
-
- if not fields:
- is_valid, message = _validate_config_helper(session,
- config[elem],
- name_col, id_col,
- id_value,
- meta_table,
- metafield_table,
- patch)
- if not is_valid:
- return (False, message)
-
- else:
- field_config = config[elem]
- for key in field_config:
- field = session.query(metafield_table)\
- .filter_by(field=key).first()
- if not field:
- # The field is not in schema
- return (False, "Invalid field '%s'!" % key)
-
- value = field_config[key]
- if field.is_required and value is None:
- # The value of this field is required
- # and cannot be none
- err = "The value of field '%s' cannot be null!" % key
- return (False, err)
-
- if field.validator:
- func = getattr(validator, field.validator)
- if not func or not func(value):
- err_msg = ("The value of the field '%s' is "
- "invalid format or None!" % key)
- return (False, err_msg)
-
- # This is a PUT request. We need to check presence of all
- # required fields.
- if not patch:
- for field in fields:
- name = field.field
- if field.is_required and name not in field_config:
- return (False,
- "Missing required field '%s'" % name)
-
- return (True, None)
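The helper above walks the config dict recursively against metadata rows, resolving variable keys through '_type', and reports problems as an (is_valid, message) tuple instead of raising. The stand-alone sketch below mirrors only that recursive walk and return convention, with a plain dict standing in for the metadata tables; it is illustrative, not the actual validator:

def validate(config, schema):
    # schema: nested dict whose leaves are expected Python types
    for key, value in config.items():
        if key not in schema:
            return (False, "Invalid metadata '%s'!" % key)
        if isinstance(value, dict):
            is_valid, message = validate(value, schema[key])
            if not is_valid:
                return (False, message)
        elif schema[key] is not None and not isinstance(value, schema[key]):
            return (False, "Invalid field '%s'!" % key)
    return (True, None)


schema = {'server_credentials': {'username': str, 'password': str}}
assert validate({'server_credentials': {'username': 'root'}}, schema) == (True, None)
assert validate({'bogus': {}}, schema)[0] is False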
diff --git a/compass-tasks/db/config_validation/extension/__init__.py b/compass-tasks/db/config_validation/extension/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/compass-tasks/db/config_validation/extension/__init__.py
+++ /dev/null
diff --git a/compass-tasks/db/config_validation/extension/openstack.py b/compass-tasks/db/config_validation/extension/openstack.py
deleted file mode 100644
index 6b3af69..0000000
--- a/compass-tasks/db/config_validation/extension/openstack.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-def validate_cluster_config():
- # TODO(xiaodong): Add openstack specific validation here.
- pass
diff --git a/compass-tasks/db/exception.py b/compass-tasks/db/exception.py
deleted file mode 100644
index 44556c9..0000000
--- a/compass-tasks/db/exception.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Custom exception"""
-import traceback
-
-
-class DatabaseException(Exception):
- """Base class for all database exceptions."""
- def __init__(self, message):
- super(DatabaseException, self).__init__(message)
- self.traceback = traceback.format_exc()
- self.status_code = 400
-
- def to_dict(self):
- return {'message': str(self)}
-
-
-class RecordNotExists(DatabaseException):
- """Define the exception for referring non-existing object in DB."""
- def __init__(self, message):
- super(RecordNotExists, self).__init__(message)
- self.status_code = 404
-
-
-class DuplicatedRecord(DatabaseException):
- """Define the exception for trying to insert an existing object in DB."""
- def __init__(self, message):
- super(DuplicatedRecord, self).__init__(message)
- self.status_code = 409
-
-
-class Unauthorized(DatabaseException):
- """Define the exception for invalid user login."""
- def __init__(self, message):
- super(Unauthorized, self).__init__(message)
- self.status_code = 401
-
-
-class UserDisabled(DatabaseException):
- """Define the exception that a disabled user tries to do some operations.
-
- """
- def __init__(self, message):
- super(UserDisabled, self).__init__(message)
- self.status_code = 403
-
-
-class Forbidden(DatabaseException):
- """Define the exception that a user is trying to make some action
-
- without the right permission.
-
- """
- def __init__(self, message):
- super(Forbidden, self).__init__(message)
- self.status_code = 403
-
-
-class NotAcceptable(DatabaseException):
- """The data is not acceptable."""
- def __init__(self, message):
- super(NotAcceptable, self).__init__(message)
- self.status_code = 406
-
-
-class InvalidParameter(DatabaseException):
- """Define the exception that the request has invalid or missing parameters.
-
- """
- def __init__(self, message):
- super(InvalidParameter, self).__init__(message)
- self.status_code = 400
-
-
-class InvalidResponse(DatabaseException):
- """Define the exception that the response is invalid.
-
- """
- def __init__(self, message):
- super(InvalidResponse, self).__init__(message)
- self.status_code = 400
-
-
-class MultiDatabaseException(DatabaseException):
- """Define the exception composites with multi exceptions."""
- def __init__(self, exceptions):
- super(MultiDatabaseException, self).__init__('multi exceptions')
- self.exceptions = exceptions
- self.status_code = 400
-
- @property
- def traceback(self):
-        tracebacks = []
-        for exception in self.exceptions:
-            tracebacks.append(exception.traceback)
-        return tracebacks
-
- def to_dict(self):
- dict_info = super(MultiDatabaseException, self).to_dict()
- dict_info.update({
- 'exceptions': [
- exception.to_dict() for exception in self.exceptions
- ]
- })
- return dict_info
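Every exception here carries an HTTP-style status_code and a to_dict() payload, so a calling API layer can translate any DatabaseException uniformly. A minimal sketch of such a translation, assuming the classes above are importable; the handler itself is illustrative and not part of this module:

def to_http_response(exc):
    # Works for any DatabaseException subclass defined above, including
    # MultiDatabaseException, whose to_dict() nests the child payloads.
    return exc.status_code, exc.to_dict()


try:
    raise RecordNotExists('host 42 does not exist')
except DatabaseException as exc:
    status, body = to_http_response(exc)
    assert status == 404
    assert body == {'message': 'host 42 does not exist'}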
diff --git a/compass-tasks/db/models.py b/compass-tasks/db/models.py
deleted file mode 100644
index d4b0324..0000000
--- a/compass-tasks/db/models.py
+++ /dev/null
@@ -1,1924 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Database model"""
-import copy
-import datetime
-import logging
-import netaddr
-import re
-import simplejson as json
-
-from sqlalchemy import BigInteger
-from sqlalchemy import Boolean
-from sqlalchemy import Column
-from sqlalchemy import ColumnDefault
-from sqlalchemy import DateTime
-from sqlalchemy import Enum
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.ext.hybrid import hybrid_property
-from sqlalchemy import Float
-from sqlalchemy import ForeignKey
-from sqlalchemy import Integer
-from sqlalchemy.orm import relationship, backref
-from sqlalchemy import String
-from sqlalchemy import Table
-from sqlalchemy import Text
-from sqlalchemy.types import TypeDecorator
-from sqlalchemy import UniqueConstraint
-
-from compass.db import exception
-from compass.utils import util
-
-
-BASE = declarative_base()
-
-
-class JSONEncoded(TypeDecorator):
- """Represents an immutable structure as a json-encoded string."""
-
- impl = Text
-
- def process_bind_param(self, value, dialect):
- if value is not None:
- value = json.dumps(value)
- return value
-
- def process_result_value(self, value, dialect):
- if value is not None:
- value = json.loads(value)
- return value
-
-
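JSONEncoded lets a column hold ordinary Python structures: values are json.dumps-ed on the way into the database and json.loads-ed on the way out, which is how columns such as package_config and vlans below store dicts and lists in a Text column. A stand-alone sketch of that behaviour, assuming JSONEncoded as defined above (the Item model and the sqlite engine are invented for illustration):

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Item(Base):
    __tablename__ = 'item'
    id = Column(Integer, primary_key=True)
    data = Column(JSONEncoded, default={})   # dict in Python, TEXT in SQL


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Item(id=1, data={'roles': ['controller', 'compute']}))
session.commit()
assert session.query(Item).get(1).data['roles'] == ['controller', 'compute']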
-class TimestampMixin(object):
- """Provides table fields for each row created/updated timestamp."""
- created_at = Column(DateTime, default=lambda: datetime.datetime.now())
- updated_at = Column(DateTime, default=lambda: datetime.datetime.now(),
- onupdate=lambda: datetime.datetime.now())
-
-
-class HelperMixin(object):
- """Provides general fuctions for all compass table models."""
-
- def initialize(self):
- self.update()
-
- def update(self):
- pass
-
- @staticmethod
- def type_compatible(value, column_type):
- """Check if value type is compatible with the column type."""
- if value is None:
- return True
- if not hasattr(column_type, 'python_type'):
- return True
- column_python_type = column_type.python_type
- if isinstance(value, column_python_type):
- return True
- if issubclass(column_python_type, basestring):
- return isinstance(value, basestring)
- if column_python_type in [int, long]:
- return type(value) in [int, long]
- if column_python_type in [float]:
- return type(value) in [float]
- if column_python_type in [bool]:
- return type(value) in [bool]
- return False
-
- def validate(self):
- """Generate validate function to make sure the record is legal."""
- columns = self.__mapper__.columns
- for key, column in columns.items():
- value = getattr(self, key)
- if not self.type_compatible(value, column.type):
- raise exception.InvalidParameter(
- 'column %s value %r type is unexpected: %s' % (
- key, value, column.type
- )
- )
-
- def to_dict(self):
- """General function to convert record to dict.
-
- Convert all columns not starting with '_' to
- {<column_name>: <column_value>}
- """
- keys = self.__mapper__.columns.keys()
- dict_info = {}
- for key in keys:
- if key.startswith('_'):
- continue
- value = getattr(self, key)
- if value is not None:
- if isinstance(value, datetime.datetime):
- value = util.format_datetime(value)
- dict_info[key] = value
- return dict_info
-
-
-class StateMixin(TimestampMixin, HelperMixin):
- """Provides general fields and functions for state related table."""
-
- state = Column(
- Enum(
- 'UNINITIALIZED', 'INITIALIZED', 'UPDATE_PREPARING',
- 'INSTALLING', 'SUCCESSFUL', 'ERROR'
- ),
- ColumnDefault('UNINITIALIZED')
- )
- percentage = Column(Float, default=0.0)
- message = Column(Text, default='')
- severity = Column(
- Enum('INFO', 'WARNING', 'ERROR'),
- ColumnDefault('INFO')
- )
- ready = Column(Boolean, default=False)
-
- def update(self):
-        # In the state table, some field information is redundant.
-        # The update function makes sure all related fields
-        # are set to a consistent state.
- if self.ready:
- self.state = 'SUCCESSFUL'
- if self.state in ['UNINITIALIZED', 'INITIALIZED']:
- self.percentage = 0.0
- self.severity = 'INFO'
- self.message = ''
- if self.state == 'INSTALLING':
- if self.severity == 'ERROR':
- self.state = 'ERROR'
- elif self.percentage >= 1.0:
- self.state = 'SUCCESSFUL'
- self.percentage = 1.0
- if self.state == 'SUCCESSFUL':
- self.percentage = 1.0
- super(StateMixin, self).update()
-
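The normalization above can be read as a small truth table: for example, a row with state='INSTALLING', percentage=1.0 and severity='INFO' is rewritten by update() to state='SUCCESSFUL', while the same row with severity='ERROR' becomes state='ERROR'. A simplified mirror of that logic as plain data, for illustration only (it omits the message reset and does not touch the database):

def normalize(state, percentage, severity, ready=False):
    # Simplified mirror of StateMixin.update(), illustration only.
    if ready:
        state = 'SUCCESSFUL'
    if state in ['UNINITIALIZED', 'INITIALIZED']:
        percentage, severity = 0.0, 'INFO'
    if state == 'INSTALLING':
        if severity == 'ERROR':
            state = 'ERROR'
        elif percentage >= 1.0:
            state, percentage = 'SUCCESSFUL', 1.0
    if state == 'SUCCESSFUL':
        percentage = 1.0
    return state, percentage, severity


assert normalize('INSTALLING', 1.0, 'INFO') == ('SUCCESSFUL', 1.0, 'INFO')
assert normalize('INSTALLING', 0.4, 'ERROR') == ('ERROR', 0.4, 'ERROR')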
-
-class LogHistoryMixin(TimestampMixin, HelperMixin):
- """Provides general fields and functions for LogHistory related tables."""
- position = Column(Integer, default=0)
- partial_line = Column(Text, default='')
- percentage = Column(Float, default=0.0)
- message = Column(Text, default='')
- severity = Column(
- Enum('ERROR', 'WARNING', 'INFO'),
- ColumnDefault('INFO')
- )
- line_matcher_name = Column(
- String(80), default='start'
- )
-
- def validate(self):
- # TODO(xicheng): some validation can be moved to column.
- if not self.filename:
- raise exception.InvalidParameter(
- 'filename is not set in %s' % self.id
- )
-
-
-class HostNetwork(BASE, TimestampMixin, HelperMixin):
- """Host network table."""
- __tablename__ = 'host_network'
-
- id = Column(Integer, primary_key=True)
- host_id = Column(
- Integer,
- ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- interface = Column(
- String(80), nullable=False)
- subnet_id = Column(
- Integer,
- ForeignKey('subnet.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- user_id = Column(Integer, ForeignKey('user.id'))
- ip_int = Column(BigInteger, nullable=False)
- is_mgmt = Column(Boolean, default=False)
- is_promiscuous = Column(Boolean, default=False)
-
- __table_args__ = (
- UniqueConstraint('host_id', 'interface', name='interface_constraint'),
- UniqueConstraint('ip_int', 'user_id', name='ip_constraint')
- )
-
- def __init__(self, host_id, interface, user_id, **kwargs):
- self.host_id = host_id
- self.interface = interface
- self.user_id = user_id
- super(HostNetwork, self).__init__(**kwargs)
-
- def __str__(self):
- return 'HostNetwork[%s=%s]' % (self.interface, self.ip)
-
- @property
- def ip(self):
- return str(netaddr.IPAddress(self.ip_int))
-
- @ip.setter
- def ip(self, value):
- self.ip_int = int(netaddr.IPAddress(value))
-
- @property
- def netmask(self):
- return str(netaddr.IPNetwork(self.subnet.subnet).netmask)
-
- def update(self):
- self.host.config_validated = False
-
- def validate(self):
- # TODO(xicheng): some validation can be moved to column.
- super(HostNetwork, self).validate()
- if not self.subnet:
- raise exception.InvalidParameter(
- 'subnet is not set in %s interface %s' % (
- self.host_id, self.interface
- )
- )
- if not self.ip_int:
- raise exception.InvalidParameter(
- 'ip is not set in %s interface %s' % (
- self.host_id, self.interface
- )
- )
- ip = netaddr.IPAddress(self.ip_int)
- subnet = netaddr.IPNetwork(self.subnet.subnet)
- if ip not in subnet:
- raise exception.InvalidParameter(
- 'ip %s is not in subnet %s' % (
- str(ip), str(subnet)
- )
- )
-
- def to_dict(self):
- dict_info = super(HostNetwork, self).to_dict()
- dict_info['ip'] = self.ip
- dict_info['interface'] = self.interface
- dict_info['netmask'] = self.netmask
- dict_info['subnet'] = self.subnet.subnet
- dict_info['user_id'] = self.user_id
- return dict_info
-
-
-class ClusterHostLogHistory(BASE, LogHistoryMixin):
- """clusterhost installing log history for each file.
-
- """
- __tablename__ = 'clusterhost_log_history'
-
- clusterhost_id = Column(
- 'id', Integer,
- ForeignKey('clusterhost.id', onupdate='CASCADE', ondelete='CASCADE'),
- primary_key=True
- )
- filename = Column(String(80), primary_key=True, nullable=False)
- cluster_id = Column(
- Integer,
- ForeignKey('cluster.id')
- )
- host_id = Column(
- Integer,
- ForeignKey('host.id')
- )
-
- def __init__(self, clusterhost_id, filename, **kwargs):
- self.clusterhost_id = clusterhost_id
- self.filename = filename
- super(ClusterHostLogHistory, self).__init__(**kwargs)
-
- def __str__(self):
- return 'ClusterHostLogHistory[%s:%s]' % (
- self.clusterhost_id, self.filename
- )
-
- def initialize(self):
- self.cluster_id = self.clusterhost.cluster_id
- self.host_id = self.clusterhost.host_id
- super(ClusterHostLogHistory, self).initialize()
-
-
-class HostLogHistory(BASE, LogHistoryMixin):
- """host installing log history for each file.
-
- """
- __tablename__ = 'host_log_history'
-
- id = Column(
- Integer,
- ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE'),
- primary_key=True)
- filename = Column(String(80), primary_key=True, nullable=False)
-
- def __init__(self, id, filename, **kwargs):
- self.id = id
- self.filename = filename
- super(HostLogHistory, self).__init__(**kwargs)
-
- def __str__(self):
- return 'HostLogHistory[%s:%s]' % (self.id, self.filename)
-
-
-class ClusterHostState(BASE, StateMixin):
- """ClusterHost state table."""
- __tablename__ = 'clusterhost_state'
-
- id = Column(
- Integer,
- ForeignKey(
- 'clusterhost.id',
- onupdate='CASCADE', ondelete='CASCADE'
- ),
- primary_key=True
- )
-
- def __str__(self):
- return 'ClusterHostState[%s state %s percentage %s]' % (
- self.id, self.state, self.percentage
- )
-
- def update(self):
- """Update clusterhost state.
-
- When clusterhost state is updated, the underlying host state
- may be updated accordingly.
- """
- super(ClusterHostState, self).update()
- host_state = self.clusterhost.host.state
- if self.state == 'INITIALIZED':
- if host_state.state in ['UNINITIALIZED', 'UPDATE_PREPARING']:
- host_state.state = 'INITIALIZED'
- host_state.update()
- elif self.state == 'INSTALLING':
- if host_state.state in [
- 'UNINITIALIZED', 'UPDATE_PREPARING', 'INITIALIZED'
- ]:
- host_state.state = 'INSTALLING'
- host_state.update()
- elif self.state == 'SUCCESSFUL':
- if host_state.state != 'SUCCESSFUL':
- host_state.state = 'SUCCESSFUL'
- host_state.update()
-
-
-class ClusterHost(BASE, TimestampMixin, HelperMixin):
- """ClusterHost table."""
- __tablename__ = 'clusterhost'
-
- clusterhost_id = Column('id', Integer, primary_key=True)
- cluster_id = Column(
- Integer,
- ForeignKey('cluster.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- host_id = Column(
- Integer,
- ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- # the list of role names.
- _roles = Column('roles', JSONEncoded, default=[])
- _patched_roles = Column('patched_roles', JSONEncoded, default=[])
- config_step = Column(String(80), default='')
- package_config = Column(JSONEncoded, default={})
- config_validated = Column(Boolean, default=False)
- deployed_package_config = Column(JSONEncoded, default={})
-
- log_histories = relationship(
- ClusterHostLogHistory,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('clusterhost')
- )
-
- __table_args__ = (
- UniqueConstraint('cluster_id', 'host_id', name='constraint'),
- )
-
- state = relationship(
- ClusterHostState,
- uselist=False,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('clusterhost')
- )
-
- def __init__(self, cluster_id, host_id, **kwargs):
- self.cluster_id = cluster_id
- self.host_id = host_id
- self.state = ClusterHostState()
- super(ClusterHost, self).__init__(**kwargs)
-
- def __str__(self):
- return 'ClusterHost[%s:%s]' % (self.clusterhost_id, self.name)
-
- def update(self):
- if self.host.reinstall_os:
-            if self.state.state in ['SUCCESSFUL', 'ERROR']:
- if self.config_validated:
- self.state.state = 'INITIALIZED'
- else:
- self.state.state = 'UNINITIALIZED'
- self.cluster.update()
- self.host.update()
- self.state.update()
- super(ClusterHost, self).update()
-
- @property
- def name(self):
- return '%s.%s' % (self.host.name, self.cluster.name)
-
- @property
- def patched_package_config(self):
- return self.package_config
-
- @patched_package_config.setter
- def patched_package_config(self, value):
- package_config = copy.deepcopy(self.package_config)
- self.package_config = util.merge_dict(package_config, value)
- logging.debug(
- 'patch clusterhost %s package_config: %s',
- self.clusterhost_id, value
- )
- self.config_validated = False
-
- @property
- def put_package_config(self):
- return self.package_config
-
- @put_package_config.setter
- def put_package_config(self, value):
- package_config = copy.deepcopy(self.package_config)
- package_config.update(value)
- self.package_config = package_config
- logging.debug(
- 'put clusterhost %s package_config: %s',
- self.clusterhost_id, value
- )
- self.config_validated = False
-
- @property
- def patched_os_config(self):
- return self.host.os_config
-
- @patched_os_config.setter
- def patched_os_config(self, value):
- host = self.host
- host.patched_os_config = value
-
- @property
- def put_os_config(self):
- return self.host.os_config
-
- @put_os_config.setter
- def put_os_config(self, value):
- host = self.host
- host.put_os_config = value
-
- @property
- def deployed_os_config(self):
- return self.host.deployed_os_config
-
- @deployed_os_config.setter
- def deployed_os_config(self, value):
- host = self.host
- host.deployed_os_config = value
-
- @hybrid_property
- def os_name(self):
- return self.host.os_name
-
- @os_name.expression
- def os_name(cls):
- return cls.host.os_name
-
- @hybrid_property
- def clustername(self):
- return self.cluster.name
-
- @clustername.expression
- def clustername(cls):
- return cls.cluster.name
-
- @hybrid_property
- def hostname(self):
- return self.host.hostname
-
- @hostname.expression
- def hostname(cls):
- return Host.hostname
-
- @property
- def distributed_system_installed(self):
- return self.state.state == 'SUCCESSFUL'
-
- @property
-    def reinstall_os(self):
- return self.host.reinstall_os
-
- @property
- def reinstall_distributed_system(self):
- return self.cluster.reinstall_distributed_system
-
- @property
- def os_installed(self):
- return self.host.os_installed
-
- @property
- def roles(self):
-        # Only roles that exist in the flavor roles will be returned,
-        # sorted in the order defined in the flavor roles.
-        # Duplicate role names will be removed.
-        # The returned value is a list of dicts like
-        # [{'name': 'allinone', 'optional': False}]
- role_names = list(self._roles)
- if not role_names:
- return []
- cluster_roles = self.cluster.flavor['roles']
- if not cluster_roles:
- return []
- roles = []
- for cluster_role in cluster_roles:
- if cluster_role['name'] in role_names:
- roles.append(cluster_role)
- return roles
-
- @roles.setter
- def roles(self, value):
- """value should be a list of role name."""
- self._roles = list(value)
- self.config_validated = False
-
- @property
- def patched_roles(self):
- patched_role_names = list(self._patched_roles)
- if not patched_role_names:
- return []
- cluster_roles = self.cluster.flavor['roles']
- if not cluster_roles:
- return []
- roles = []
- for cluster_role in cluster_roles:
- if cluster_role['name'] in patched_role_names:
- roles.append(cluster_role)
- return roles
-
- @patched_roles.setter
- def patched_roles(self, value):
- """value should be a list of role name."""
- # if value is an empty list, we empty the field
- if value:
- roles = list(self._roles)
- roles.extend(value)
- self._roles = roles
- patched_roles = list(self._patched_roles)
- patched_roles.extend(value)
- self._patched_roles = patched_roles
- self.config_validated = False
- else:
- self._patched_roles = list(value)
- self.config_validated = False
-
- @hybrid_property
- def owner(self):
- return self.cluster.owner
-
- @owner.expression
- def owner(cls):
- return cls.cluster.owner
-
- def state_dict(self):
- """Get clusterhost state dict.
-
- The clusterhost state_dict is different from
-        clusterhost.state.to_dict. state_dict shows the combined progress
-        of installing the os on the host and installing the distributed
-        system on the clusterhost, while clusterhost.state.to_dict only
-        shows the progress of installing the distributed system on the
-        clusterhost.
- """
- cluster = self.cluster
- host = self.host
- host_state = host.state_dict()
- if not cluster.flavor_name:
- return host_state
- clusterhost_state = self.state.to_dict()
- if clusterhost_state['state'] in ['ERROR', 'SUCCESSFUL']:
- return clusterhost_state
- if (
-            clusterhost_state['state'] == 'INSTALLING' and
- clusterhost_state['percentage'] > 0
- ):
- clusterhost_state['percentage'] = min(
- 1.0, (
- 0.5 + clusterhost_state['percentage'] / 2
- )
- )
- return clusterhost_state
-
- host_state['percentage'] = host_state['percentage'] / 2
- if host_state['state'] == 'SUCCESSFUL':
- host_state['state'] = 'INSTALLING'
- return host_state
-
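In the combination above, OS installation on the host accounts for the first half of the overall clusterhost progress and distributed-system installation for the second half. A small illustrative calculation (the percentages are made up):

# Illustrative arithmetic only; values are invented.
os_phase = {'state': 'INSTALLING', 'percentage': 0.6}
combined = dict(os_phase, percentage=os_phase['percentage'] / 2)          # 0.3 overall
pkg_phase = {'state': 'INSTALLING', 'percentage': 0.4}
combined = dict(pkg_phase, percentage=min(1.0, 0.5 + pkg_phase['percentage'] / 2))  # 0.7 overall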
- def to_dict(self):
- dict_info = self.host.to_dict()
- dict_info.update(super(ClusterHost, self).to_dict())
- state_dict = self.state_dict()
- dict_info.update({
- 'distributed_system_installed': self.distributed_system_installed,
- 'reinstall_distributed_system': self.reinstall_distributed_system,
- 'owner': self.owner,
- 'clustername': self.clustername,
- 'name': self.name,
- 'state': state_dict['state']
- })
- dict_info['roles'] = self.roles
- dict_info['patched_roles'] = self.patched_roles
- return dict_info
-
-
-class HostState(BASE, StateMixin):
- """Host state table."""
- __tablename__ = 'host_state'
-
- id = Column(
- Integer,
- ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE'),
- primary_key=True
- )
-
- def __str__(self):
- return 'HostState[%s state %s percentage %s]' % (
- self.id, self.state, self.percentage
- )
-
- def update(self):
- """Update host state.
-
- When host state is updated, all clusterhosts on the
- host will update their state if necessary.
- """
- super(HostState, self).update()
- host = self.host
- if self.state == 'INSTALLING':
- host.reinstall_os = False
- for clusterhost in self.host.clusterhosts:
-                if clusterhost.state.state in [
-                    'SUCCESSFUL', 'ERROR'
-                ]:
-                    clusterhost.state.state = 'INSTALLING'
-                    clusterhost.state.update()
-        elif self.state == 'UNINITIALIZED':
-            for clusterhost in self.host.clusterhosts:
-                if clusterhost.state.state in [
-                    'INITIALIZED', 'INSTALLING', 'SUCCESSFUL', 'ERROR'
-                ]:
-                    clusterhost.state.state = 'UNINITIALIZED'
-                    clusterhost.state.update()
-        elif self.state == 'UPDATE_PREPARING':
-            for clusterhost in self.host.clusterhosts:
-                if clusterhost.state.state in [
-                    'INITIALIZED', 'INSTALLING', 'SUCCESSFUL', 'ERROR'
-                ]:
-                    clusterhost.state.state = 'UPDATE_PREPARING'
-                    clusterhost.state.update()
-        elif self.state == 'INITIALIZED':
-            for clusterhost in self.host.clusterhosts:
-                if clusterhost.state.state in [
-                    'INSTALLING', 'SUCCESSFUL', 'ERROR'
-                ]:
-                    clusterhost.state.state = 'INITIALIZED'
-                    clusterhost.state.update()
-
-
-class Host(BASE, TimestampMixin, HelperMixin):
- """Host table."""
- __tablename__ = 'host'
-
- name = Column(String(80), nullable=True)
- config_step = Column(String(80), default='')
- os_config = Column(JSONEncoded, default={})
- config_validated = Column(Boolean, default=False)
- deployed_os_config = Column(JSONEncoded, default={})
- os_name = Column(String(80))
- creator_id = Column(Integer, ForeignKey('user.id'))
- owner = Column(String(80))
- os_installer = Column(JSONEncoded, default={})
-
- __table_args__ = (
- UniqueConstraint('name', 'owner', name='constraint'),
- )
-
- id = Column(
- Integer,
- ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE'),
- primary_key=True
- )
- reinstall_os = Column(Boolean, default=True)
-
- host_networks = relationship(
- HostNetwork,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('host')
- )
- clusterhosts = relationship(
- ClusterHost,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('host')
- )
- state = relationship(
- HostState,
- uselist=False,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('host')
- )
- log_histories = relationship(
- HostLogHistory,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('host')
- )
-
- def __str__(self):
- return 'Host[%s:%s]' % (self.id, self.name)
-
- @hybrid_property
- def mac(self):
- machine = self.machine
- if machine:
- return machine.mac
- else:
- return None
-
- @property
- def os_id(self):
- return self.os_name
-
- @os_id.setter
- def os_id(self, value):
- self.os_name = value
-
- @hybrid_property
- def hostname(self):
- return self.name
-
- @hostname.expression
- def hostname(cls):
- return cls.name
-
- @property
- def patched_os_config(self):
- return self.os_config
-
- @patched_os_config.setter
- def patched_os_config(self, value):
- os_config = copy.deepcopy(self.os_config)
- self.os_config = util.merge_dict(os_config, value)
- logging.debug('patch host os config in %s: %s', self.id, value)
- self.config_validated = False
-
- @property
- def put_os_config(self):
- return self.os_config
-
- @put_os_config.setter
- def put_os_config(self, value):
- os_config = copy.deepcopy(self.os_config)
- os_config.update(value)
- self.os_config = os_config
- logging.debug('put host os config in %s: %s', self.id, value)
- self.config_validated = False
-
- def __init__(self, id, **kwargs):
- self.id = id
- self.state = HostState()
- super(Host, self).__init__(**kwargs)
-
- def update(self):
- creator = self.creator
- if creator:
- self.owner = creator.email
- if self.reinstall_os:
-            if self.state.state in ['SUCCESSFUL', 'ERROR']:
- if self.config_validated:
- self.state.state = 'INITIALIZED'
- else:
- self.state.state = 'UNINITIALIZED'
- self.state.update()
- self.state.update()
- super(Host, self).update()
-
- def validate(self):
- # TODO(xicheng): some validation can be moved to the column in future.
- super(Host, self).validate()
- creator = self.creator
- if not creator:
- raise exception.InvalidParameter(
- 'creator is not set in host %s' % self.id
- )
- os_name = self.os_name
- if not os_name:
- raise exception.InvalidParameter(
- 'os is not set in host %s' % self.id
- )
- os_installer = self.os_installer
- if not os_installer:
-            raise exception.InvalidParameter(
- 'os_installer is not set in host %s' % self.id
- )
-
- @property
- def os_installed(self):
- return self.state.state == 'SUCCESSFUL'
-
- @property
- def clusters(self):
- return [clusterhost.cluster for clusterhost in self.clusterhosts]
-
- def state_dict(self):
- return self.state.to_dict()
-
- def to_dict(self):
- """Host dict contains its underlying machine dict."""
- dict_info = self.machine.to_dict()
- dict_info.update(super(Host, self).to_dict())
- state_dict = self.state_dict()
- ip = None
- for host_network in self.host_networks:
- if host_network.is_mgmt:
- ip = host_network.ip
- dict_info.update({
- 'machine_id': self.machine.id,
- 'os_installed': self.os_installed,
- 'hostname': self.hostname,
- 'ip': ip,
- 'networks': [
- host_network.to_dict()
- for host_network in self.host_networks
- ],
- 'os_id': self.os_id,
- 'clusters': [cluster.to_dict() for cluster in self.clusters],
- 'state': state_dict['state']
- })
- return dict_info
-
-
-class ClusterState(BASE, StateMixin):
- """Cluster state table."""
- __tablename__ = 'cluster_state'
-
- id = Column(
- Integer,
- ForeignKey('cluster.id', onupdate='CASCADE', ondelete='CASCADE'),
- primary_key=True
- )
- total_hosts = Column(
- Integer,
- default=0
- )
- installing_hosts = Column(
- Integer,
- default=0
- )
- completed_hosts = Column(
- Integer,
- default=0
- )
- failed_hosts = Column(
- Integer,
- default=0
- )
-
- def __init__(self, **kwargs):
- super(ClusterState, self).__init__(**kwargs)
-
- def __str__(self):
- return 'ClusterState[%s state %s percentage %s]' % (
- self.id, self.state, self.percentage
- )
-
- def to_dict(self):
- dict_info = super(ClusterState, self).to_dict()
- dict_info['status'] = {
- 'total_hosts': self.total_hosts,
- 'installing_hosts': self.installing_hosts,
- 'completed_hosts': self.completed_hosts,
- 'failed_hosts': self.failed_hosts
- }
- return dict_info
-
- def update(self):
- # all fields of cluster state should be calculated by
- # its each underlying clusterhost state.
- cluster = self.cluster
- clusterhosts = cluster.clusterhosts
- self.total_hosts = len(clusterhosts)
- self.installing_hosts = 0
- self.failed_hosts = 0
- self.completed_hosts = 0
- if not cluster.flavor_name:
- for clusterhost in clusterhosts:
- host = clusterhost.host
- host_state = host.state.state
- if host_state == 'INSTALLING':
- self.installing_hosts += 1
- elif host_state == 'ERROR':
- self.failed_hosts += 1
- elif host_state == 'SUCCESSFUL':
- self.completed_hosts += 1
- else:
- for clusterhost in clusterhosts:
- clusterhost_state = clusterhost.state.state
- if clusterhost_state == 'INSTALLING':
- self.installing_hosts += 1
- elif clusterhost_state == 'ERROR':
- self.failed_hosts += 1
- elif clusterhost_state == 'SUCCESSFUL':
- self.completed_hosts += 1
- if self.total_hosts:
- if self.completed_hosts == self.total_hosts:
- self.percentage = 1.0
- else:
- self.percentage = (
- float(self.completed_hosts)
- /
- float(self.total_hosts)
- )
- if self.state == 'SUCCESSFUL':
- self.state = 'INSTALLING'
- self.ready = False
- self.message = (
- 'total %s, installing %s, completed: %s, error %s'
- ) % (
- self.total_hosts, self.installing_hosts,
- self.completed_hosts, self.failed_hosts
- )
- if self.failed_hosts:
- self.severity = 'ERROR'
-
- super(ClusterState, self).update()
- if self.state == 'INSTALLING':
- cluster.reinstall_distributed_system = False
-
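The cluster-level percentage is simply completed hosts over total hosts, with the severity flipped to ERROR as soon as any host fails. A small illustrative calculation (the host counts are invented):

# Illustrative arithmetic only; values are invented.
total_hosts, installing_hosts, completed_hosts, failed_hosts = 10, 5, 4, 1
percentage = float(completed_hosts) / float(total_hosts)      # 0.4
severity = 'ERROR' if failed_hosts else 'INFO'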
-
-class Cluster(BASE, TimestampMixin, HelperMixin):
- """Cluster table."""
- __tablename__ = 'cluster'
-
- id = Column(Integer, primary_key=True)
- name = Column(String(80), nullable=False)
- reinstall_distributed_system = Column(Boolean, default=True)
- config_step = Column(String(80), default='')
- os_name = Column(String(80))
- flavor_name = Column(String(80), nullable=True)
- # flavor dict got from flavor id.
- flavor = Column(JSONEncoded, default={})
- os_config = Column(JSONEncoded, default={})
- package_config = Column(JSONEncoded, default={})
- deployed_os_config = Column(JSONEncoded, default={})
- deployed_package_config = Column(JSONEncoded, default={})
- config_validated = Column(Boolean, default=False)
- adapter_name = Column(String(80))
- creator_id = Column(Integer, ForeignKey('user.id'))
- owner = Column(String(80))
- clusterhosts = relationship(
- ClusterHost,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('cluster')
- )
- state = relationship(
- ClusterState,
- uselist=False,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('cluster')
- )
- __table_args__ = (
- UniqueConstraint('name', 'creator_id', name='constraint'),
- )
-
- def __init__(self, name, creator_id, **kwargs):
- self.name = name
- self.creator_id = creator_id
- self.state = ClusterState()
- super(Cluster, self).__init__(**kwargs)
-
- def __str__(self):
- return 'Cluster[%s:%s]' % (self.id, self.name)
-
- def update(self):
- creator = self.creator
- if creator:
- self.owner = creator.email
- if self.reinstall_distributed_system:
-            if self.state.state in ['SUCCESSFUL', 'ERROR']:
- if self.config_validated:
- self.state.state = 'INITIALIZED'
- else:
- self.state.state = 'UNINITIALIZED'
- self.state.update()
- self.state.update()
- super(Cluster, self).update()
-
- def validate(self):
- # TODO(xicheng): some validation can be moved to column.
- super(Cluster, self).validate()
- creator = self.creator
- if not creator:
- raise exception.InvalidParameter(
- 'creator is not set in cluster %s' % self.id
- )
- os_name = self.os_name
- if not os_name:
- raise exception.InvalidParameter(
- 'os is not set in cluster %s' % self.id
- )
- adapter_name = self.adapter_name
- if not adapter_name:
- raise exception.InvalidParameter(
- 'adapter is not set in cluster %s' % self.id
- )
- flavor_name = self.flavor_name
- if flavor_name:
- if 'name' not in self.flavor:
- raise exception.InvalidParameter(
- 'key name does not exist in flavor %s' % (
- self.flavor
- )
- )
- if flavor_name != self.flavor['name']:
- raise exception.InvalidParameter(
-                    'flavor name %s does not match '
- 'the name key in flavor %s' % (
- flavor_name, self.flavor
- )
- )
- else:
- if self.flavor:
- raise exception.InvalidParameter(
- 'flavor %s is not empty' % self.flavor
- )
-
- @property
- def os_id(self):
- return self.os_name
-
- @os_id.setter
- def os_id(self, value):
- self.os_name = value
-
- @property
- def adapter_id(self):
- return self.adapter_name
-
- @adapter_id.setter
- def adapter_id(self, value):
- self.adapter_name = value
-
- @property
- def flavor_id(self):
- if self.flavor_name:
- return '%s:%s' % (self.adapter_name, self.flavor_name)
- else:
- return None
-
- @flavor_id.setter
- def flavor_id(self, value):
- if value:
- _, flavor_name = value.split(':', 1)
- self.flavor_name = flavor_name
- else:
- self.flavor_name = value
-
- @property
- def patched_os_config(self):
- return self.os_config
-
- @patched_os_config.setter
- def patched_os_config(self, value):
- os_config = copy.deepcopy(self.os_config)
- self.os_config = util.merge_dict(os_config, value)
- logging.debug('patch cluster %s os config: %s', self.id, value)
- self.config_validated = False
-
- @property
- def put_os_config(self):
- return self.os_config
-
- @put_os_config.setter
- def put_os_config(self, value):
- os_config = copy.deepcopy(self.os_config)
- os_config.update(value)
- self.os_config = os_config
- logging.debug('put cluster %s os config: %s', self.id, value)
- self.config_validated = False
-
- @property
- def patched_package_config(self):
- return self.package_config
-
- @patched_package_config.setter
- def patched_package_config(self, value):
- package_config = copy.deepcopy(self.package_config)
- self.package_config = util.merge_dict(package_config, value)
- logging.debug('patch cluster %s package config: %s', self.id, value)
- self.config_validated = False
-
- @property
- def put_package_config(self):
- return self.package_config
-
- @put_package_config.setter
- def put_package_config(self, value):
- package_config = dict(self.package_config)
- package_config.update(value)
- self.package_config = package_config
- logging.debug('put cluster %s package config: %s', self.id, value)
- self.config_validated = False
-
- @property
- def distributed_system_installed(self):
- return self.state.state == 'SUCCESSFUL'
-
- def state_dict(self):
- return self.state.to_dict()
-
- def to_dict(self):
- dict_info = super(Cluster, self).to_dict()
- dict_info['distributed_system_installed'] = (
- self.distributed_system_installed
- )
- dict_info['os_id'] = self.os_id
- dict_info['adapter_id'] = self.adapter_id
- dict_info['flavor_id'] = self.flavor_id
- return dict_info
-
-
-# User, Permission relation table
-class UserPermission(BASE, HelperMixin, TimestampMixin):
- """User permission table."""
- __tablename__ = 'user_permission'
- id = Column(Integer, primary_key=True)
- user_id = Column(
- Integer,
- ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- permission_id = Column(
- Integer,
- ForeignKey('permission.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- __table_args__ = (
- UniqueConstraint('user_id', 'permission_id', name='constraint'),
- )
-
- def __init__(self, user_id, permission_id, **kwargs):
- self.user_id = user_id
- self.permission_id = permission_id
-
- def __str__(self):
- return 'UserPermission[%s:%s]' % (self.id, self.name)
-
- @hybrid_property
- def name(self):
- return self.permission.name
-
- def to_dict(self):
- dict_info = self.permission.to_dict()
- dict_info.update(super(UserPermission, self).to_dict())
- return dict_info
-
-
-class Permission(BASE, HelperMixin, TimestampMixin):
- """Permission table."""
- __tablename__ = 'permission'
-
- id = Column(Integer, primary_key=True)
- name = Column(String(80), unique=True, nullable=False)
- alias = Column(String(100))
- description = Column(Text)
- user_permissions = relationship(
- UserPermission,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('permission')
- )
-
- def __init__(self, name, **kwargs):
- self.name = name
- super(Permission, self).__init__(**kwargs)
-
- def __str__(self):
- return 'Permission[%s:%s]' % (self.id, self.name)
-
-
-class UserToken(BASE, HelperMixin):
- """user token table."""
- __tablename__ = 'user_token'
-
- id = Column(Integer, primary_key=True)
- user_id = Column(
- Integer,
- ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- token = Column(String(256), unique=True, nullable=False)
- expire_timestamp = Column(DateTime, nullable=True)
-
- def __init__(self, token, **kwargs):
- self.token = token
- super(UserToken, self).__init__(**kwargs)
-
- def validate(self):
- # TODO(xicheng): some validation can be moved to column.
- super(UserToken, self).validate()
- if not self.user:
- raise exception.InvalidParameter(
- 'user is not set in token: %s' % self.token
- )
-
-
-class UserLog(BASE, HelperMixin):
- """User log table."""
- __tablename__ = 'user_log'
-
- id = Column(Integer, primary_key=True)
- user_id = Column(
- Integer,
- ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- action = Column(Text)
- timestamp = Column(DateTime, default=lambda: datetime.datetime.now())
-
- @hybrid_property
- def user_email(self):
- return self.user.email
-
- def validate(self):
- # TODO(xicheng): some validation can be moved to column.
- super(UserLog, self).validate()
- if not self.user:
- raise exception.InvalidParameter(
- 'user is not set in user log: %s' % self.id
- )
-
-
-class User(BASE, HelperMixin, TimestampMixin):
- """User table."""
- __tablename__ = 'user'
-
- id = Column(Integer, primary_key=True)
- email = Column(String(80), unique=True, nullable=False)
- crypted_password = Column('password', String(225))
- firstname = Column(String(80))
- lastname = Column(String(80))
- is_admin = Column(Boolean, default=False)
- active = Column(Boolean, default=True)
- user_permissions = relationship(
- UserPermission,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('user')
- )
- user_logs = relationship(
- UserLog,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('user')
- )
- user_tokens = relationship(
- UserToken,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('user')
- )
- clusters = relationship(
- Cluster,
- backref=backref('creator')
- )
- hosts = relationship(
- Host,
- backref=backref('creator')
- )
-
- def __init__(self, email, **kwargs):
- self.email = email
- super(User, self).__init__(**kwargs)
-
- def __str__(self):
- return 'User[%s]' % self.email
-
- def validate(self):
- # TODO(xicheng): some validation can be moved to column.
- super(User, self).validate()
- if not self.crypted_password:
- raise exception.InvalidParameter(
- 'password is not set in user : %s' % self.email
- )
-
- @property
- def password(self):
- return '***********'
-
- @password.setter
- def password(self, password):
- # password stored in database is crypted.
- self.crypted_password = util.encrypt(password)
-
- @hybrid_property
- def permissions(self):
- permissions = []
- for user_permission in self.user_permissions:
- permissions.append(user_permission.permission)
-
- return permissions
-
- def to_dict(self):
- dict_info = super(User, self).to_dict()
- dict_info['permissions'] = [
- permission.to_dict()
- for permission in self.permissions
- ]
- return dict_info
-
-
-class SwitchMachine(BASE, HelperMixin, TimestampMixin):
- """Switch Machine table."""
- __tablename__ = 'switch_machine'
- switch_machine_id = Column(
- 'id', Integer, primary_key=True
- )
- switch_id = Column(
- Integer,
- ForeignKey('switch.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- machine_id = Column(
- Integer,
- ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE')
- )
- owner_id = Column(Integer, ForeignKey('user.id'))
- port = Column(String(80), nullable=True)
- vlans = Column(JSONEncoded, default=[])
- __table_args__ = (
- UniqueConstraint('switch_id', 'machine_id', name='constraint'),
- )
-
- def __init__(self, switch_id, machine_id, **kwargs):
- self.switch_id = switch_id
- self.machine_id = machine_id
- super(SwitchMachine, self).__init__(**kwargs)
-
- def __str__(self):
- return 'SwitchMachine[%s port %s]' % (
- self.switch_machine_id, self.port
- )
-
- def validate(self):
- # TODO(xicheng): some validation can be moved to column.
- super(SwitchMachine, self).validate()
- if not self.switch:
- raise exception.InvalidParameter(
- 'switch is not set in %s' % self.id
- )
- if not self.machine:
-            raise exception.InvalidParameter(
- 'machine is not set in %s' % self.id
- )
- if not self.port:
- raise exception.InvalidParameter(
- 'port is not set in %s' % self.id
- )
-
- @hybrid_property
- def mac(self):
- return self.machine.mac
-
- @hybrid_property
- def tag(self):
- return self.machine.tag
-
- @property
- def switch_ip(self):
- return self.switch.ip
-
- @hybrid_property
- def switch_ip_int(self):
- return self.switch.ip_int
-
- @switch_ip_int.expression
- def switch_ip_int(cls):
- return Switch.ip_int
-
- @hybrid_property
- def switch_vendor(self):
- return self.switch.vendor
-
- @switch_vendor.expression
- def switch_vendor(cls):
- return Switch.vendor
-
- @property
- def patched_vlans(self):
- return self.vlans
-
- @patched_vlans.setter
- def patched_vlans(self, value):
- if not value:
- return
- vlans = list(self.vlans)
- for item in value:
- if item not in vlans:
- vlans.append(item)
- self.vlans = vlans
-
- @property
- def filtered(self):
- """Check if switch machine should be filtered.
-
-        A port name is composed as <port_prefix><port_number><port_suffix>.
-        For each filter in the switch machine filters:
-        if filter_type is allow and the port matches the pattern, the
-        switch machine may be returned by the api; if filter_type is deny
-        and the port matches the pattern, the switch machine is not
-        returned by the api.
-        If no filter matches and the last filter is allow, all unmatched
-        switch machines are denied; if the last filter is deny, all
-        unmatched switch machines are allowed.
-        If no filter is defined, all switch machines are allowed.
-        If ports is defined in the filter and 'all' is in ports, the switch
-        machine is matched; if 'all' is not in ports, only switch machines
-        whose port name is listed in ports are matched.
-        If the port matches <port_prefix><port_number><port_suffix> and the
-        port number is in the range [port_start, port_end], the switch
-        machine is matched.
- """
- filters = self.switch.machine_filters
- port = self.port
- unmatched_allowed = True
- ports_pattern = re.compile(r'(\D*)(\d+)-(\d+)(\D*)')
- port_pattern = re.compile(r'(\D*)(\d+)(\D*)')
- port_match = port_pattern.match(port)
- if port_match:
- port_prefix = port_match.group(1)
- port_number = int(port_match.group(2))
- port_suffix = port_match.group(3)
- else:
- port_prefix = ''
- port_number = 0
- port_suffix = ''
- for port_filter in filters:
- filter_type = port_filter.get('filter_type', 'allow')
- denied = filter_type != 'allow'
- unmatched_allowed = denied
- if 'ports' in port_filter:
- if 'all' in port_filter['ports']:
- return denied
- if port in port_filter['ports']:
- return denied
- if port_match:
- for port_or_ports in port_filter['ports']:
- ports_match = ports_pattern.match(port_or_ports)
- if ports_match:
- filter_port_prefix = ports_match.group(1)
- filter_port_start = int(ports_match.group(2))
- filter_port_end = int(ports_match.group(3))
- filter_port_suffix = ports_match.group(4)
- if (
- filter_port_prefix == port_prefix and
- filter_port_suffix == port_suffix and
- filter_port_start <= port_number and
- port_number <= filter_port_end
- ):
- return denied
- else:
- filter_port_prefix = port_filter.get('port_prefix', '')
- filter_port_suffix = port_filter.get('port_suffix', '')
- if (
- port_match and
- port_prefix == filter_port_prefix and
- port_suffix == filter_port_suffix
- ):
- if (
- 'port_start' not in port_filter or
- port_number >= port_filter['port_start']
- ) and (
- 'port_end' not in port_filter or
- port_number <= port_filter['port_end']
- ):
- return denied
- return not unmatched_allowed
-
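A short, self-contained sketch of the prefix/number/suffix range match used above (the port names and filter range are invented): under a single deny filter covering ae1-ae10, 'ae5' is filtered out while 'ae20' falls outside the range and, since the only filter is a deny, is allowed.

import re

# Illustrative only: the same decomposition the filtered property uses
# to range-match port names such as 'ae5'.
port_pattern = re.compile(r'(\D*)(\d+)(\D*)')


def in_range(port, prefix, start, end, suffix=''):
    match = port_pattern.match(port)
    if not match:
        return False
    return (match.group(1) == prefix and
            match.group(3) == suffix and
            start <= int(match.group(2)) <= end)


assert in_range('ae5', 'ae', 1, 10)        # matched by the deny filter
assert not in_range('ae20', 'ae', 1, 10)   # outside the range, so allowed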
- def to_dict(self):
- dict_info = self.machine.to_dict()
- dict_info.update(super(SwitchMachine, self).to_dict())
- dict_info['switch_ip'] = self.switch.ip
- return dict_info
-
-
-class Machine(BASE, HelperMixin, TimestampMixin):
- """Machine table."""
- __tablename__ = 'machine'
- id = Column(Integer, primary_key=True)
- mac = Column(String(24), unique=True, nullable=False)
- ipmi_credentials = Column(JSONEncoded, default={})
- tag = Column(JSONEncoded, default={})
- location = Column(JSONEncoded, default={})
- owner_id = Column(Integer, ForeignKey('user.id'))
- machine_attributes = Column(JSONEncoded, default={})
-
- switch_machines = relationship(
- SwitchMachine,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('machine')
- )
- host = relationship(
- Host,
- uselist=False,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('machine')
- )
-
- def __init__(self, mac, **kwargs):
- self.mac = mac
- super(Machine, self).__init__(**kwargs)
-
- def __str__(self):
- return 'Machine[%s:%s]' % (self.id, self.mac)
-
- def validate(self):
- # TODO(xicheng): some validation can be moved to column.
- super(Machine, self).validate()
- try:
- netaddr.EUI(self.mac)
- except Exception:
- raise exception.InvalidParameter(
-                'mac address %s format is incorrect' % self.mac
- )
-
- @property
- def patched_ipmi_credentials(self):
- return self.ipmi_credentials
-
- @patched_ipmi_credentials.setter
- def patched_ipmi_credentials(self, value):
- if not value:
- return
- ipmi_credentials = copy.deepcopy(self.ipmi_credentials)
- self.ipmi_credentials = util.merge_dict(ipmi_credentials, value)
-
- @property
- def patched_tag(self):
- return self.tag
-
- @patched_tag.setter
- def patched_tag(self, value):
- if not value:
- return
- tag = copy.deepcopy(self.tag)
- tag.update(value)
-        self.tag = tag
-
- @property
- def patched_location(self):
- return self.location
-
- @patched_location.setter
- def patched_location(self, value):
- if not value:
- return
- location = copy.deepcopy(self.location)
- location.update(value)
- self.location = location
-
- def to_dict(self):
- # TODO(xicheng): move the filling of switches
- # to db/api.
- dict_info = {}
- dict_info['switches'] = [
- {
- 'switch_ip': switch_machine.switch_ip,
- 'port': switch_machine.port,
- 'vlans': switch_machine.vlans
- }
- for switch_machine in self.switch_machines
- if not switch_machine.filtered
- ]
- if dict_info['switches']:
- dict_info.update(dict_info['switches'][0])
- dict_info.update(super(Machine, self).to_dict())
- return dict_info
-
-
-class Switch(BASE, HelperMixin, TimestampMixin):
- """Switch table."""
- __tablename__ = 'switch'
- id = Column(Integer, primary_key=True)
- ip_int = Column('ip', BigInteger, unique=True, nullable=False)
- credentials = Column(JSONEncoded, default={})
- vendor = Column(String(256), nullable=True)
- state = Column(Enum('initialized', 'unreachable', 'notsupported',
- 'repolling', 'error', 'under_monitoring',
- name='switch_state'),
- ColumnDefault('initialized'))
- # filters is a json formatted list; each element may contain the keys:
- # ['filter_type', 'ports', 'port_prefix', 'port_suffix',
- # 'port_start', 'port_end'].
- # each port name is divided into <port_prefix><port_number><port_suffix>.
- # filter_type is one of ['allow', 'deny']; the default is 'allow'.
- # ports is a list of port names.
- # port_prefix is the prefix a filtered port name should start with.
- # port_suffix is the suffix a filtered port name should end with.
- # port_start is the integer lower bound of the port number.
- # port_end is the integer upper bound of the port number.
- _filters = Column('filters', JSONEncoded, default=[])
- switch_machines = relationship(
- SwitchMachine,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('switch')
- )
-
- def __str__(self):
- return 'Switch[%s:%s]' % (self.id, self.ip)
-
- @classmethod
- def parse_filters(cls, filters):
- """parse filters set from outside to standard format.
-
- api can set switch filters with the flexible format, this
- function will parse the flexible format filters.
-
- Supported format:
- as string:
- allow ports ae10,ae20
- allow port_prefix ae port_start 30 port_end 40
- deny ports all
- as python object:
- [{
- 'filter_type': 'allow',
- 'ports': ['ae10', 'ae20']
- },{
- 'filter_type': 'allow',
- 'port_prefix': 'ae',
- 'port_suffix': '',
- 'port_start': 30,
- 'port_end': 40
- },{
- 'filter_type': 'deny',
- 'ports': ['all']
- }]
- """
- if isinstance(filters, basestring):
- filters = filters.replace('\r\n', '\n').replace('\n', ';')
- filters = [
- machine_filter for machine_filter in filters.split(';')
- if machine_filter
- ]
- if not isinstance(filters, list):
- filters = [filters]
- machine_filters = []
- for machine_filter in filters:
- if not machine_filter:
- continue
- if isinstance(machine_filter, basestring):
- filter_dict = {}
- filter_items = [
- item for item in machine_filter.split() if item
- ]
- if filter_items[0] in ['allow', 'deny']:
- filter_dict['filter_type'] = filter_items[0]
- filter_items = filter_items[1:]
- elif filter_items[0] not in [
- 'ports', 'port_prefix', 'port_suffix',
- 'port_start', 'port_end'
- ]:
- raise exception.InvalidParameter(
- 'unrecognized filter type %s' % filter_items[0]
- )
- while filter_items:
- if len(filter_items) >= 2:
- filter_dict[filter_items[0]] = filter_items[1]
- filter_items = filter_items[2:]
- else:
- filter_dict[filter_items[0]] = ''
- filter_items = filter_items[1:]
- machine_filter = filter_dict
- if not isinstance(machine_filter, dict):
- raise exception.InvalidParameter(
- 'filter %s is not dict' % machine_filter
- )
- if 'filter_type' in machine_filter:
- if machine_filter['filter_type'] not in ['allow', 'deny']:
- raise exception.InvalidParameter(
- 'filter_type should be `allow` or `deny` in %s' % (
- machine_filter
- )
- )
- if 'ports' in machine_filter:
- if isinstance(machine_filter['ports'], basestring):
- machine_filter['ports'] = [
- port_or_ports
- for port_or_ports in machine_filter['ports'].split(',')
- if port_or_ports
- ]
- if not isinstance(machine_filter['ports'], list):
- raise exception.InvalidParameter(
- '`ports` type is not list in filter %s' % (
- machine_filter
- )
- )
- for port_or_ports in machine_filter['ports']:
- if not isinstance(port_or_ports, basestring):
- raise exception.InvalidParameter(
- '%s type is not basestring in `ports` %s' % (
- port_or_ports, machine_filter['ports']
- )
- )
- for key in ['port_start', 'port_end']:
- if key in machine_filter:
- if isinstance(machine_filter[key], basestring):
- if machine_filter[key].isdigit():
- machine_filter[key] = int(machine_filter[key])
- if not isinstance(machine_filter[key], (int, long)):
- raise exception.InvalidParameter(
- '`%s` type is not int in filter %s' % (
- key, machine_filter
- )
- )
- machine_filters.append(machine_filter)
- return machine_filters
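Assuming the module is importable (the import path below is a guess, and the code targets Python 2 since it relies on basestring), parse_filters turns the string form from the docstring into the list-of-dicts form; the commented result traces the parsing logic shown above.

from compass.db.models import Switch  # import path assumed

parsed = Switch.parse_filters(
    'allow ports ae10,ae20;deny port_prefix ae port_start 30 port_end 40'
)
# parsed == [
#     {'filter_type': 'allow', 'ports': ['ae10', 'ae20']},
#     {'filter_type': 'deny', 'port_prefix': 'ae',
#      'port_start': 30, 'port_end': 40},
# ]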
-
- @classmethod
- def format_filters(cls, filters):
- """format json formatted filters to string."""
- filter_strs = []
- for machine_filter in filters:
- filter_properties = []
- filter_properties.append(
- machine_filter.get('filter_type', 'allow')
- )
- if 'ports' in machine_filter:
- filter_properties.append(
- 'ports ' + ','.join(machine_filter['ports'])
- )
- if 'port_prefix' in machine_filter:
- filter_properties.append(
- 'port_prefix ' + machine_filter['port_prefix']
- )
- if 'port_suffix' in machine_filter:
- filter_properties.append(
- 'port_suffix ' + machine_filter['port_suffix']
- )
- if 'port_start' in machine_filter:
- filter_properties.append(
- 'port_start ' + str(machine_filter['port_start'])
- )
- if 'port_end' in machine_filter:
- filter_properties.append(
- 'port_end ' + str(machine_filter['port_end'])
- )
- filter_strs.append(' '.join(filter_properties))
- return ';'.join(filter_strs)
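Continuing the sketch after parse_filters above, format_filters is the inverse direction, so the parsed list should serialize back to the same string (again assuming the module is importable):

assert Switch.format_filters(parsed) == (
    'allow ports ae10,ae20;deny port_prefix ae port_start 30 port_end 40'
)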
-
- def __init__(self, ip_int, **kwargs):
- self.ip_int = ip_int
- super(Switch, self).__init__(**kwargs)
-
- @property
- def ip(self):
- return str(netaddr.IPAddress(self.ip_int))
-
- @ip.setter
- def ip(self, ipaddr):
- self.ip_int = int(netaddr.IPAddress(ipaddr))
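The switch IP is stored as a BigInteger column and converted on the fly with netaddr; the round trip looks like this (address illustrative):

import netaddr

ip_int = int(netaddr.IPAddress('10.145.88.1'))   # 177297409
assert str(netaddr.IPAddress(ip_int)) == '10.145.88.1'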
-
- @property
- def patched_credentials(self):
- return self.credentials
-
- @patched_credentials.setter
- def patched_credentials(self, value):
- if not value:
- return
- credentials = copy.deepcopy(self.credentials)
- self.credentials = util.merge_dict(credentials, value)
-
- @property
- def machine_filters(self):
- return self._filters
-
- @machine_filters.setter
- def machine_filters(self, value):
- if not value:
- return
- self._filters = self.parse_filters(value)
-
- @property
- def put_machine_filters(self):
- return self._filters
-
- @put_machine_filters.setter
- def put_machine_filters(self, value):
- if not value:
- return
- self._filters = self.parse_filters(value)
-
- @property
- def patched_machine_filters(self):
- return self._filters
-
- @patched_machine_filters.setter
- def patched_machine_filters(self, value):
- if not value:
- return
- filters = list(self.machine_filters)
- self._filters = self.parse_filters(value) + filters
-
- def to_dict(self):
- dict_info = super(Switch, self).to_dict()
- dict_info['ip'] = self.ip
- dict_info['filters'] = self.format_filters(self._filters)
- return dict_info
-
-
-class Subnet(BASE, TimestampMixin, HelperMixin):
- """network table."""
- __tablename__ = 'subnet'
-
- id = Column(Integer, primary_key=True)
- name = Column(String(80), unique=True, nullable=True)
- subnet = Column(String(80), unique=True, nullable=False)
-
- host_networks = relationship(
- HostNetwork,
- passive_deletes=True, passive_updates=True,
- cascade='all, delete-orphan',
- backref=backref('subnet')
- )
-
- def __init__(self, subnet, **kwargs):
- self.subnet = subnet
- super(Subnet, self).__init__(**kwargs)
-
- def __str__(self):
- return 'Subnet[%s:%s]' % (self.id, self.subnet)
-
- def to_dict(self):
- dict_info = super(Subnet, self).to_dict()
- if not self.name:
- dict_info['name'] = self.subnet
- return dict_info
-
-
-# TODO(grace): move this global variable into HealthCheckReport.
-HEALTH_REPORT_STATES = ('verifying', 'success', 'finished', 'error')
-
-
-class HealthCheckReport(BASE, HelperMixin):
- """Health check report table."""
- __tablename__ = 'health_check_report'
-
- cluster_id = Column(
- Integer,
- ForeignKey('cluster.id', onupdate='CASCADE', ondelete='CASCADE'),
- primary_key=True
- )
- name = Column(String(80), nullable=False, primary_key=True)
- display_name = Column(String(100))
- report = Column(JSONEncoded, default={})
- category = Column(String(80), default='')
- state = Column(
- Enum(*HEALTH_REPORT_STATES, name='report_state'),
- ColumnDefault('verifying'),
- nullable=False
- )
- error_message = Column(Text, default='')
-
- def __init__(self, cluster_id, name, **kwargs):
- self.cluster_id = cluster_id
- self.name = name
- if 'state' in kwargs and kwargs['state'] not in HEALTH_REPORT_STATES:
- err_msg = 'State value %s is not accepted.' % kwargs['state']
- raise exception.InvalidParameter(err_msg)
-
- super(HealthCheckReport, self).__init__(**kwargs)
-
- def __str__(self):
- return 'HealthCheckReport[cluster_id: %s, name: %s]' % (
- self.cluster_id, self.name
- )
diff --git a/compass-tasks/db/v1/model.py b/compass-tasks/db/v1/model.py
deleted file mode 100644
index d74e355..0000000
--- a/compass-tasks/db/v1/model.py
+++ /dev/null
@@ -1,724 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""database model."""
-from datetime import datetime
-from hashlib import md5
-import logging
-import simplejson as json
-import uuid
-
-from sqlalchemy import Column, ColumnDefault, Integer, String
-from sqlalchemy import Float, Enum, DateTime, ForeignKey, Text, Boolean
-from sqlalchemy import UniqueConstraint
-from sqlalchemy.orm import relationship, backref
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.ext.hybrid import hybrid_property
-
-from compass.utils import util
-
-from flask.ext.login import UserMixin
-from itsdangerous import URLSafeTimedSerializer
-
-BASE = declarative_base()
-# TODO(grace) SECRET_KEY should be generated when installing compass
-# and save to a config file or DB
-SECRET_KEY = "abcd"
-
-# This is used for generating a token by user's ID and
-# decode the ID from this token
-login_serializer = URLSafeTimedSerializer(SECRET_KEY)
-
-
-class User(BASE, UserMixin):
- """User table."""
- __tablename__ = 'user'
- id = Column(Integer, primary_key=True)
- email = Column(String(80), unique=True)
- password = Column(String(225), default='')
- active = Column(Boolean, default=True)
-
- def __init__(self, email, password, **kwargs):
- self.email = email
- self.password = self._set_password(password)
-
- def __repr__(self):
- return '<User name: %s>' % self.email
-
- def _set_password(self, password):
- return self._hash_password(password)
-
- def get_password(self):
- return self.password
-
- def valid_password(self, password):
- return self.password == self._hash_password(password)
-
- def get_auth_token(self):
- return login_serializer.dumps(self.id)
-
- def is_active(self):
- return self.active
-
- def _hash_password(self, password):
- return md5(password).hexdigest()
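The v1 User stores an unsalted md5 hex digest of the password and issues login tokens through itsdangerous' URLSafeTimedSerializer. A rough sketch of both pieces outside the model follows; the original passes a str directly to md5 under Python 2, so the .encode() below is only needed on Python 3, and the credentials are illustrative.

from hashlib import md5
from itsdangerous import URLSafeTimedSerializer

SECRET_KEY = 'abcd'  # placeholder secret, as in the module above
serializer = URLSafeTimedSerializer(SECRET_KEY)

# Password check mirrors valid_password(): compare md5 hex digests.
stored = md5('example-password'.encode()).hexdigest()
assert stored == md5('example-password'.encode()).hexdigest()

# Token round trip: dumps() a user id, loads() it back, optionally bounded by max_age.
token = serializer.dumps(42)
assert serializer.loads(token, max_age=3600) == 42

Unsalted md5 is, of course, weak for password storage by modern standards.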
-
-
-class SwitchConfig(BASE):
- """Swtich Config table.
-
- :param id: The unique identifier of the switch config.
- :param ip: The IP address of the switch.
- :param filter_port: The port of the switch which need to be filtered.
- """
- __tablename__ = 'switch_config'
- id = Column(Integer, primary_key=True)
- ip = Column(String(80))
- filter_port = Column(String(16))
- __table_args__ = (UniqueConstraint('ip', 'filter_port', name='filter1'), )
-
- def __init__(self, **kwargs):
- super(SwitchConfig, self).__init__(**kwargs)
-
-
-class Switch(BASE):
- """Switch table.
-
- :param id: the unique identifier of the switch. int as primary key.
- :param ip: the IP address of the switch.
- :param vendor_info: the name of the vendor
- :param credential_data: used for accessing and retrieving information
- from the switch. Store json format as string.
- :param state: Enum. 'initialized'/'repolling': polling has not finished
-               learning all MAC addresses of devices connected to the switch;
-               'unreachable': one of the final states, indicates that the
-               switch is unreachable at this time, so no MAC address could be
-               retrieved from the switch.
-               'notsupported': one of the final states, indicates that the
-               vendor found is not supported yet, so no MAC address will be
-               retrieved from the switch.
-               'error': one of the final states, indicates that something
-               went wrong.
-               'under_monitoring': one of the final states, indicates that
-               MAC addresses have been learned successfully from the switch.
- :param err_msg: Error message when polling switch failed.
- :param machines: refer to list of Machine connected to the switch.
- """
- __tablename__ = 'switch'
-
- id = Column(Integer, primary_key=True)
- ip = Column(String(80), unique=True)
- credential_data = Column(Text)
- vendor_info = Column(String(256), nullable=True)
- state = Column(Enum('initialized', 'unreachable', 'notsupported',
- 'repolling', 'error', 'under_monitoring',
- name='switch_state'),
- default='initialized')
- err_msg = Column(Text)
-
- def __init__(self, **kwargs):
- super(Switch, self).__init__(**kwargs)
-
- def __repr__(self):
- return '<Switch ip: %r, credential: %r, vendor: %r, state: %s>'\
- % (self.ip, self.credential, self.vendor, self.state)
-
- @hybrid_property
- def vendor(self):
- """vendor property getter"""
- return self.vendor_info
-
- @vendor.setter
- def vendor(self, value):
- """vendor property setter"""
- self.vendor_info = value
-
- @property
- def credential(self):
- """credential data getter.
-
- :returns: python primitive dictionary object.
- """
- if self.credential_data:
- try:
- credential = json.loads(self.credential_data)
- return credential
- except Exception as error:
- logging.error('failed to load credential data %s: %s',
- self.id, self.credential_data)
- logging.exception(error)
- raise error
- else:
- return {}
-
- @credential.setter
- def credential(self, value):
- """credential property setter
-
- :param value: dict of configuration data needed to update.
- """
- if value:
- try:
- credential = {}
- if self.credential_data:
- credential = json.loads(self.credential_data)
-
- credential.update(value)
- self.credential_data = json.dumps(credential)
-
- except Exception as error:
- logging.error('failed to dump credential data %s: %s',
- self.id, value)
- logging.exception(error)
- raise error
-
- else:
- self.credential_data = json.dumps({})
-
- logging.debug('switch now is %s', self)
-
-
-class Machine(BASE):
- """Machine table.
-
- .. note::
- currently, we are only taking care of the management plane.
- Therefore, we assume one machine is connected to one switch.
-
- :param id: int, identity as primary key
- :param mac: string, the MAC address of the machine.
- :param switch_id: switch id that this machine connected on to.
- :param port: nth port of the switch that this machine connected.
- :param vlan: vlan id that this machine connected on to.
- :param update_timestamp: last time this entry got updated.
- :param switch: refer to the Switch the machine connects to.
- """
- __tablename__ = 'machine'
-
- id = Column(Integer, primary_key=True)
- mac = Column(String(24), default='')
- port = Column(String(16), default='')
- vlan = Column(Integer, default=0)
- update_timestamp = Column(DateTime, default=datetime.now,
- onupdate=datetime.now)
- switch_id = Column(Integer, ForeignKey('switch.id',
- onupdate='CASCADE',
- ondelete='SET NULL'))
- __table_args__ = (UniqueConstraint('mac', 'switch_id',
- name='unique_machine'),)
- switch = relationship('Switch', backref=backref('machines',
- lazy='dynamic'))
-
- def __init__(self, **kwargs):
- super(Machine, self).__init__(**kwargs)
-
- def __repr__(self):
- return '<Machine %r: port=%r vlan=%r switch=%r>' % (
- self.mac, self.port, self.vlan, self.switch)
-
-
-class HostState(BASE):
- """The state of the ClusterHost.
-
- :param id: int, identity as primary key.
- :param state: Enum. 'UNINITIALIZED': the host is ready to be set up.
-               'INSTALLING': the host is being installed.
-               'READY': the host is set up.
-               'ERROR': the host has an error.
- :param progress: float, the installing progress from 0 to 1.
- :param message: the latest installing message.
- :param severity: Enum, the installing message severity.
- ('INFO', 'WARNING', 'ERROR')
- :param update_timestamp: the latest timestamp the entry got updated.
- :param host: refer to ClusterHost.
- :param os_progress: float, the installing progress of OS from 0 to 1.
- """
- __tablename__ = "host_state"
-
- id = Column(Integer, ForeignKey('cluster_host.id',
- onupdate='CASCADE',
- ondelete='CASCADE'),
- primary_key=True)
- state = Column(Enum('UNINITIALIZED', 'INSTALLING', 'READY', 'ERROR'),
- ColumnDefault('UNINITIALIZED'))
- progress = Column(Float, ColumnDefault(0.0))
- message = Column(Text)
- severity = Column(Enum('INFO', 'WARNING', 'ERROR'), ColumnDefault('INFO'))
- update_timestamp = Column(DateTime, default=datetime.now,
- onupdate=datetime.now)
- host = relationship('ClusterHost', backref=backref('state',
- uselist=False))
-
- os_progress = Column(Float, ColumnDefault(0.0))
- os_message = Column(Text)
- os_severity = Column(
- Enum('INFO', 'WARNING', 'ERROR'),
- ColumnDefault('INFO')
- )
- """
- this is added by Lei for separating os and package progress purposes
- os_state = Column(Enum('UNINITIALIZED', 'INSTALLING', 'OS_READY', 'ERROR'),
- ColumnDefault('UNINITIALIZED'))
- """
-
- def __init__(self, **kwargs):
- super(HostState, self).__init__(**kwargs)
-
- @hybrid_property
- def hostname(self):
- """hostname getter"""
- return self.host.hostname
-
- @hybrid_property
- def fullname(self):
- """fullname getter"""
- return self.host.fullname
-
- def __repr__(self):
- return (
- '<HostState %r: state=%r, progress=%s, '
- 'message=%s, severity=%s, os_progress=%s>'
- ) % (
- self.hostname, self.state, self.progress,
- self.message, self.severity, self.os_progress
- )
-
-
-class ClusterState(BASE):
- """The state of the Cluster.
-
- :param id: int, identity as primary key.
- :param state: Enum, 'UNINITIALIZED': the cluster is ready to be set up.
-               'INSTALLING': the cluster is being installed.
-               'READY': the cluster is set up.
-               'ERROR': the cluster has an error.
- :param progress: float, the installing progress from 0 to 1.
- :param message: the latest installing message.
- :param severity: Enum, the installing message severity.
- ('INFO', 'WARNING', 'ERROR').
- :param update_timestamp: the latest timestamp the entry got updated.
- :param cluster: refer to Cluster.
- """
- __tablename__ = 'cluster_state'
- id = Column(Integer, ForeignKey('cluster.id',
- onupdate='CASCADE',
- ondelete='CASCADE'),
- primary_key=True)
- state = Column(Enum('UNINITIALIZED', 'INSTALLING', 'READY', 'ERROR'),
- ColumnDefault('UNINITIALIZED'))
- progress = Column(Float, ColumnDefault(0.0))
- message = Column(Text)
- severity = Column(Enum('INFO', 'WARNING', 'ERROR'), ColumnDefault('INFO'))
- update_timestamp = Column(DateTime, default=datetime.now,
- onupdate=datetime.now)
- cluster = relationship('Cluster', backref=backref('state',
- uselist=False))
-
- def __init__(self, **kwargs):
- super(ClusterState, self).__init__(**kwargs)
-
- @hybrid_property
- def clustername(self):
- """clustername getter"""
- return self.cluster.name
-
- def __repr__(self):
- return (
- '<ClusterState %r: state=%r, progress=%s, '
- 'message=%s, severity=%s>'
- ) % (
- self.clustername, self.state, self.progress,
- self.message, self.severity
- )
-
-
-class Cluster(BASE):
- """Cluster configuration information.
-
- :param id: int, identity as primary key.
- :param name: str, cluster name.
- :param mutable: bool, if the Cluster is mutable.
- :param security_config: str stores json formatted security information.
- :param networking_config: str stores json formatted networking information.
- :param partition_config: str stores json formatted partition information.
- :param adapter_id: the refer id in the Adapter table.
- :param raw_config: str stores json formatted other cluster information.
- :param adapter: refer to the Adapter.
- :param state: refer to the ClusterState.
- """
- __tablename__ = 'cluster'
-
- id = Column(Integer, primary_key=True)
- name = Column(String(80), unique=True)
- mutable = Column(Boolean, default=True)
- security_config = Column(Text)
- networking_config = Column(Text)
- partition_config = Column(Text)
- adapter_id = Column(Integer, ForeignKey('adapter.id',
- onupdate='CASCADE',
- ondelete='SET NULL'),
- nullable=True)
- raw_config = Column(Text)
- adapter = relationship("Adapter", backref=backref('clusters',
- lazy='dynamic'))
-
- def __init__(self, **kwargs):
- if 'name' not in kwargs or not kwargs['name']:
- kwargs['name'] = str(uuid.uuid4())
-
- super(Cluster, self).__init__(**kwargs)
-
- def __repr__(self):
- return '<Cluster %r: config=%r>' % (self.name, self.config)
-
- @property
- def partition(self):
- """partition getter"""
- if self.partition_config:
- try:
- return json.loads(self.partition_config)
- except Exception as error:
- logging.error('failed to load partition config %s: %s',
- self.id, self.partition_config)
- logging.exception(error)
- raise error
- else:
- return {}
-
- @partition.setter
- def partition(self, value):
- """partition setter"""
- logging.debug('cluster %s set partition %s', self.id, value)
- if value:
- try:
- self.partition_config = json.dumps(value)
- except Exception as error:
- logging.error('failed to dump partition config %s: %s',
- self.id, value)
- logging.exception(error)
- raise error
- else:
- self.partition_config = None
-
- @property
- def security(self):
- """security getter"""
- if self.security_config:
- try:
- return json.loads(self.security_config)
- except Exception as error:
- logging.error('failed to load security config %s: %s',
- self.id, self.security_config)
- logging.exception(error)
- raise error
- else:
- return {}
-
- @security.setter
- def security(self, value):
- """security setter"""
- logging.debug('cluster %s set security %s', self.id, value)
- if value:
- try:
- self.security_config = json.dumps(value)
- except Exception as error:
- logging.error('failed to dump security config %s: %s',
- self.id, value)
- logging.exception(error)
- raise error
- else:
- self.security_config = None
-
- @property
- def networking(self):
- """networking getter"""
- if self.networking_config:
- try:
- return json.loads(self.networking_config)
- except Exception as error:
- logging.error('failed to load networking config %s: %s',
- self.id, self.networking_config)
- logging.exception(error)
- raise error
- else:
- return {}
-
- @networking.setter
- def networking(self, value):
- """networking setter."""
- logging.debug('cluster %s set networking %s', self.id, value)
- if value:
- try:
- self.networking_config = json.dumps(value)
- except Exception as error:
- logging.error('failed to dump networking config %s: %s',
- self.id, value)
- logging.exception(error)
- raise error
- else:
- self.networking_config = None
-
- @hybrid_property
- def config(self):
- """get config from security, networking, partition."""
- config = {}
- if self.raw_config:
- try:
- config = json.loads(self.raw_config)
- except Exception as error:
- logging.error('failed to load raw config %s: %s',
- self.id, self.raw_config)
- logging.exception(error)
- raise error
-
- util.merge_dict(config, {'security': self.security})
- util.merge_dict(config, {'networking': self.networking})
- util.merge_dict(config, {'partition': self.partition})
- util.merge_dict(config, {'clusterid': self.id,
- 'clustername': self.name})
- return config
-
- @config.setter
- def config(self, value):
- """set config to security, networking, partition."""
- logging.debug('cluster %s set config %s', self.id, value)
- if not value:
- self.security = None
- self.networking = None
- self.partition = None
- self.raw_config = None
- return
-
- self.security = value.get('security')
- self.networking = value.get('networking')
- self.partition = value.get('partition')
-
- try:
- self.raw_config = json.dumps(value)
- except Exception as error:
- logging.error('failed to dump raw config %s: %s',
- self.id, value)
- logging.exception(error)
- raise error
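The config hybrid property assembles one dict from raw_config plus the security, networking and partition sub-configs and the cluster ids. A rough illustration with plain dicts, where dict.update stands in for compass.utils.util.merge_dict and all values are made up:

raw_config = {'os': 'CentOS'}
security = {'server_credentials': {'username': 'root'}}
networking = {'interfaces': {'management': {'nic': 'eth0'}}}
partition = {'/var': {'percentage': 20}}

config = dict(raw_config)
config.update({'security': security})
config.update({'networking': networking})
config.update({'partition': partition})
config.update({'clusterid': 1, 'clustername': 'demo'})
# config now holds the raw config plus the three sub-configs and the ids,
# which is what the getter above returns.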
-
-
-class ClusterHost(BASE):
- """ClusterHost information.
-
- :param id: int, identity as primary key.
- :param machine_id: int, the id of the Machine.
- :param cluster_id: int, the id of the Cluster.
- :param mutable: if the ClusterHost information is mutable.
- :param hostname: str, host name.
- :param config_data: string, json formatted config data.
- :param cluster: refer to Cluster the host in.
- :param machine: refer to the Machine the host on.
- :param state: refer to HostState indicates the host state.
- """
- __tablename__ = 'cluster_host'
-
- id = Column(Integer, primary_key=True)
-
- machine_id = Column(Integer, ForeignKey('machine.id',
- onupdate='CASCADE',
- ondelete='CASCADE'),
- nullable=True, unique=True)
-
- cluster_id = Column(Integer, ForeignKey('cluster.id',
- onupdate='CASCADE',
- ondelete='SET NULL'),
- nullable=True)
-
- hostname = Column(String(80))
- config_data = Column(Text)
- mutable = Column(Boolean, default=True)
- __table_args__ = (UniqueConstraint('cluster_id', 'hostname',
- name='unique_host'),)
-
- cluster = relationship("Cluster",
- backref=backref('hosts', lazy='dynamic'))
- machine = relationship("Machine",
- backref=backref('host', uselist=False))
-
- def __init__(self, **kwargs):
- if 'hostname' not in kwargs or not kwargs['hostname']:
- kwargs['hostname'] = str(uuid.uuid4())
-
- super(ClusterHost, self).__init__(**kwargs)
-
- def __repr__(self):
- return '<ClusterHost %r: cluster=%r machine=%r>' % (
- self.hostname, self.cluster, self.machine)
-
- @hybrid_property
- def fullname(self):
- return '%s.%s' % (self.hostname, self.cluster.id)
-
- @property
- def config(self):
- """config getter."""
- config = {}
- try:
- if self.config_data:
- config.update(json.loads(self.config_data))
-
- config.update({
- 'hostid': self.id,
- 'hostname': self.hostname,
- })
- if self.cluster:
- config.update({
- 'clusterid': self.cluster.id,
- 'clustername': self.cluster.name,
- 'fullname': self.fullname,
- })
-
- if self.machine:
- util.merge_dict(
- config, {
- 'networking': {
- 'interfaces': {
- 'management': {
- 'mac': self.machine.mac
- }
- }
- },
- 'switch_port': self.machine.port,
- 'vlan': self.machine.vlan,
- })
- if self.machine.switch:
- util.merge_dict(
- config, {'switch_ip': self.machine.switch.ip})
-
- except Exception as error:
- logging.error('failed to load config %s: %s',
- self.hostname, self.config_data)
- logging.exception(error)
- raise error
-
- return config
-
- @config.setter
- def config(self, value):
- """config setter"""
- if not self.config_data:
- config = {
- }
- self.config_data = json.dumps(config)
-
- if value:
- try:
- config = json.loads(self.config_data)
- util.merge_dict(config, value)
-
- self.config_data = json.dumps(config)
- except Exception as error:
- logging.error('failed to dump config %s: %s',
- self.hostname, value)
- logging.exception(error)
- raise error
-
-
-class LogProgressingHistory(BASE):
- """host installing log history for each file.
-
- :param id: int, identity as primary key.
- :param pathname: str, the full path of the installing log file. unique.
- :param position: int, the position of the log file it has processed.
- :param partial_line: str, partial line of the log.
- :param progress: float, indicates the installing progress from 0 to 1.
- :param message: str, the installing message.
- :param severity: Enum, the installing message severity.
- ('ERROR', 'WARNING', 'INFO')
- :param line_matcher_name: str, the line matcher name of the log processor.
- :param update_timestamp: datetime, the latest timestamp the entry updated.
- """
- __tablename__ = 'log_progressing_history'
- id = Column(Integer, primary_key=True)
- pathname = Column(String(80), unique=True)
- position = Column(Integer, ColumnDefault(0))
- partial_line = Column(Text)
- progress = Column(Float, ColumnDefault(0.0))
- message = Column(Text)
- severity = Column(Enum('ERROR', 'WARNING', 'INFO'), ColumnDefault('INFO'))
- line_matcher_name = Column(String(80), ColumnDefault('start'))
- update_timestamp = Column(DateTime, default=datetime.now,
- onupdate=datetime.now)
-
- def __init__(self, **kwargs):
- super(LogProgressingHistory, self).__init__(**kwargs)
-
- def __repr__(self):
- return (
- 'LogProgressingHistory[%r: position %r,'
- 'partial_line %r,progress %r,message %r,'
- 'severity %r]'
- ) % (
- self.pathname, self.position,
- self.partial_line,
- self.progress,
- self.message,
- self.severity
- )
-
-
-class Adapter(BASE):
- """Table stores ClusterHost installing Adapter information.
-
- :param id: int, identity as primary key.
- :param name: string, adapter name, unique.
- :param os: string, os name for installing the host.
- :param target_system: string, target system to be installed on the host.
- :param clusters: refer to the list of Cluster.
- """
- __tablename__ = 'adapter'
- id = Column(Integer, primary_key=True)
- name = Column(String(80), unique=True)
- os = Column(String(80))
- target_system = Column(String(80))
- __table_args__ = (
- UniqueConstraint('os', 'target_system', name='unique_adapter'),)
-
- def __init__(self, **kwargs):
- super(Adapter, self).__init__(**kwargs)
-
- def __repr__(self):
- return '<Adapter %r: os %r, target_system %r>' % (
- self.name, self.os, self.target_system
- )
-
-
-class Role(BASE):
- """The Role table stores avaiable roles of one target system.
-
- .. note::
- the host can be deployed to one or several roles in the cluster.
-
- :param id: int, identity as primary key.
- :param name: role name.
- :param target_system: str, the target_system.
- :param description: str, the description of the role.
- """
- __tablename__ = 'role'
- id = Column(Integer, primary_key=True)
- name = Column(String(80), unique=True)
- target_system = Column(String(80))
- description = Column(Text)
-
- def __init__(self, **kwargs):
- super(Role, self).__init__(**kwargs)
-
- def __repr__(self):
- return '<Role %r : target_system %r, description:%r>' % (
- self.name, self.target_system, self.description)
diff --git a/compass-tasks/db/validator.py b/compass-tasks/db/validator.py
deleted file mode 100644
index 730bb52..0000000
--- a/compass-tasks/db/validator.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright 2014 Huawei Technologies Co. Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Validator methods."""
-import logging
-import netaddr
-import re
-import socket
-
-from compass.utils import setting_wrapper as setting
-from compass.utils import util
-
-
-def is_valid_ip(name, ip_addr, **kwargs):
- """Valid the format of an IP address."""
- if isinstance(ip_addr, list):
- return all([
- is_valid_ip(name, item, **kwargs) for item in ip_addr
- ])
- try:
- netaddr.IPAddress(ip_addr)
- except Exception:
- logging.debug('%s invalid ip addr %s', name, ip_addr)
- return False
- return True
-
-
-def is_valid_network(name, ip_network, **kwargs):
- """Valid the format of an Ip network."""
- if isinstance(ip_network, list):
- return all([
- is_valid_network(name, item, **kwargs) for item in ip_network
- ])
- try:
- netaddr.IPNetwork(ip_network)
- except Exception:
- logging.debug('%s invalid network %s', name, ip_network)
- return False
- return True
-
-
-def is_valid_netmask(name, ip_addr, **kwargs):
- """Valid the format of a netmask."""
- if isinstance(ip_addr, list):
- return all([
- is_valid_netmask(name, item, **kwargs) for item in ip_addr
- ])
- if not is_valid_ip(name, ip_addr):
- return False
- ip = netaddr.IPAddress(ip_addr)
- if ip.is_netmask():
- return True
- logging.debug('%s invalid netmask %s', name, ip_addr)
- return False
-
-
-def is_valid_gateway(name, ip_addr, **kwargs):
- """Valid the format of gateway."""
- if isinstance(ip_addr, list):
- return all([
- is_valid_gateway(name, item, **kwargs) for item in ip_addr
- ])
- if not is_valid_ip(name, ip_addr):
- return False
- ip = netaddr.IPAddress(ip_addr)
- if ip.is_private() or ip.is_public():
- return True
- logging.debug('%s invalid gateway %s', name, ip_addr)
- return False
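is_valid_netmask defers to netaddr's is_netmask() (contiguous ones followed by zeros), and is_valid_gateway accepts any address that netaddr classifies as private or public. A quick netmask check:

import netaddr

assert netaddr.IPAddress('255.255.255.0').is_netmask()
assert not netaddr.IPAddress('255.255.0.255').is_netmask()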
-
-
-def is_valid_dns(name, dns, **kwargs):
- """Valid the format of DNS."""
- if isinstance(dns, list):
- return all([is_valid_dns(name, item, **kwargs) for item in dns])
- if is_valid_ip(name, dns):
- return True
- try:
- socket.gethostbyname_ex(dns)
- except Exception:
- logging.debug('%s invalid dns name %s', name, dns)
- return False
- return True
-
-
-def is_valid_url(name, url, **kwargs):
- """Valid the format of url."""
- if isinstance(url, list):
- return all([
- is_valid_url(name, item, **kwargs) for item in url
- ])
- if re.match(
- r'^(http|https|ftp)://([0-9A-Za-z_-]+)(\.[0-9a-zA-Z_-]+)*'
- r'(:\d+)?(/[0-9a-zA-Z_-]+)*$',
- url
- ):
- return True
- logging.debug(
- '%s invalid url %s', name, url
- )
- return False
-
-
-def is_valid_domain(name, domain, **kwargs):
- """Validate the format of domain."""
- if isinstance(domain, list):
- return all([
- is_valid_domain(name, item, **kwargs) for item in domain
- ])
- if re.match(
- r'^([0-9a-zA-Z_-]+)(\.[0-9a-zA-Z_-]+)*$',
- domain
- ):
- return True
- logging.debug(
- '%s invalid domain %s', name, domain
- )
- return False
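The URL and domain checks are pure regular expressions; note that the URL pattern has no provision for query strings or dots in path segments, so such URLs are rejected. A small check against the same patterns (hostnames illustrative):

import re

URL_RE = (r'^(http|https|ftp)://([0-9A-Za-z_-]+)(\.[0-9a-zA-Z_-]+)*'
          r'(:\d+)?(/[0-9a-zA-Z_-]+)*$')
DOMAIN_RE = r'^([0-9a-zA-Z_-]+)(\.[0-9a-zA-Z_-]+)*$'

assert re.match(URL_RE, 'http://mirror.example.com:8080/centos')
assert not re.match(URL_RE, 'http://mirror.example.com/centos?arch=x86_64')
assert re.match(DOMAIN_RE, 'ods.example.com')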
-
-
-def is_valid_username(name, username, **kwargs):
- """Valid the format of username."""
- if bool(username):
- return True
- logging.debug(
- '%s username is empty', name
- )
-
-
-def is_valid_password(name, password, **kwargs):
- """Valid the format of password."""
- if bool(password):
- return True
- logging.debug('%s password is empty', name)
- return False
-
-
-def is_valid_partition(name, partition, **kwargs):
- """Valid the format of partition name."""
- if name != 'swap' and not name.startswith('/'):
- logging.debug(
- '%s is not started with / or swap', name
- )
- return False
- if 'size' not in partition and 'percentage' not in partition:
- logging.debug(
- '%s partition does not contain size or percentage',
- name
- )
- return False
- return True
-
-
-def is_valid_percentage(name, percentage, **kwargs):
- """Valid the percentage."""
- if 0 <= percentage <= 100:
- return True
- logging.debug('%s invalid percentage %s', name, percentage)
-
-
-def is_valid_port(name, port, **kwargs):
- """Valid the format of port."""
- if 0 < port < 65536:
- return True
- logging.debug('%s invalid port %s', name, port)
-
-
-def is_valid_size(name, size, **kwargs):
- if re.match(r'^(\d+)(K|M|G|T)$', size):
- return True
- logging.debug('%s invalid size %s', name, size)
- return False
-
-
-VALIDATOR_GLOBALS = globals()
-VALIDATOR_LOCALS = locals()
-VALIDATOR_CONFIGS = util.load_configs(
- setting.VALIDATOR_DIR,
- config_name_suffix='.py',
- env_globals=VALIDATOR_GLOBALS,
- env_locals=VALIDATOR_LOCALS
-)
-for validator_config in VALIDATOR_CONFIGS:
- VALIDATOR_LOCALS.update(validator_config)