-rw-r--r--  compass-tasks-k8s/Dockerfile | 6
-rw-r--r--  compass-tasks-k8s/README.md | 1
-rw-r--r--  compass-tasks-k8s/run.sh | 25
-rw-r--r--  compass-tasks/Dockerfile | 12
-rw-r--r--  compass-tasks/README.md | 1
-rw-r--r--  compass-tasks/actions/__init__.py | 13
-rw-r--r--  compass-tasks/actions/clean.py | 195
-rw-r--r--  compass-tasks/actions/cli.py | 179
-rw-r--r--  compass-tasks/actions/delete.py | 148
-rw-r--r--  compass-tasks/actions/deploy.py | 182
-rw-r--r--  compass-tasks/actions/health_check/__init__.py | 13
-rw-r--r--  compass-tasks/actions/health_check/base.py | 57
-rw-r--r--  compass-tasks/actions/health_check/check.py | 96
-rw-r--r--  compass-tasks/actions/health_check/check_apache.py | 89
-rw-r--r--  compass-tasks/actions/health_check/check_celery.py | 115
-rw-r--r--  compass-tasks/actions/health_check/check_dhcp.py | 184
-rw-r--r--  compass-tasks/actions/health_check/check_dns.py | 139
-rw-r--r--  compass-tasks/actions/health_check/check_hds.py | 97
-rw-r--r--  compass-tasks/actions/health_check/check_misc.py | 219
-rw-r--r--  compass-tasks/actions/health_check/check_os_installer.py | 151
-rw-r--r--  compass-tasks/actions/health_check/check_package_installer.py | 68
-rw-r--r--  compass-tasks/actions/health_check/check_squid.py | 128
-rw-r--r--  compass-tasks/actions/health_check/check_tftp.py | 96
-rw-r--r--  compass-tasks/actions/health_check/utils.py | 114
-rw-r--r--  compass-tasks/actions/install_callback.py | 181
-rw-r--r--  compass-tasks/actions/patch.py | 69
-rw-r--r--  compass-tasks/actions/poll_switch.py | 162
-rw-r--r--  compass-tasks/actions/reinstall.py | 38
-rw-r--r--  compass-tasks/actions/search.py | 46
-rw-r--r--  compass-tasks/actions/update_progress.py | 298
-rw-r--r--  compass-tasks/actions/util.py | 342
-rw-r--r--  compass-tasks/apiclient/__init__.py | 0
-rwxr-xr-x  compass-tasks/apiclient/example.py | 463
-rw-r--r--  compass-tasks/apiclient/restful.py | 1102
-rw-r--r--  compass-tasks/apiclient/v1/__init__.py | 0
-rwxr-xr-x  compass-tasks/apiclient/v1/example.py | 305
-rw-r--r--  compass-tasks/apiclient/v1/restful.py | 655
-rwxr-xr-x  compass-tasks/build.sh | 51
-rw-r--r--  compass-tasks/db/__init__.py | 13
-rw-r--r--  compass-tasks/db/api/__init__.py | 13
-rw-r--r--  compass-tasks/db/api/adapter.py | 313
-rw-r--r--  compass-tasks/db/api/adapter_holder.py | 155
-rw-r--r--  compass-tasks/db/api/cluster.py | 2444
-rw-r--r--  compass-tasks/db/api/database.py | 264
-rw-r--r--  compass-tasks/db/api/health_check_report.py | 190
-rw-r--r--  compass-tasks/db/api/host.py | 1120
-rw-r--r--  compass-tasks/db/api/machine.py | 317
-rw-r--r--  compass-tasks/db/api/metadata.py | 517
-rw-r--r--  compass-tasks/db/api/metadata_holder.py | 731
-rw-r--r--  compass-tasks/db/api/network.py | 160
-rw-r--r--  compass-tasks/db/api/permission.py | 357
-rw-r--r--  compass-tasks/db/api/switch.py | 1213
-rw-r--r--  compass-tasks/db/api/user.py | 553
-rw-r--r--  compass-tasks/db/api/user_log.py | 82
-rw-r--r--  compass-tasks/db/api/utils.py | 1286
-rw-r--r--  compass-tasks/db/callback.py | 204
-rw-r--r--  compass-tasks/db/config_validation/__init__.py | 0
-rw-r--r--  compass-tasks/db/config_validation/default_validator.py | 131
-rw-r--r--  compass-tasks/db/config_validation/extension/__init__.py | 0
-rw-r--r--  compass-tasks/db/config_validation/extension/openstack.py | 18
-rw-r--r--  compass-tasks/db/exception.py | 116
-rw-r--r--  compass-tasks/db/models.py | 1924
-rw-r--r--  compass-tasks/db/v1/model.py | 724
-rw-r--r--  compass-tasks/db/validator.py | 195
-rw-r--r--  compass-tasks/deployment/__init__.py | 15
-rw-r--r--  compass-tasks/deployment/deploy_manager.py | 237
-rw-r--r--  compass-tasks/deployment/installers/__init__.py | 21
-rw-r--r--  compass-tasks/deployment/installers/config_manager.py | 527
-rw-r--r--  compass-tasks/deployment/installers/installer.py | 291
-rw-r--r--  compass-tasks/deployment/installers/os_installers/__init__.py | 13
-rw-r--r--  compass-tasks/deployment/installers/os_installers/cobbler/__init__.py | 13
-rw-r--r--  compass-tasks/deployment/installers/os_installers/cobbler/cobbler.py | 449
-rw-r--r--  compass-tasks/deployment/installers/pk_installers/__init__.py | 13
-rw-r--r--  compass-tasks/deployment/installers/pk_installers/ansible_installer/__init__.py | 0
-rw-r--r--  compass-tasks/deployment/installers/pk_installers/ansible_installer/ansible_installer.py | 441
-rw-r--r--  compass-tasks/deployment/utils/__init__.py | 15
-rw-r--r--  compass-tasks/deployment/utils/constants.py | 84
-rw-r--r--  compass-tasks/hdsdiscovery/SNMP_CONFIG.md | 33
-rw-r--r--  compass-tasks/hdsdiscovery/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/base.py | 185
-rw-r--r--  compass-tasks/hdsdiscovery/error.py | 26
-rw-r--r--  compass-tasks/hdsdiscovery/hdmanager.py | 171
-rw-r--r--  compass-tasks/hdsdiscovery/utils.py | 289
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/appliance/__init__.py | 0
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/appliance/appliance.py | 34
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/appliance/plugins/__init__.py | 0
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/appliance/plugins/mac.py | 48
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/arista/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/arista/arista.py | 33
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/arista/plugins/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/arista/plugins/mac.py | 24
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/hp/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/hp/hp.py | 33
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/hp/plugins/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/hp/plugins/mac.py | 23
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/huawei/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/huawei/huawei.py | 33
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/huawei/plugins/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/huawei/plugins/mac.py | 63
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/ovswitch/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/ovswitch/ovswitch.py | 76
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/ovswitch/plugins/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/ovswitch/plugins/mac.py | 87
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/pica8/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/pica8/pica8.py | 33
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/pica8/plugins/__init__.py | 13
-rw-r--r--  compass-tasks/hdsdiscovery/vendors/pica8/plugins/mac.py | 24
-rw-r--r--  compass-tasks/log_analyzor/__init__.py | 13
-rw-r--r--  compass-tasks/log_analyzor/adapter_matcher.py | 126
-rw-r--r--  compass-tasks/log_analyzor/environment.py | 29
-rw-r--r--  compass-tasks/log_analyzor/file_matcher.py | 252
-rw-r--r--  compass-tasks/log_analyzor/line_matcher.py | 206
-rw-r--r--  compass-tasks/log_analyzor/progress_calculator.py | 208
-rw-r--r--  compass-tasks/misc/Dockerfile | 53
-rw-r--r--  compass-tasks/misc/compass_install.repo | 5
-rw-r--r--  compass-tasks/requirements.txt | 23
-rw-r--r--  compass-tasks/setup.py | 97
-rwxr-xr-x  compass-tasks/start.sh | 4
-rw-r--r--  compass-tasks/supervisord.conf | 135
-rw-r--r--  compass-tasks/tasks/__init__.py | 13
-rw-r--r--  compass-tasks/tasks/client.py | 33
-rw-r--r--  compass-tasks/tasks/tasks.py | 326
-rw-r--r--  compass-tasks/utils/__init__.py | 13
-rw-r--r--  compass-tasks/utils/celeryconfig_wrapper.py | 44
-rw-r--r--  compass-tasks/utils/flags.py | 91
-rw-r--r--  compass-tasks/utils/logsetting.py | 108
-rw-r--r--  compass-tasks/utils/setting_wrapper.py | 175
-rw-r--r--  compass-tasks/utils/util.py | 395
129 files changed, 24919 insertions, 0 deletions
diff --git a/compass-tasks-k8s/Dockerfile b/compass-tasks-k8s/Dockerfile
new file mode 100644
index 0000000..1203ffa
--- /dev/null
+++ b/compass-tasks-k8s/Dockerfile
@@ -0,0 +1,6 @@
+FROM huangxiangyu/compass-tasks:v0.3
+#FROM localbuild/compass-tasks
+
+ADD ./run.sh /root/
+RUN chmod +x /root/run.sh
+RUN /root/run.sh
diff --git a/compass-tasks-k8s/README.md b/compass-tasks-k8s/README.md
new file mode 100644
index 0000000..2dab2ee
--- /dev/null
+++ b/compass-tasks-k8s/README.md
@@ -0,0 +1 @@
+# compass-tasks-k8s
diff --git a/compass-tasks-k8s/run.sh b/compass-tasks-k8s/run.sh
new file mode 100644
index 0000000..516ead0
--- /dev/null
+++ b/compass-tasks-k8s/run.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2016-2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -x
+COMPASS_DIR=${BASH_SOURCE[0]%/*}
+
+rm -rf /opt/kargo_k8s
+git clone https://github.com/kubernetes-incubator/kubespray.git /opt/kargo_k8s
+
+cd /opt/kargo_k8s
+git checkout v2.2.0
+
+pip uninstall ansible -y
+
+pip install ansible==2.3.1.0
+
+
+
+
diff --git a/compass-tasks/Dockerfile b/compass-tasks/Dockerfile
new file mode 100644
index 0000000..3beaf57
--- /dev/null
+++ b/compass-tasks/Dockerfile
@@ -0,0 +1,12 @@
+FROM centos:7
+
+ADD . /root/compass-tasks
+
+RUN /root/compass-tasks/build.sh
+
+EXPOSE 6379
+
+VOLUME ["/var/ansible", "/etc/compass/machine_list", "/etc/compass/switch_list"]
+
+ENTRYPOINT ["/bin/bash", "-c"]
+CMD ["/usr/local/bin/start.sh"]
diff --git a/compass-tasks/README.md b/compass-tasks/README.md
new file mode 100644
index 0000000..952f4b5
--- /dev/null
+++ b/compass-tasks/README.md
@@ -0,0 +1 @@
+# compass-tasks
\ No newline at end of file
diff --git a/compass-tasks/actions/__init__.py b/compass-tasks/actions/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/actions/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/actions/clean.py b/compass-tasks/actions/clean.py
new file mode 100644
index 0000000..a4e9bc9
--- /dev/null
+++ b/compass-tasks/actions/clean.py
@@ -0,0 +1,195 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to clean installers
+"""
+import logging
+import xmlrpclib
+
+from compass.actions import util
+
+try:
+ import chef
+except ImportError:
+ pass
+
+class CobblerInstaller(object):
+ """cobbler installer"""
+ CREDENTIALS = "credentials"
+ USERNAME = 'username'
+ PASSWORD = 'password'
+
+ INSTALLER_URL = "cobbler_url"
+
+ def __init__(self, settings):
+ username = settings[self.CREDENTIALS][self.USERNAME]
+ password = settings[self.CREDENTIALS][self.PASSWORD]
+ cobbler_url = settings[self.INSTALLER_URL]
+ try:
+ self.remote = xmlrpclib.Server(cobbler_url)
+ self.token = self.remote.login(username, password)
+ logging.info('cobbler %s client created', cobbler_url)
+ except Exception as error:
+ logging.error(
+ 'failed to login %s with (%s, %s)',
+ cobbler_url, username, password
+ )
+ logging.exception(error)
+
+ def clean(self):
+ systems = self.remote.get_systems()
+ for system in systems:
+ system_name = system['name']
+ try:
+ self.remote.remove_system(system_name, self.token)
+ logging.info('system %s is removed', system_name)
+ except Exception as error:
+ logging.error(
+ 'failed to remove system %s', system_name
+ )
+ logging.exception(error)
+
+
+class AnsibleInstaller(object):
+
+ def __init__(self, settings):
+ return
+
+ def clean(self):
+ pass
+
+
+class ChefInstaller(object):
+ DATABAGS = "databags"
+ CHEFSERVER_URL = "chef_url"
+ CHEFSERVER_DNS = "chef_server_dns"
+ CHEFSERVER_IP = "chef_server_ip"
+ KEY_DIR = "key_dir"
+ CLIENT = "client_name"
+
+ def __init__(self, settings):
+ installer_url = settings.get(self.CHEFSERVER_URL, None)
+ key_dir = settings.get(self.KEY_DIR, None)
+ client = settings.get(self.CLIENT, None)
+ try:
+ if installer_url and key_dir and client:
+ self.api = chef.ChefAPI(installer_url, key_dir, client)
+ else:
+ self.api = chef.autoconfigure()
+ logging.info(
+ 'chef client created %s(%s, %s)',
+ installer_url, key_dir, client
+ )
+ except Exception as error:
+ logging.error(
+ 'failed to create chef client %s(%s, %s)',
+ installer_url, key_dir, client
+ )
+ logging.exception(error)
+
+ def clean(self):
+ try:
+ for node_name in chef.Node.list(api=self.api):
+ node = chef.Node(node_name, api=self.api)
+ node.delete()
+ logging.info('delete node %s', node_name)
+ except Exception as error:
+ logging.error('failed to delete some nodes')
+ logging.exception(error)
+
+ try:
+ for client_name in chef.Client.list(api=self.api):
+ if client_name in ['chef-webui', 'chef-validator']:
+ continue
+ client = chef.Client(client_name, api=self.api)
+ client.delete()
+ logging.info('delete client %s', client_name)
+ except Exception as error:
+ logging.error('failed to delete some clients')
+ logging.exception(error)
+
+ try:
+ for env_name in chef.Environment.list(api=self.api):
+ if env_name == '_default':
+ continue
+ env = chef.Environment(env_name, api=self.api)
+ env.delete()
+ logging.info('delete env %s', env_name)
+ except Exception as error:
+ logging.error('failed to delete some envs')
+ logging.exception(error)
+
+ try:
+ for databag_name in chef.DataBag.list(api=self.api):
+ databag = chef.DataBag(databag_name, api=self.api)
+ for item_name, item in databag.items():
+ item.delete()
+ logging.info(
+ 'delete item %s from databag %s',
+ item_name, databag_name
+ )
+ except Exception as error:
+ logging.error('failed to delete some databag items')
+ logging.exception(error)
+
+
+OS_INSTALLERS = {
+ 'cobbler': CobblerInstaller
+}
+PK_INSTALLERS = {
+ 'chef_installer': ChefInstaller,
+ 'ansible_installer': AnsibleInstaller
+}
+
+
+def clean_os_installer(
+ os_installer_name, os_installer_settings
+):
+ with util.lock('serialized_action', timeout=100) as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to clean os installer'
+ )
+
+ if os_installer_name not in OS_INSTALLERS:
+ logging.error(
+ '%s not found in os_installers',
+ os_installer_name
+ )
+
+ os_installer = OS_INSTALLERS[os_installer_name](
+ os_installer_settings
+ )
+ os_installer.clean()
+
+
+def clean_package_installer(
+ package_installer_name, package_installer_settings
+):
+ with util.lock('serialized_action', timeout=100) as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to clean package installer'
+ )
+
+ if package_installer_name not in PK_INSTALLERS:
+ logging.error(
+            '%s not found in package_installers',
+ package_installer_name
+ )
+
+ package_installer = PK_INSTALLERS[package_installer_name](
+ package_installer_settings
+ )
+ package_installer.clean()
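A minimal sketch of how these entry points might be invoked, assuming settings dicts that mirror the keys each installer class reads (the URL and credentials below are placeholders, not values from this change):

    from compass.actions import clean

    # Hypothetical Cobbler settings; keys match what CobblerInstaller reads.
    cobbler_settings = {
        'cobbler_url': 'http://127.0.0.1/cobbler_api',
        'credentials': {'username': 'cobbler', 'password': 'cobbler'},
    }
    clean.clean_os_installer('cobbler', cobbler_settings)

    # Package installers are looked up in PK_INSTALLERS the same way;
    # AnsibleInstaller ignores its settings entirely.
    clean.clean_package_installer('ansible_installer', {})

Both functions serialize on the 'serialized_action' lock, so concurrent clean calls queue up rather than interleave.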
diff --git a/compass-tasks/actions/cli.py b/compass-tasks/actions/cli.py
new file mode 100644
index 0000000..c9058ed
--- /dev/null
+++ b/compass-tasks/actions/cli.py
@@ -0,0 +1,179 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass Command Line Interface"""
+import logging
+import subprocess
+import sys
+
+from compass.actions.health_check import check
+from compass.db.api import database
+
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+from compass.utils.util import pretty_print
+
+
+ACTION_MAP = {
+ "check": "apache celery dhcp dns hds misc os_installer "
+ "package_installer squid tftp".split(" "),
+ "refresh": "db sync".split(" "),
+}
+
+
+class BootCLI(object):
+ """CLI to do compass check."""
+
+ def __init__(self):
+ return
+
+ def run(self, args):
+ """cli takes the commands and calls respective modules."""
+ action = self.get_action(args)
+ if action is None:
+ self.print_help()
+ else:
+ module = self.get_module(action, args)
+ if module == "invalid":
+ self.print_help(action)
+ else:
+ method = "self.run_" + action + "(module)"
+ eval(method)
+
+ @classmethod
+ def get_action(cls, args):
+ """This method returns an action type.
+
+ .. note::
+ For 'compass check dhcp' command, it will return 'check'.
+ """
+ if len(args) == 1:
+ return None
+ elif args[1] in ACTION_MAP.keys():
+ return args[1]
+ return None
+
+ @classmethod
+ def get_module(cls, action, args):
+ """This method returns a module.
+
+ .. note::
+ For 'compass check dhcp' command, it will return 'dhcp'.
+ """
+ if len(args) <= 2:
+ return None
+ elif args[2] in ACTION_MAP[action]:
+ return args[2]
+ return "invalid"
+
+ def run_check(self, module=None):
+ """This provides a flexible sanity check.
+
+ .. note::
+ param module default set to None.
+ if parameter module is none. Compass checks all modules.
+ If module specified, Compass will only check such module.
+ """
+ if module is None:
+ pretty_print("Starting: Compass Health Check",
+ "==============================")
+ chk = check.BootCheck()
+ res = chk.run()
+ self.output_check_result(res)
+
+ else:
+ pretty_print("Checking Module: %s" % module,
+ "============================")
+ chk = check.BootCheck()
+ method = "chk._check_" + module + "()"
+ res = eval(method)
+ print "\n".join(msg for msg in res[1])
+
+ @classmethod
+ def output_check_result(cls, result):
+ """output check result."""
+ if result == {}:
+ return
+ pretty_print("\n",
+ "===============================",
+ "* Compass Health Check Report *",
+ "===============================")
+ successful = True
+ for key in result.keys():
+ if result[key][0] == 0:
+ successful = False
+ print "%s" % "\n".join(item for item in result[key][1])
+
+ print "===================="
+ if successful is True:
+ print "Compass Check completes. No problems found, all systems go"
+ sys.exit(0)
+ else:
+ print (
+ "Compass has ERRORS shown above. Please fix them before "
+ "deploying!")
+ sys.exit(1)
+
+ @classmethod
+ def run_refresh(cls, action=None):
+ """Run refresh."""
+ # TODO(xicheng): replace refresh.sh with refresh.py
+ if action is None:
+ pretty_print("Refreshing Compass...",
+ "=================")
+ subprocess.Popen(
+ ['/opt/compass/bin/refresh.sh'], shell=True)
+ elif action == "db":
+ pretty_print("Refreshing Compass Database...",
+ "===================")
+ subprocess.Popen(
+ ['/opt/compass/bin/manage_db.py createdb'], shell=True)
+ else:
+ pretty_print("Syncing with Installers...",
+ "================")
+ subprocess.Popen(
+ ['/opt/compass/bin/manage_db.py sync_from_installers'],
+ shell=True
+ )
+
+ @classmethod
+ def print_help(cls, module_help=""):
+ """print help."""
+ if module_help == "":
+ pretty_print("usage\n=====",
+ "compass <refresh|check>",
+ "type 'compass {action} --help' for detailed "
+ "command list")
+
+ elif module_help == "refresh":
+ pretty_print("usage\n=====",
+ "compass refresh [%s]" %
+ "|".join(action for action in ACTION_MAP['refresh']))
+
+ else:
+ pretty_print("usage\n=====",
+ "compass check [%s]" %
+ "|".join(action for action in ACTION_MAP['check']))
+ sys.exit(2)
+
+
+def main():
+ """Compass cli entry point."""
+ flags.init()
+ logsetting.init()
+ database.init()
+ cli = BootCLI()
+ output = cli.run(sys.argv)
+ return sys.exit(output)
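The CLI dispatch above can be exercised directly; a sketch, assuming flags, logging, and the database have been initialized as in main():

    from compass.actions.cli import BootCLI

    # Equivalent to running "compass check dhcp" from the shell:
    # args[1] selects the action ("check") and args[2] the module
    # ("dhcp") via ACTION_MAP.
    cli = BootCLI()
    cli.run(['compass', 'check', 'dhcp'])

Note that run_check() and run_refresh() are resolved through eval() on a string built from the action name, so only actions listed in ACTION_MAP are reachable.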
diff --git a/compass-tasks/actions/delete.py b/compass-tasks/actions/delete.py
new file mode 100644
index 0000000..d89994d
--- /dev/null
+++ b/compass-tasks/actions/delete.py
@@ -0,0 +1,148 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to delete a given cluster
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import cluster as cluster_api
+from compass.db.api import host as host_api
+from compass.db.api import user as user_db
+from compass.deployment.deploy_manager import DeployManager
+from compass.deployment.utils import constants as const
+
+
+def delete_cluster(
+ cluster_id, host_id_list,
+ username=None, delete_underlying_host=False
+):
+ """Delete cluster and all clusterhosts on it.
+
+ :param cluster_id: id of the cluster.
+ :type cluster_id: int
+ :param host_id_list: list of host id.
+ :type host_id_list: list of int.
+
+ If delete_underlying_host is set, all underlying hosts will
+ be deleted.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action', timeout=100) as lock:
+ if not lock:
+ raise Exception('failed to acquire lock to delete cluster')
+
+ user = user_db.get_user_object(username)
+
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, host_id_list, user)
+
+ deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
+
+ deploy_manager.remove_hosts(
+ package_only=not delete_underlying_host,
+ delete_cluster=True
+ )
+ util.ActionHelper.delete_cluster(
+ cluster_id, host_id_list, user,
+ delete_underlying_host
+ )
+
+
+def delete_cluster_host(
+ cluster_id, host_id,
+ username=None, delete_underlying_host=False
+):
+ """Delete clusterhost.
+
+ :param cluster_id: id of the cluster.
+ :type cluster_id: int
+ :param host_id: id of the host.
+ :type host_id: int
+
+ If delete_underlying_host is set, the underlying host
+ will be deleted too.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action', timeout=100) as lock:
+ if not lock:
+ raise Exception('failed to acquire lock to delete clusterhost')
+
+ user = user_db.get_user_object(username)
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, [host_id], user)
+
+ deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
+
+ deploy_manager.remove_hosts(
+ package_only=not delete_underlying_host,
+ delete_cluster=False
+ )
+ util.ActionHelper.delete_cluster_host(
+ cluster_id, host_id, user,
+ delete_underlying_host
+ )
+
+
+def delete_host(
+ host_id, cluster_id_list, username=None
+):
+ """Delete host and all clusterhosts on it.
+
+ :param host_id: id of the host.
+ :type host_id: int
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action', timeout=100) as lock:
+ if not lock:
+ raise Exception('failed to acquire lock to delete host')
+
+ user = user_db.get_user_object(username)
+ for cluster_id in cluster_id_list:
+ cluster_info = util.ActionHelper.get_cluster_info(
+ cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, [host_id], user)
+
+ deploy_manager = DeployManager(
+ adapter_info, cluster_info, hosts_info)
+
+ deploy_manager.remove_hosts(
+ package_only=True,
+ delete_cluster=False
+ )
+
+ util.ActionHelper.delete_host(
+ host_id, user
+ )
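A sketch of a cluster deletion call, with hypothetical ids and a placeholder username; with delete_underlying_host=False only the package-installer side is removed and the underlying hosts are kept:

    from compass.actions import delete

    delete.delete_cluster(
        cluster_id=1,
        host_id_list=[1, 2, 3],
        username='admin@example.com',   # placeholder account
        delete_underlying_host=False,
    )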
diff --git a/compass-tasks/actions/deploy.py b/compass-tasks/actions/deploy.py
new file mode 100644
index 0000000..53179f5
--- /dev/null
+++ b/compass-tasks/actions/deploy.py
@@ -0,0 +1,182 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to deploy a given cluster
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import cluster as cluster_db
+from compass.db.api import health_check_report as health_check_db
+from compass.db.api import user as user_db
+from compass.deployment.deploy_manager import DeployManager
+from compass.deployment.utils import constants as const
+
+
+def deploy(cluster_id, hosts_id_list, username=None):
+ """Deploy clusters.
+
+ :param cluster_hosts: clusters and hosts in each cluster to deploy.
+ :type cluster_hosts: dict of int or str to list of int or str
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action', timeout=1000) as lock:
+ if not lock:
+ raise Exception('failed to acquire lock to deploy')
+
+ user = user_db.get_user_object(username)
+
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, hosts_id_list, user)
+
+ deploy_successful = True
+ try:
+ deploy_manager = DeployManager(
+ adapter_info, cluster_info, hosts_info)
+ # deploy_manager.prepare_for_deploy()
+ logging.debug('Created deploy manager with %s %s %s'
+ % (adapter_info, cluster_info, hosts_info))
+ deployed_config = deploy_manager.deploy()
+ except Exception as error:
+ logging.exception(error)
+ deploy_successful = False
+
+ if deploy_successful:
+ util.ActionHelper.save_deployed_config(deployed_config, user)
+ util.ActionHelper.update_state(
+ cluster_id, hosts_id_list, user, state='INSTALLING'
+ )
+ else:
+ util.ActionHelper.update_state(
+ cluster_id, hosts_id_list, user, state='ERROR',
+ message='failed to start deployment', severity='ERROR'
+ )
+
+
+def redeploy(cluster_id, username=None):
+ """Deploy clusters.
+
+ :param cluster_hosts: clusters and hosts in each cluster to deploy.
+ :type cluster_hosts: dict of int or str to list of int or str
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception('failed to acquire lock to deploy')
+
+ user = user_db.get_user_object(username)
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+
+ cluster_hosts = cluster_db.list_cluster_hosts(cluster_id, user)
+ hosts_id_list = [host['id'] for host in cluster_hosts]
+
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, hosts_id_list, user)
+
+ deploy_successful = True
+ try:
+ deploy_manager = DeployManager(
+ adapter_info, cluster_info, hosts_info)
+ # deploy_manager.prepare_for_deploy()
+ deploy_manager.redeploy()
+ except Exception as error:
+ logging.exception(error)
+ deploy_successful = False
+ if deploy_successful:
+ util.ActionHelper.update_state(
+ cluster_id, hosts_id_list, user, state='INSTALLING',
+ )
+ else:
+ util.ActionHelper.update_state(
+ cluster_id, hosts_id_list, user, state='ERROR',
+ message='failed to start redeployment', severity='ERROR'
+ )
+
+
+def health_check(cluster_id, report_uri, username):
+ with util.lock('cluster_health_check') as lock:
+ if not lock:
+ raise Exception('failed to acquire lock to check health')
+
+ user = user_db.get_user_object(username)
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user
+ )
+
+ deploy_manager = DeployManager(adapter_info, cluster_info, None)
+ try:
+ deploy_manager.check_cluster_health(report_uri)
+ except Exception as exc:
+ logging.error("health_check exception: ============= %s" % exc)
+ data = {'state': 'error', 'error_message': str(exc), 'report': {}}
+ reports = health_check_db.list_health_reports(
+ cluster_id, user=user)
+ if not reports:
+ # Exception before executing command remotely for health check.
+ # No reports names sending back yet. Create a report
+ name = 'pre_remote_health_check'
+ health_check_db.add_report_record(
+ cluster_id, name, user=user, **data
+ )
+
+ health_check_db.update_multi_reports(cluster_id, user=user, **data)
+
+
+class ServerPowerMgmt(object):
+ """Power management for bare-metal machines by IPMI command."""
+ @staticmethod
+ def poweron(machine_id, user):
+ """Power on the specified machine."""
+ pass
+
+ @staticmethod
+ def poweroff(machine_id, user):
+ pass
+
+ @staticmethod
+ def reset(machine_id, user):
+ pass
+
+
+class HostPowerMgmt(object):
+ """Power management for hosts installed OS by OS installer. OS installer
+
+ will poweron/poweroff/reset host.
+
+ """
+ @staticmethod
+ def poweron(host_id, user):
+ """Power on the specified host."""
+ pass
+
+ @staticmethod
+ def poweroff(host_id, user):
+ pass
+
+ @staticmethod
+ def reset(host_id, user):
+ pass
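A sketch of kicking off a deployment, with hypothetical ids and a placeholder username; on success the cluster and hosts move to INSTALLING, and on any exception to ERROR:

    from compass.actions import deploy

    deploy.deploy(cluster_id=1, hosts_id_list=[1, 2],
                  username='admin@example.com')   # placeholder account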
diff --git a/compass-tasks/actions/health_check/__init__.py b/compass-tasks/actions/health_check/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/actions/health_check/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/actions/health_check/base.py b/compass-tasks/actions/health_check/base.py
new file mode 100644
index 0000000..22b6fae
--- /dev/null
+++ b/compass-tasks/actions/health_check/base.py
@@ -0,0 +1,57 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base class for Compass Health Check."""
+from compass.actions.health_check import utils as health_check_utils
+from compass.db.api import adapter as adapter_api
+from compass.utils import setting_wrapper as setting
+
+
+class BaseCheck(object):
+ """health check base class."""
+
+ def __init__(self):
+ self.config = setting
+ self.code = 1
+ self.messages = []
+ self.dist, self.version, self.release = health_check_utils.get_dist()
+ adapter_api.load_adapters_internal()
+ self.os_installer = self._get_os_installer()
+ self.package_installer = self._get_package_installer()
+
+ def _get_os_installer(self):
+ installer = adapter_api.OS_INSTALLERS.values()[0]
+ os_installer = {}
+ os_installer['name'] = health_check_utils.strip_name(
+ installer['name'])
+ os_installer.update(installer['settings'])
+ return os_installer
+
+ def _get_package_installer(self):
+        """pick the first loaded package installer's settings."""
+        installer = adapter_api.PACKAGE_INSTALLERS.values()[0]
+        package_installer = {}
+ package_installer['name'] = health_check_utils.strip_name(
+ installer['name'])
+ package_installer.update(installer['settings'])
+ return package_installer
+
+ def _set_status(self, code, message):
+ """set status."""
+ self.code = code
+ self.messages.append(message)
+
+ def get_status(self):
+ """get status."""
+ return (self.code, self.messages)
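Concrete checkers build on the (code, messages) contract above: code stays 1 until _set_status(0, ...) records a failure. A hypothetical subclass, assuming a configured Compass environment (BaseCheck.__init__ loads adapters and installer settings):

    from compass.actions.health_check import base

    class ExampleCheck(base.BaseCheck):
        """hypothetical checker illustrating the status contract."""
        NAME = "Example Check"

        def run(self):
            healthy = True   # replace with a real probe
            if not healthy:
                self._set_status(0, "[%s]Error: probe failed" % self.NAME)
            return self.get_status()   # (1, [...]) means all clear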
diff --git a/compass-tasks/actions/health_check/check.py b/compass-tasks/actions/health_check/check.py
new file mode 100644
index 0000000..c1adbc6
--- /dev/null
+++ b/compass-tasks/actions/health_check/check.py
@@ -0,0 +1,96 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Main Entry Point of Compass Health Check."""
+from compass.actions.health_check import base
+from compass.actions.health_check import check_apache
+from compass.actions.health_check import check_celery
+from compass.actions.health_check import check_dhcp
+from compass.actions.health_check import check_dns
+from compass.actions.health_check import check_hds
+from compass.actions.health_check import check_misc
+from compass.actions.health_check import check_os_installer
+from compass.actions.health_check import check_package_installer
+from compass.actions.health_check import check_squid
+from compass.actions.health_check import check_tftp
+
+
+class BootCheck(base.BaseCheck):
+ """health check for all components."""
+
+ def run(self):
+ """do health check."""
+ status = {}
+ status['apache'] = self._check_apache()
+ status['celery'] = self._check_celery()
+ status['dhcp'] = self._check_dhcp()
+ status['dns'] = self._check_dns()
+ status['hds'] = self._check_hds()
+ status['os_installer'] = self._check_os_installer()
+ status['package_installer'] = self._check_package_installer()
+ status['squid'] = self._check_squid()
+ status['tftp'] = self._check_tftp()
+ status['other'] = self._check_misc()
+
+ return status
+
+ def _check_apache(self):
+ """do apache health check."""
+ checker = check_apache.ApacheCheck()
+ return checker.run()
+
+ def _check_celery(self):
+ """do celery health check."""
+ checker = check_celery.CeleryCheck()
+ return checker.run()
+
+ def _check_dhcp(self):
+ """do dhcp health check."""
+ checker = check_dhcp.DhcpCheck()
+ return checker.run()
+
+ def _check_dns(self):
+ """do dns health check."""
+ checker = check_dns.DnsCheck()
+ return checker.run()
+
+ def _check_hds(self):
+ """do hds health check."""
+ checker = check_hds.HdsCheck()
+ return checker.run()
+
+ def _check_os_installer(self):
+ """do os installer health check."""
+ checker = check_os_installer.OsInstallerCheck()
+ return checker.run()
+
+ def _check_package_installer(self):
+ """do package installer health check."""
+ checker = check_package_installer.PackageInstallerCheck()
+ return checker.run()
+
+ def _check_squid(self):
+ """do squid health check."""
+ checker = check_squid.SquidCheck()
+ return checker.run()
+
+ def _check_tftp(self):
+ """do tftp health check."""
+ checker = check_tftp.TftpCheck()
+ return checker.run()
+
+ def _check_misc(self):
+ """do misc health check."""
+ checker = check_misc.MiscCheck()
+ return checker.run()
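BootCheck.run() returns a dict mapping each component name to its (code, messages) tuple; a sketch of consuming it, again assuming a configured environment:

    from compass.actions.health_check import check

    checker = check.BootCheck()
    status = checker.run()
    for component, (code, messages) in status.items():
        print component, 'OK' if code == 1 else 'FAILED'
        print '\n'.join(messages)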
diff --git a/compass-tasks/actions/health_check/check_apache.py b/compass-tasks/actions/health_check/check_apache.py
new file mode 100644
index 0000000..294d6f9
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_apache.py
@@ -0,0 +1,89 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Health Check module for Apache service."""
+
+import socket
+import urllib2
+
+from compass.actions.health_check import base
+from compass.actions.health_check import utils as health_check_utils
+
+
+class ApacheCheck(base.BaseCheck):
+ """apache server health check class."""
+ NAME = "Apache Check"
+
+ def run(self):
+ """do the healthcheck."""
+ if self.dist in ("centos", "redhat", "fedora", "scientific linux"):
+ apache_service = 'httpd'
+ else:
+ apache_service = 'apache2'
+ self.check_apache_conf(apache_service)
+ print "[Done]"
+ self.check_apache_running(apache_service)
+ print "[Done]"
+ if self.code == 1:
+ self.messages.append(
+ "[%s]Info: Apache health check has completed. "
+ "No problems found, all systems go." % self.NAME)
+ return (self.code, self.messages)
+
+ def check_apache_conf(self, apache_service):
+ """Validates if Apache settings.
+
+ :param apache_service : service type of apache, os dependent.
+ e.g. httpd or apache2
+ :type apache_service : string
+
+ """
+ print "Checking Apache Config......",
+ conf_err_msg = health_check_utils.check_path(
+ self.NAME,
+ "/etc/%s/conf.d/ods-server.conf" % apache_service)
+ if not conf_err_msg == "":
+ self._set_status(0, conf_err_msg)
+
+ wsgi_err_msg = health_check_utils.check_path(
+ self.NAME,
+ '/var/www/compass/compass.wsgi')
+ if not wsgi_err_msg == "":
+ self._set_status(0, wsgi_err_msg)
+
+ return True
+
+ def check_apache_running(self, apache_service):
+ """Checks if Apache service is running on port 80."""
+
+ print "Checking Apache service......",
+ serv_err_msg = health_check_utils.check_service_running(self.NAME,
+ apache_service)
+ if not serv_err_msg == "":
+ self._set_status(0, serv_err_msg)
+ if 'http' != socket.getservbyport(80):
+ self._set_status(
+ 0,
+ "[%s]Error: Apache is not listening on port 80."
+ % self.NAME)
+ try:
+ html = urllib2.urlopen('http://localhost')
+ html.geturl()
+ except Exception:
+ self._set_status(
+ 0,
+ "[%s]Error: Apache is not listening on port 80."
+ % self.NAME)
+
+ return True
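The checks above lean on health_check_utils.check_path(), which (as used throughout these checkers) returns an empty string when the path exists and a formatted error message naming the check otherwise; a sketch:

    from compass.actions.health_check import utils as health_check_utils

    err = health_check_utils.check_path(
        'Apache Check', '/etc/httpd/conf.d/ods-server.conf')
    if err:
        print err   # e.g. an "[Apache Check]Error: ..." message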
diff --git a/compass-tasks/actions/health_check/check_celery.py b/compass-tasks/actions/health_check/check_celery.py
new file mode 100644
index 0000000..2d8d27c
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_celery.py
@@ -0,0 +1,115 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Health Check module for Celery."""
+import commands
+import os
+
+from celery.task.control import inspect
+
+from compass.actions.health_check import base
+from compass.actions.health_check import utils as health_check_utils
+
+
+class CeleryCheck(base.BaseCheck):
+ """celery health check class."""
+ NAME = "Celery Check."
+
+ def run(self):
+ """do health check."""
+ self.check_compass_celery_setting()
+ print "[Done]"
+ self.check_celery_backend()
+ print "[Done]"
+ if self.code == 1:
+ self.messages.append("[%s]Info: Celery health check "
+ "has completed. No problems found, "
+ "all systems go." % self.NAME)
+ return (self.code, self.messages)
+
+ def check_compass_celery_setting(self):
+ """Validates Celery settings."""
+
+ print "Checking Celery setting......",
+ setting_map = {
+ 'logfile': 'CELERY_LOGFILE',
+ 'configdir': 'CELERYCONFIG_DIR',
+ 'configfile': 'CELERYCONFIG_FILE',
+ }
+ unset = []
+ res = health_check_utils.validate_setting('Celery',
+ self.config,
+ 'CELERY_LOGFILE')
+ if res is False:
+ unset.append(setting_map["logfile"])
+ self._set_status(0, res)
+
+ res = health_check_utils.validate_setting('Celery',
+ self.config,
+ 'CELERYCONFIG_DIR')
+ if res is False:
+ unset.append(setting_map["configdir"])
+ self._set_status(0, res)
+
+ res = health_check_utils.validate_setting('Celery',
+ self.config,
+ 'CELERYCONFIG_FILE')
+ if res is False:
+ unset.append(setting_map["configdir"])
+ self._set_status(0, res)
+
+ if len(unset) != 0:
+ self._set_status(0,
+ "[%s]Error: Unset celery settings: %s"
+ " in /etc/compass/setting"
+ % (self.NAME, ', '.join(item for item in unset)))
+ return True
+
+ def check_celery_backend(self):
+ """Checks if Celery backend is running and configured properly."""
+
+ print "Checking Celery Backend......",
+ if 'celery worker' not in commands.getoutput('ps -ef'):
+ self._set_status(0, "[%s]Error: celery is not running" % self.NAME)
+ return True
+
+ if not os.path.exists('/etc/compass/celeryconfig'):
+ self._set_status(
+ 0,
+ "[%s]Error: No celery config file found for Compass"
+ % self.NAME)
+ return True
+
+ try:
+ insp = inspect()
+            celery_stats = insp.stats()
+ if not celery_stats:
+ self._set_status(
+ 0,
+ "[%s]Error: No running Celery workers were found."
+ % self.NAME)
+ except IOError as error:
+ self._set_status(
+ 0,
+ "[%s]Error: Failed to connect to the backend: %s"
+ % (self.NAME, str(error)))
+ from errno import errorcode
+ if (
+ len(error.args) > 0 and
+ errorcode.get(error.args[0]) == 'ECONNREFUSED'
+ ):
+ self.messages.append(
+ "[%s]Error: RabbitMQ server isn't running"
+ % self.NAME)
+ return True
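The backend probe relies on Celery's old inspect API (celery.task.control, as imported above): stats() returns a dict keyed by worker hostname when workers respond, and an empty or None result otherwise. A sketch in isolation:

    from celery.task.control import inspect

    insp = inspect()
    celery_stats = insp.stats()
    if not celery_stats:
        print "No running Celery workers were found."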
diff --git a/compass-tasks/actions/health_check/check_dhcp.py b/compass-tasks/actions/health_check/check_dhcp.py
new file mode 100644
index 0000000..e3bae1e
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_dhcp.py
@@ -0,0 +1,184 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Health Check module for DHCP service."""
+import commands
+import os
+import re
+import socket
+import xmlrpclib
+
+from compass.actions.health_check import base
+
+
+class DhcpCheck(base.BaseCheck):
+ """dhcp health check class."""
+
+ NAME = "DHCP Check"
+
+ def run(self):
+ """do health check."""
+ method_name = "self.check_" + self.os_installer['name'] + "_dhcp()"
+ return eval(method_name)
+
+ def check_cobbler_dhcp(self):
+ """Checks if Cobbler has taken over DHCP service."""
+
+ try:
+ remote = xmlrpclib.Server(
+ self.os_installer['cobbler_url'],
+ allow_none=True)
+ credentials = self.os_installer['credentials']
+ remote.login(
+ credentials['username'], credentials['password'])
+ except Exception:
+ self._set_status(
+ 0,
+ "[%s]Error: Cannot login to Cobbler with "
+ "the tokens provided in the config file" % self.NAME)
+ return (self.code, self.messages)
+
+ cobbler_settings = remote.get_settings()
+ if cobbler_settings['manage_dhcp'] == 0:
+ self.messages.append(
+ "[%s]Info: DHCP service is "
+ "not managed by Compass" % self.NAME)
+ self.code = 0
+ return (self.code, self.messages)
+
+ self.check_cobbler_dhcp_template()
+ print "[Done]"
+ self.check_dhcp_service()
+ self.check_dhcp_netmask()
+ print "[Done]"
+ if self.code == 1:
+ self.messages.append(
+ "[%s]Info: DHCP health check has completed. "
+ "No problems found, all systems go." % self.NAME)
+
+ return (self.code, self.messages)
+
+ def check_cobbler_dhcp_template(self):
+ """Validates Cobbler's DHCP template file."""
+ print "Checking DHCP template......",
+ if os.path.exists("/etc/cobbler/dhcp.template"):
+ var_map = {
+ "match_next_server": False,
+ "match_subnet": False,
+ "match_filename": False,
+ "match_range": False,
+ }
+
+ ip_regex = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
+
+ dhcp_template = open("/etc/cobbler/dhcp.template")
+ for line in dhcp_template.readlines():
+ if line.find("next_server") != -1:
+ elmlist = line.split(" ")
+ for elm in elmlist:
+ if ";" in elm:
+ elm = elm[:-2]
+
+ if "$next_server" in elm or ip_regex.match(elm):
+ var_map["match_next_server"] = True
+
+ elif line.find("subnet") != -1 and line.find("{") != -1:
+ elmlist = line.split(" ")
+ for elm in elmlist:
+ if ip_regex.match(elm):
+ if elm[-1] == "0" and "255" not in elm:
+ var_map["match_subnet"] = True
+ elif elm[-1] != "0":
+ self.messages.append(
+ "[%s]Error: Subnet should be set "
+ "in the form of 192.168.0.0 in"
+ "/etc/cobbler/dhcp.template" % self.NAME)
+
+ elif line.find("filename") != -1:
+ var_map["match_filename"] = True
+ elif line.find("range dynamic-bootp") != -1:
+ elmlist = line.split(" ")
+ ip_count = 0
+ for elm in elmlist:
+ if ";" in elm and "\n" in elm:
+ elm = elm[:-2]
+
+ if ip_regex.match(elm):
+ ip_count += 1
+
+ if ip_count != 2:
+ self.messages.append(
+ "[%s]Error: DHCP range should be set "
+ "between two IP addresses in "
+ "/etc/cobbler/dhcp.template" % self.NAME)
+ else:
+ var_map["match_range"] = True
+
+ dhcp_template.close()
+ fails = []
+ for var in var_map.keys():
+ if var_map[var] is False:
+ fails.append(var)
+
+ if len(fails) != 0:
+ self._set_status(
+ 0,
+ "[%s]Info: DHCP template file "
+ "failed components: %s" % (
+ self.NAME, ' '.join(failed for failed in fails)))
+
+ else:
+ self._set_status(
+ 0,
+ "[%s]Error: DHCP template file doesn't exist, "
+ "health check failed." % self.NAME)
+
+ return True
+
+ def check_dhcp_netmask(self):
+ with open('/etc/dhcp/dhcpd.conf') as conf_reader:
+ lines = conf_reader.readlines()
+ for line in lines:
+ if re.search('^subnet', line):
+ elm_list = line.split(' ')
+ break
+ subnet_ip = elm_list[1]
+ netmask = elm_list[-2]
+ subnet_ip_elm = subnet_ip.split('.')
+ netmask_elm = netmask.split('.')
+ for index, digit in enumerate(subnet_ip_elm):
+ if int(digit) & int(netmask_elm[index]) != int(digit):
+ self._set_status(
+ 0,
+ "[%s]Info: DHCP subnet IP and "
+ "netmask do not match" % self.NAME)
+ break
+ return True
+
+ def check_dhcp_service(self):
+ """Checks if DHCP is running on port 67."""
+ print "Checking DHCP service......",
+ if not commands.getoutput('pgrep dhcp'):
+ self._set_status(
+ 0,
+ "[%s]Error: dhcp service does not "
+ "seem to be running" % self.NAME)
+
+ if socket.getservbyport(67) != 'bootps':
+ self._set_status(
+ 0,
+ "[%s]Error: bootps is not listening "
+ "on port 67" % self.NAME)
+
+ return True
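check_dhcp_netmask() verifies, octet by octet, that the subnet address is aligned to the netmask, i.e. subnet & netmask == subnet. A standalone sketch of that test:

    def subnet_matches_netmask(subnet_ip, netmask):
        """True if the subnet address survives masking unchanged."""
        for ip_octet, mask_octet in zip(subnet_ip.split('.'),
                                        netmask.split('.')):
            if int(ip_octet) & int(mask_octet) != int(ip_octet):
                return False
        return True

    assert subnet_matches_netmask('192.168.0.0', '255.255.255.0')
    assert not subnet_matches_netmask('192.168.0.1', '255.255.255.0')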
diff --git a/compass-tasks/actions/health_check/check_dns.py b/compass-tasks/actions/health_check/check_dns.py
new file mode 100644
index 0000000..843d7e2
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_dns.py
@@ -0,0 +1,139 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Health Check module for DNS service."""
+
+import commands
+import os
+import socket
+import xmlrpclib
+
+from compass.actions.health_check import base
+
+
+class DnsCheck(base.BaseCheck):
+ """dns health check class."""
+ NAME = "DNS Check"
+
+ def run(self):
+ """do health check."""
+ method_name = "self.check_" + self.os_installer['name'] + "_dns()"
+ return eval(method_name)
+
+ def check_cobbler_dns(self):
+ """Checks if Cobbler has taken over DNS service."""
+ try:
+ remote = xmlrpclib.Server(
+ self.os_installer['cobbler_url'],
+ allow_none=True)
+ credentials = self.os_installer['credentials']
+ remote.login(
+ credentials['username'], credentials['password'])
+ except Exception:
+ self._set_status(0,
+ "[%s]Error: Cannot login to Cobbler "
+ "with the tokens provided in the config file"
+ % self.NAME)
+ return (self.code, self.messages)
+
+ cobbler_settings = remote.get_settings()
+ if cobbler_settings['manage_dns'] == 0:
+ self.messages.append('[DNS]Info: DNS is not managed by Compass')
+ return (0, self.messages)
+ self.check_cobbler_dns_template()
+ print "[Done]"
+ self.check_dns_service()
+ print "[Done]"
+ if self.code == 1:
+ self.messages.append(
+ "[%s]Info: DNS health check has complated. "
+ "No problems found, all systems go." % self.NAME)
+ return (self.code, self.messages)
+
+ def check_cobbler_dns_template(self):
+ """Validates Cobbler's DNS template file."""
+
+ print "Checking DNS template......",
+ if os.path.exists("/etc/cobbler/named.template"):
+ var_map = {
+ "match_port": False,
+ "match_allow_query": False,
+ }
+ named_template = open("/etc/cobbler/named.template")
+ host_ip = socket.gethostbyname(socket.gethostname())
+ missing_query = []
+ for line in named_template.readlines():
+ if "listen-on port 53" in line and host_ip in line:
+ var_map["match_port"] = True
+
+ if "allow-query" in line:
+ for subnet in ["127.0.0.0/8"]:
+ if subnet not in line:
+ missing_query.append(subnet)
+
+ named_template.close()
+
+ if var_map["match_port"] is False:
+ self.messages.append(
+ "[%s]Error: named service port "
+ "and/or IP is misconfigured in "
+ "/etc/cobbler/named.template" % self.NAME)
+
+ if len(missing_query) != 0:
+ self.messages.append(
+ "[%s]Error: Missing allow_query values in "
+ "/etc/cobbler/named.template:%s" % (
+ self.NAME,
+ ', '.join(subnet for subnet in missing_query)))
+ else:
+ var_map["match_allow_query"] = True
+
+ fails = []
+ for var in var_map.keys():
+ if var_map[var] is False:
+ fails.append(var)
+
+ if len(fails) != 0:
+ self._set_status(
+ 0,
+ "[%s]Info: DNS template failed components: "
+ "%s" % (
+ self.NAME,
+ ' '.join(failed for failed in fails)))
+
+ else:
+ self._set_status(
+ 0,
+ "[%s]Error: named template file doesn't exist, "
+ "health check failed." % self.NAME)
+
+ return True
+
+ def check_dns_service(self):
+ """Checks if DNS is running on port 53."""
+
+ print "Checking DNS service......",
+ if 'named' not in commands.getoutput('ps -ef'):
+ self._set_status(
+ 0,
+ "[%s]Error: named service does not seem to be "
+ "running" % self.NAME)
+
+ if socket.getservbyport(53) != 'domain':
+ self._set_status(
+ 0,
+ "[%s]Error: domain service is not listening on port "
+ "53" % self.NAME)
+
+ return None
diff --git a/compass-tasks/actions/health_check/check_hds.py b/compass-tasks/actions/health_check/check_hds.py
new file mode 100644
index 0000000..d176f1f
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_hds.py
@@ -0,0 +1,97 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Health Check module for Hardware Discovery."""
+import logging
+
+from compass.actions.health_check import base
+from compass.actions.health_check import utils as health_check_utils
+
+
+class HdsCheck(base.BaseCheck):
+ """hds health check class."""
+ NAME = "HDS Check"
+
+ def run(self):
+ """do health check."""
+ if self.dist in ("centos", "redhat", "fedora", "scientific linux"):
+ pkg_type = "yum"
+ else:
+ pkg_type = "apt"
+
+ try:
+ pkg_module = __import__(pkg_type)
+ except Exception:
+ self._set_status(
+ 0, "[%s]Error: No module named %s please install it first."
+ % (self.NAME, pkg_type)
+ )
+ return (self.code, self.messages)
+
+ logging.info('import %s: %s', pkg_type, pkg_module)
+ method_name = 'self.check_' + pkg_type + '_snmp(pkg_module)'
+ eval(method_name)
+ print "[Done]"
+ self.check_snmp_mibs()
+ print "[Done]"
+ if self.code == 1:
+ self.messages.append("[%s]Info: hds health check has complated. "
+ "No problems found, all systems go."
+ % self.NAME)
+
+ return (self.code, self.messages)
+
+ def check_yum_snmp(self, pkg_module):
+ """Check if SNMP yum dependencies are installed
+
+ :param pkg_module : python yum library
+ :type pkg_module : python module
+
+ """
+ print "Checking SNMP Packages......",
+ yum_base = pkg_module.YumBase()
+ uninstalled = []
+ for package in ['net-snmp-utils', 'net-snmp', 'net-snmp-python']:
+ if len(yum_base.rpmdb.searchNevra(name=package)) == 0:
+ self.messages.append("[%s]Error: %s package is required "
+ "for HDS" % (self.NAME, package))
+ uninstalled.append(package)
+
+ if len(uninstalled) != 0:
+ self._set_status(0, "[%s]Info: Uninstalled packages: %s"
+ % (self.NAME,
+ ', '.join(item for item in uninstalled)))
+
+ return True
+
+ def check_apt_snmp(self, pkg_module):
+ """do apt health check."""
+ return None
+
+ def check_snmp_mibs(self):
+ """Checks if SNMP MIB files are properly placed."""
+
+ print "Checking SNMP MIBs......",
+ conf_err_msg = health_check_utils.check_path(self.NAME,
+ '/etc/snmp/snmp.conf')
+ if not conf_err_msg == "":
+ self._set_status(0, conf_err_msg)
+
+ mibs_err_msg = health_check_utils.check_path(
+ self.NAME,
+ '/usr/local/share/snmp/mibs')
+ if not mibs_err_msg == "":
+ self._set_status(0, mibs_err_msg)
+
+ return True
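The yum probe queries the rpm database through the dynamically imported yum module; a sketch of the same call in isolation (only meaningful on a yum-based host):

    import yum

    # searchNevra(name=...) returns the installed packages matching that
    # exact name; an empty list means the package is absent.
    yum_base = yum.YumBase()
    if not yum_base.rpmdb.searchNevra(name='net-snmp'):
        print "net-snmp is not installed"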
diff --git a/compass-tasks/actions/health_check/check_misc.py b/compass-tasks/actions/health_check/check_misc.py
new file mode 100644
index 0000000..b8beb1b
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_misc.py
@@ -0,0 +1,219 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Miscellaneous Health Check for Compass."""
+import logging
+
+from compass.actions.health_check import base
+from compass.actions.health_check import utils as health_check_utils
+
+
+class MiscCheck(base.BaseCheck):
+ """health check for misc."""
+ NAME = "Miscellaneous Check"
+
+ MISC_MAPPING = {
+ "yum": "rsyslog ntp iproute openssh-clients python git wget "
+ "python-setuptools "
+ "amqp mod_wsgi httpd squid "
+ "dhcp bind rsync yum-utils xinetd tftp-server gcc "
+ "net-snmp-utils net-snmp".split(" "),
+ "pip": "netaddr flask flask_script flask_restful amqplib "
+ "flask_sqlalchemy paramiko mock celery six discover daemon "
+ "unittest2 chef".split(" "),
+ "disable": "iptables ip6tables".split(" "),
+ "enable": "httpd squid xinetd dhcpd named sshd rsyslog cobblerd "
+ "ntpd compass-celeryd compass-progress-updated".split(" "),
+ }
+
+ def run(self):
+ """do health check."""
+ self.check_linux_dependencies()
+ print "[Done]"
+ self.check_pip_dependencies()
+ print "[Done]"
+ self.check_ntp()
+ print "[Done]"
+ self.check_rsyslogd()
+ print "[Done]"
+ self.check_chkconfig()
+ print "[Done]"
+ self.check_selinux()
+ print "[Done]"
+
+ if self.code == 1:
+ self.messages.append(
+ "[%s]Info: Miscellaneous check has completed "
+ "No problems found, all systems go." % self.NAME)
+ return (self.code, self.messages)
+
+ def check_linux_dependencies(self):
+ """Checks if dependencies are installed."""
+ print "Checking Linux dependencies....",
+ if self.dist in ("centos", "redhat", "fedora", "scientific linux"):
+ pkg_type = "yum"
+ else:
+ pkg_type = "apt"
+
+ try:
+ pkg_module = __import__(pkg_type)
+ except Exception:
+ self._set_status(
+ 0,
+ "[%s]Error: No module named %s, "
+ "please install it first." % (self.NAME, pkg_type))
+ return True
+
+ logging.info('import %s: %s', pkg_type, pkg_module)
+ check_method = getattr(self, 'check_%s_dependencies' % pkg_type)
+ check_method(pkg_module)
+
+ def check_yum_dependencies(self, pkg_module):
+ """Checks if yum dependencies are installed.
+
+ :param pkg_module : python yum library
+ :type pkg_module : python module
+
+ """
+ print "Checking Yum dependencies......",
+ yum_base = pkg_module.YumBase()
+ uninstalled = []
+ for package in self.MISC_MAPPING["yum"]:
+ if len(yum_base.rpmdb.searchNevra(name=package)) == 0:
+ self._set_status(
+ 0,
+ "[%s]Error: %s package is required"
+ % (self.NAME, package))
+ uninstalled.append(package)
+
+ if len(uninstalled) != 0:
+ self._set_status(
+ 0,
+ "[%s]Info: Uninstalled yum packages: %s"
+ % (self.NAME, ', '.join(item for item in uninstalled)))
+
+ return True
+
+ def check_pip_dependencies(self):
+ """Checks if required pip packages are installed."""
+ print "Checking pip dependencies......",
+ uninstalled = []
+ for module in self.MISC_MAPPING['pip']:
+ try:
+ __import__(module)
+ except Exception:
+ self._set_status(
+ 0,
+ "[%s]Error: pip package %s is requred"
+ % (self.NAME, module))
+ uninstalled.append(module)
+
+ if len(uninstalled) != 0:
+ self._set_status(
+ 0,
+ "[%s]Info: Uninstalled pip packages: %s"
+ % (self.NAME, ', '.join(item for item in uninstalled)))
+
+ return True
+
+ def check_ntp(self):
+ """Validates ntp configuration and service."""
+
+ print "Checking NTP......",
+ conf_err_msg = health_check_utils.check_path(self.NAME,
+ '/etc/ntp.conf')
+ if not conf_err_msg == "":
+ self._set_status(0, conf_err_msg)
+
+ serv_err_msg = health_check_utils.check_service_running(self.NAME,
+ 'ntpd')
+ if not serv_err_msg == "":
+ self._set_status(0, serv_err_msg)
+
+ return True
+
+ def check_rsyslogd(self):
+ """Validates rsyslogd configuration and service."""
+
+ print "Checking rsyslog......",
+ conf_err_msg = health_check_utils.check_path(self.NAME,
+ '/etc/rsyslog.conf')
+ if not conf_err_msg == "":
+ self._set_status(0, conf_err_msg)
+
+ dir_err_msg = health_check_utils.check_path(self.NAME,
+ '/etc/rsyslog.d/')
+ if not dir_err_msg == "":
+ self._set_status(0, dir_err_msg)
+
+ serv_err_msg = health_check_utils.check_service_running(self.NAME,
+ 'rsyslogd')
+ if not serv_err_msg == "":
+ self._set_status(0, serv_err_msg)
+
+ return True
+
+ def check_chkconfig(self):
+ """Check if required services are enabled on the start up."""
+
+ print "Checking chkconfig......",
+ serv_to_disable = []
+ for serv in self.MISC_MAPPING["disable"]:
+ if health_check_utils.check_chkconfig(serv) is True:
+ self._set_status(
+ 0,
+ "[%s]Error: %s is not disabled"
+ % (self.NAME, serv))
+ serv_to_disable.append(serv)
+
+ if len(serv_to_disable) != 0:
+ self._set_status(
+ 0,
+ "[%s]Info: You need to disable these services "
+ "on system start-up: %s"
+ % (self.NAME,
+ ", ".join(item for item in serv_to_disable)))
+
+ serv_to_enable = []
+ for serv in self.MISC_MAPPING["enable"]:
+ if health_check_utils.check_chkconfig(serv) is False:
+ self._set_status(
+ 0, "[%s]Error: %s is disabled" % (self.NAME, serv))
+ serv_to_enable.append(serv)
+
+ if len(serv_to_enable) != 0:
+ self._set_status(0, "[%s]Info: You need to enable these "
+ "services on system start-up: %s"
+ % (self.NAME,
+ ", ".join(item for item in serv_to_enable)))
+
+ return True
+
+ def check_selinux(self):
+ """Check if SELinux is disabled."""
+ print "Checking Selinux......",
+ disabled = False
+ with open("/etc/selinux/config") as selinux:
+ for line in selinux:
+ if "SELINUX=disabled" in line:
+ disabled = True
+ break
+
+ if disabled is False:
+ self._set_status(
+ 0,
+ "[%s]Selinux is not disabled, "
+ "please disable it in /etc/selinux/config." % self.NAME)
+
+ return True
diff --git a/compass-tasks/actions/health_check/check_os_installer.py b/compass-tasks/actions/health_check/check_os_installer.py
new file mode 100644
index 0000000..6ef9818
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_os_installer.py
@@ -0,0 +1,151 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass Health Check module for OS Installer."""
+
+import os
+import xmlrpclib
+
+from compass.actions.health_check import base
+
+
+class OsInstallerCheck(base.BaseCheck):
+ """os installer health check."""
+ NAME = "OS Installer Check"
+
+ def run(self):
+ """do health check."""
+ method_name = 'self.' + self.os_installer['name'] + '_check()'
+ return eval(method_name)
+
+ def cobbler_check(self):
+ """Runs cobbler check from xmlrpc client."""
+ try:
+ remote = xmlrpclib.Server(
+ self.os_installer['cobbler_url'],
+ allow_none=True)
+ credentials = self.os_installer['credentials']
+ token = remote.login(
+ credentials['username'], credentials['password'])
+ except Exception:
+ self.code = 0
+ self.messages.append(
+ "[%s]Error: Cannot login to Cobbler with "
+ "the tokens provided in the config file"
+ % self.NAME)
+ self.messages.append(
+ "[%s]Error: Failed to connect to Cobbler "
+ "API, please check if /etc/cobbler/setting "
+ "is properly configured" % self.NAME)
+ return (self.code, self.messages)
+
+ check_result = remote.check(token)
+
+ # filter in one pass; popping from the list while enumerating it
+ # would skip the entry after each removal
+ check_result = [
+ message for message in check_result if "SELinux" not in message
+ ]
+
+ if len(check_result) != 0:
+ self.code = 0
+ for error_msg in check_result:
+ self.messages.append("[%s]Error: " % self.NAME + error_msg)
+
+ if len(remote.get_distros()) == 0:
+ self._set_status(0,
+ "[%s]Error: No Cobbler distros found" % self.NAME)
+
+ if len(remote.get_profiles()) == 0:
+ self._set_status(0,
+ "[%s]Error: No Cobbler profiles found"
+ % self.NAME)
+
+ found_ppa = False
+ if len(remote.get_repos()) != 0:
+ for repo in remote.get_repos():
+ if 'ppa_repo' in repo['mirror']:
+ found_ppa = True
+ break
+
+ if found_ppa is False:
+ self._set_status(0,
+ "[%s]Error: No repository ppa_repo found"
+ % self.NAME)
+
+ path_map = {
+ 'match_kickstart': (
+ '/var/lib/cobbler/kickstarts/',
+ ['default.ks', 'default.seed']
+ ),
+ 'match_snippets': (
+ '/var/lib/cobbler/snippets/',
+ [
+ 'kickstart_done',
+ 'kickstart_start',
+ 'kickstart_pre_partition_disks',
+ 'kickstart_partition_disks',
+ 'kickstart_pre_anamon',
+ 'kickstart_post_anamon',
+ 'kickstart_pre_install_network_config',
+ 'kickstart_network_config',
+ 'kickstart_post_install_network_config',
+ 'kickstart_chef',
+ 'kickstart_ntp',
+ 'kickstart_yum_repo_config',
+ 'preseed_pre_partition_disks',
+ 'preseed_partition_disks',
+ 'preseed_pre_anamon',
+ 'preseed_post_anamon',
+ 'preseed_pre_install_network_config',
+ 'preseed_network_config',
+ 'preseed_post_install_network_config',
+ 'preseed_chef',
+ 'preseed_ntp',
+ 'preseed_apt_repo_config',
+ ]
+ ),
+ 'match_ks_mirror': (
+ '/var/www/cobbler/',
+ ['ks_mirror']
+ ),
+ 'match_repo_mirror': (
+ '/var/www/cobbler/',
+ ['repo_mirror']
+ ),
+ 'match_iso': (
+ '/var/lib/cobbler/',
+ ['iso']
+ ),
+ }
+ not_exists = []
+ for key in path_map.keys():
+ for path in path_map[key][1]:
+ if not os.path.exists(path_map[key][0] + path):
+ not_exists.append(path_map[key][0] + path)
+
+ if len(not_exists) != 0:
+ self._set_status(
+ 0,
+ "[%s]Error: These locations do not exist: "
+ "%s" % (
+ self.NAME,
+ ', '.join(item for item in not_exists)
+ )
+ )
+
+ if self.code == 1:
+ self.messages.append(
+ "[%s]Info: OS Installer health check has completed."
+ " No problems found, all systems go." % self.NAME)
+
+ return (self.code, self.messages)
diff --git a/compass-tasks/actions/health_check/check_package_installer.py b/compass-tasks/actions/health_check/check_package_installer.py
new file mode 100644
index 0000000..efcd8e8
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_package_installer.py
@@ -0,0 +1,68 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Health Check module for Package Installer."""
+import logging
+import os
+import requests
+
+from compass.actions.health_check import base
+from compass.actions.health_check import utils as health_check_utils
+
+
+class PackageInstallerCheck(base.BaseCheck):
+ """package installer health check class."""
+ NAME = "Package Installer Check"
+
+ def run(self):
+ """do health check."""
+ method_name = "self." + self.package_installer['name'] + "_check()"
+ return eval(method_name)
+
+ def chef_check(self):
+ """Checks chef setting, cookbooks and roles."""
+ self.check_chef_config_dir()
+ print "[Done]"
+ if self.code == 1:
+ self.messages.append(
+ "[%s]Info: Package installer health check "
+ "has completed. No problems found, all systems "
+ "go." % self.NAME)
+
+ return (self.code, self.messages)
+
+ def check_chef_config_dir(self):
+ """Validates chef configuration directories."""
+
+ print "Checking Chef configurations......",
+ message = health_check_utils.check_path(self.NAME, '/etc/chef-server/')
+ if not message == "":
+ self._set_status(0, message)
+
+ message = health_check_utils.check_path(self.NAME, '/opt/chef-server/')
+ if not message == "":
+ self._set_status(0, message)
+
+ return None
+
+ def ansible_check(self):
+ """Placeholder for ansible check."""
+ print "Checking ansible......"
+ print ("[Done]")
+ self.code == 1
+ self.messages.append(
+ "[%s]Info: Package installer health check "
+ "has completed. No problems found, all systems "
+ "go." % self.NAME)
+ return (self.code, self.messages)
diff --git a/compass-tasks/actions/health_check/check_squid.py b/compass-tasks/actions/health_check/check_squid.py
new file mode 100644
index 0000000..5628a63
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_squid.py
@@ -0,0 +1,128 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Health Check module for Squid service."""
+import commands
+import os
+import pwd
+import socket
+
+from compass.actions.health_check import base
+from compass.actions.health_check import utils as health_check_utils
+
+
+class SquidCheck(base.BaseCheck):
+ """Squid health check class."""
+ NAME = "Squid Check"
+
+ def run(self):
+ """do health check."""
+ self.check_squid_files()
+ print "[Done]"
+ self.check_squid_service()
+ print "[Done]"
+ if self.code == 1:
+ self.messages.append(
+ "[%s]Info: Squid health check has completed. "
+ "No problems found, all systems go." % self.NAME)
+ return (self.code, self.messages)
+
+ def check_squid_files(self):
+ """Validates squid config, cache directory and ownership."""
+ print "Checking Squid Files......",
+ var_map = {
+ 'match_squid_conf': False,
+ 'match_squid_cache': False,
+ 'match_squid_ownership': False,
+ }
+
+ conf_err_msg = health_check_utils.check_path(
+ self.NAME,
+ "/etc/squid/squid.conf")
+ if not conf_err_msg == "":
+ self._set_status(0, conf_err_msg)
+ elif int(oct(os.stat('/etc/squid/squid.conf').st_mode)) < 100644:
+ self._set_status(
+ 0,
+ "[%s]Error: squid.conf has incorrect "
+ "file permissions" % self.NAME)
+ else:
+ var_map['match_squid_conf'] = True
+
+ squid_path_err_msg = health_check_utils.check_path(
+ self.NAME, '/var/squid/')
+ if not squid_path_err_msg == "":
+ self._set_status(0, squid_path_err_msg)
+ elif health_check_utils.check_path(
+ self.NAME,
+ '/var/squid/cache'
+ ) != "":
+ self._set_status(
+ 0,
+ health_check_utils.check_path(
+ self.NAME,
+ '/var/squid/cache'
+ )
+ )
+ else:
+ var_map['match_squid_cache'] = True
+ uid = os.stat('/var/squid/').st_uid
+ gid = os.stat('/var/squid/').st_gid
+ if uid != gid or pwd.getpwuid(uid).pw_name != 'squid':
+ self._set_status(
+ 0,
+ "[%s]Error: /var/squid directory ownership "
+ "misconfigured" % self.NAME)
+ else:
+ var_map['match_squid_ownership'] = True
+
+ fails = []
+ for key in var_map.keys():
+ if var_map[key] is False:
+ fails.append(key)
+
+ if len(fails) != 0:
+ self.messages.append(
+ "[%s]Info: Failed components for squid config: "
+ "%s" % (
+ self.NAME,
+ ', '.join(item for item in fails)
+ )
+ )
+ return True
+
+ def check_squid_service(self):
+ """Checks if squid is running on port 3128."""
+
+ print "Checking Squid service......",
+ if 'squid' not in commands.getoutput('ps -ef'):
+ self._set_status(
+ 0,
+ "[%s]Error: squid service does not seem "
+ "running" % self.NAME)
+
+ try:
+ if 'squid' != socket.getservbyport(3128):
+ self._set_status(
+ 0,
+ "[%s]Error: squid is not listening on "
+ "3128" % self.NAME)
+
+ except Exception:
+ self._set_status(
+ 0,
+ "[%s]Error: No service is listening on 3128, "
+ "squid failed" % self.NAME)
+
+ return True
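+
+# Note: socket.getservbyport(3128) consults the local services database
+# (e.g. /etc/services) for the name registered on that port; it does not
+# probe a live listener, so the "listening" wording above is approximate.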
diff --git a/compass-tasks/actions/health_check/check_tftp.py b/compass-tasks/actions/health_check/check_tftp.py
new file mode 100644
index 0000000..7ca6405
--- /dev/null
+++ b/compass-tasks/actions/health_check/check_tftp.py
@@ -0,0 +1,96 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Health Check module for TFTP service."""
+import os
+import socket
+import xmlrpclib
+
+from compass.actions.health_check import base
+from compass.actions.health_check import utils as health_check_utils
+
+
+class TftpCheck(base.BaseCheck):
+ """tftp health check class."""
+ NAME = "TFTP Check"
+
+ def run(self):
+ """do health check."""
+ method_name = "self.check_" + self.os_installer['name'] + "_tftp()"
+ return eval(method_name)
+
+ def check_cobbler_tftp(self):
+ """Checks if Cobbler manages TFTP service.
+
+ :note: we assume the TFTP service is running on the same
+ machine where this health check runs
+ """
+
+ try:
+ remote = xmlrpclib.Server(
+ self.os_installer['cobbler_url'],
+ allow_none=True)
+ credentials = self.os_installer['credentials']
+ remote.login(
+ credentials['username'], credentials['password'])
+ except Exception:
+ self._set_status(
+ 0,
+ "[%s]Error: Cannot login to Cobbler with the tokens "
+ " provided in the config file" % self.NAME)
+ return (self.code, self.messages)
+
+ cobbler_settings = remote.get_settings()
+ if cobbler_settings['manage_tftp'] == 0:
+ self.messages.append(
+ '[TFTP]Info: tftp service is not managed by Compass')
+ return (0, self.messages)
+ self.check_tftp_dir()
+ print "[Done]"
+ self.check_tftp_service()
+ print "[Done]"
+ if self.code == 1:
+ self.messages.append(
+ "[%s]Info: tftp service health check has completed. "
+ "No problems found, all systems go." % self.NAME)
+
+ return (self.code, self.messages)
+
+ def check_tftp_dir(self):
+ """Validates TFTP directories and configurations."""
+ print "Checking TFTP directories......",
+ if not os.path.exists('/var/lib/tftpboot/'):
+ self._set_status(
+ 0,
+ "[%s]Error: No tftp-boot libraries found, "
+ "please check if tftp server is properly "
+ "installed/managed" % self.NAME)
+
+ return True
+
+ def check_tftp_service(self):
+ """Checks if TFTP is running on port 69."""
+ print "Checking TFTP services......",
+ serv_err_msg = health_check_utils.check_service_running(self.NAME,
+ 'xinetd')
+ if not serv_err_msg == "":
+ self._set_status(0, serv_err_msg)
+
+ if 'tftp' != socket.getservbyport(69):
+ self._set_status(
+ 0,
+ "[%s]Error: tftp doesn't seem to be listening "
+ "on Port 60." % self.NAME)
+
+ return True
diff --git a/compass-tasks/actions/health_check/utils.py b/compass-tasks/actions/health_check/utils.py
new file mode 100644
index 0000000..369c5b6
--- /dev/null
+++ b/compass-tasks/actions/health_check/utils.py
@@ -0,0 +1,114 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass Health Check heavy-lifting utilities"""
+import commands
+import os
+import platform
+import re
+
+
+def validate_setting(module, setting, param):
+ """Checks if a Compass setting exists in the config file.
+
+ :param module : module name to be checked
+ :type module : string
+ :param setting : compass setting wrapper
+ :type setting : python module
+ :param param : settings defined in compass config file
+ :type param : string
+
+ """
+ if hasattr(setting, param):
+ return True
+ else:
+ err_msg = "[%s]Error: no %s defined" % (module, param)
+ return err_msg
+
+
+def get_dist():
+ """Returns the operating system related information."""
+
+ os_version, version, release = platform.linux_distribution()
+ return (os_version.lower().strip(), version, release.lower().strip())
+
+
+def check_path(module_name, path):
+ """Checks if a directory or file exisits.
+
+ :param module_name : module name to be checked
+ :type module_name : string
+ :param path : path of the directory or file
+ :type path : string
+
+ """
+ err_msg = ""
+ if not os.path.exists(path):
+ err_msg = (
+ "[%s]Error: %s does not exist, "
+ "please check your configurations.") % (module_name, path)
+ return err_msg
+
+
+def check_service_running(module_name, service_name):
+ """Checks if a certain service is running.
+
+ :param module_name : module name to be checked
+ :type module_name : string
+ :param service_name : service name to be checked
+ :type service_name : string
+
+ """
+ err_msg = ""
+ if service_name not in commands.getoutput('ps -ef'):
+ err_msg = "[%s]Error: %s is not running." % (
+ module_name, service_name)
+
+ return err_msg
+
+
+def check_chkconfig(service_name):
+ """Checks if a service is enabled at the start up.
+
+ :param service_name : service name to be checked
+ :type service_name : string
+
+ """
+ chk_on = False
+ for service in os.listdir('/etc/rc3.d/'):
+ if service_name in service and 'S' in service:
+ chk_on = True
+ break
+
+ return chk_on
+
+
+def strip_name(name):
+ """Reformats names."""
+ if not any([s in name for s in "(,),-,_".split(',')]):
+ return name
+
+ paren_regex = re.compile(r"(.*?)\s*\(")
+ dash_regex = re.compile(r"(.*?)\s*\-")
+ under_dash_regex = re.compile(r"(.*?)\s*\_")
+
+ r1 = paren_regex.match(name)
+ r2 = dash_regex.match(name)
+ r3 = under_dash_regex.match(name)
+ shortest = 'AVeryLongStringForDefault'
+ for r in [r1, r2, r3]:
+ if r and len(r.group(1)) < len(shortest):
+ shortest = r.group(1)
+
+ return shortest
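+
+# Doctest-style sketch (hypothetical values):
+#
+#     >>> check_path('DNS Check', '/nonexistent/path')
+#     '[DNS Check]Error: /nonexistent/path does not exist, please check your configurations.'
+#     >>> strip_name('host-1')
+#     'host'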
diff --git a/compass-tasks/actions/install_callback.py b/compass-tasks/actions/install_callback.py
new file mode 100644
index 0000000..14d2639
--- /dev/null
+++ b/compass-tasks/actions/install_callback.py
@@ -0,0 +1,181 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to receive installation callback.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import cluster as cluster_api
+from compass.db.api import host as host_api
+from compass.db.api import user as user_db
+from compass.deployment.deploy_manager import DeployManager
+from compass.deployment.utils import constants as const
+
+
+def os_installed(
+ host_id, clusterhosts_ready, clusters_os_ready,
+ username=None
+):
+ """Callback when os is installed.
+
+ :param host_id: host that os is installed on.
+ :type host_id: integer
+ :param clusterhosts_ready: the clusterhosts that should trigger ready.
+ :param clusters_os_ready: the cluster that should trigger os ready.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to '
+ 'do the post action after os installation'
+ )
+ logging.info(
+ 'os installed on host %s '
+ 'with cluster host ready %s cluster os ready %s',
+ host_id, clusterhosts_ready, clusters_os_ready
+ )
+ if username:
+ user = user_db.get_user_object(username)
+ else:
+ user = None
+ os_installed_triggered = False
+ for cluster_id, clusterhost_ready in clusterhosts_ready.items():
+ if not clusterhost_ready and os_installed_triggered:
+ continue
+ cluster_id = int(cluster_id)
+ cluster_info = util.ActionHelper.get_cluster_info(
+ cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, [host_id], user)
+
+ deploy_manager = DeployManager(
+ adapter_info, cluster_info, hosts_info)
+
+ if not os_installed_triggered:
+ deploy_manager.os_installed()
+ util.ActionHelper.host_ready(host_id, True, user)
+ os_installed_triggered = True
+
+ if clusterhost_ready:
+ # deploy_manager.cluster_os_installed()
+ util.ActionHelper.cluster_host_ready(
+ cluster_id, host_id, False, user
+ )
+
+ if util.ActionHelper.is_cluster_os_ready(cluster_id, user):
+ logging.info("deploy_manager begin cluster_os_installed")
+ deploy_manager.cluster_os_installed()
+
+
+def package_installed(
+ cluster_id, host_id, cluster_ready,
+ host_ready, username=None
+):
+ """Callback when package is installed.
+
+ :param cluster_id: cluster id.
+ :param host_id: host id.
+ :param cluster_ready: if the cluster should trigger ready.
+ :param host_ready: if the host should trigger ready.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to '
+ 'do the post action after package installation'
+ )
+ logging.info(
+ 'package installed on cluster %s host %s '
+ 'with cluster ready %s host ready %s',
+ cluster_id, host_id, cluster_ready, host_ready
+ )
+
+ if username:
+ user = user_db.get_user_object(username)
+ else:
+ user = None
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, [host_id], user)
+
+ deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
+
+ deploy_manager.package_installed()
+ util.ActionHelper.cluster_host_ready(cluster_id, host_id, True, user)
+ if cluster_ready:
+ util.ActionHelper.cluster_ready(cluster_id, False, user)
+ if host_ready:
+ util.ActionHelper.host_ready(host_id, False, user)
+
+
+def cluster_installed(
+ cluster_id, clusterhosts_ready,
+ username=None
+):
+ """Callback when cluster is installed.
+
+ :param cluster_id: cluster id
+ :param clusterhosts_ready: clusterhosts that should trigger ready.
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to '
+ 'do the post action after cluster installation'
+ )
+ logging.info(
+ 'cluster %s installed with clusterhosts ready %s',
+ cluster_id, clusterhosts_ready
+ )
+ if username:
+ user = user_db.get_user_object(username)
+ else:
+ user = None
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, clusterhosts_ready.keys(), user)
+
+ deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
+
+ deploy_manager.cluster_installed()
+ util.ActionHelper.cluster_ready(cluster_id, True, user)
+ for host_id, clusterhost_ready in clusterhosts_ready.items():
+ if clusterhost_ready:
+ util.ActionHelper.cluster_host_ready(
+ cluster_id, host_id, False, user
+ )
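+
+# Hypothetical invocation sketch (in practice these callbacks are fired by
+# celery tasks once the installers report back):
+#
+#     os_installed(
+#         host_id=1, clusterhosts_ready={'1': True},
+#         clusters_os_ready={'1': True}, username='admin@huawei.com'
+#     )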
diff --git a/compass-tasks/actions/patch.py b/compass-tasks/actions/patch.py
new file mode 100644
index 0000000..6d29be6
--- /dev/null
+++ b/compass-tasks/actions/patch.py
@@ -0,0 +1,69 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to patch an existing cluster
+"""
+import logging
+import simplejson as json
+
+from compass.actions import util
+from compass.db.api import cluster as cluster_db
+from compass.db.api import user as user_db
+from compass.deployment.deploy_manager import Patcher
+from compass.deployment.utils import constants as const
+
+
+def patch(cluster_id, username=None):
+ """Patch cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action', timeout=1000) as lock:
+ if not lock:
+ raise Exception('failed to acquire lock to deploy')
+
+ user = user_db.get_user_object(username)
+ cluster_hosts = cluster_db.list_cluster_hosts(cluster_id, user)
+ hosts_id_list = [host['id'] for host in cluster_hosts]
+ cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
+ adapter_id = cluster_info[const.ADAPTER_ID]
+
+ adapter_info = util.ActionHelper.get_adapter_info(
+ adapter_id, cluster_id, user)
+ hosts_info = util.ActionHelper.get_hosts_info(
+ cluster_id, hosts_id_list, user)
+ patch_successful = True
+ try:
+ patcher = Patcher(
+ adapter_info, cluster_info, hosts_info, cluster_hosts)
+ patched_config = patcher.patch()
+ except Exception as error:
+ logging.exception(error)
+ patch_successful = False
+
+ if patch_successful:
+ clean_payload = '{"patched_roles": []}'
+ clean_payload = json.loads(clean_payload)
+ for cluster_host in cluster_hosts:
+ cluster_db.update_cluster_host(
+ cluster_id, cluster_host['id'], user, **clean_payload)
+ logging.info(
+ "cleaning up patched roles for host id: %s",
+ cluster_host['id']
+ )
+ logging.info("Patch successful: %s", patched_config)
diff --git a/compass-tasks/actions/poll_switch.py b/compass-tasks/actions/poll_switch.py
new file mode 100644
index 0000000..5c29b01
--- /dev/null
+++ b/compass-tasks/actions/poll_switch.py
@@ -0,0 +1,162 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provider function to poll switch."""
+import logging
+import netaddr
+
+from compass.actions import util
+from compass.db.api import database
+from compass.db.api import switch as switch_api
+from compass.db.api import user as user_api
+from compass.hdsdiscovery.hdmanager import HDManager
+
+
+def _poll_switch(ip_addr, credentials, req_obj='mac', oper="SCAN"):
+ """Poll switch by ip addr.
+
+ Args:
+ ip_addr: ip addr of the switch.
+ credentials: credentials of the switch.
+
+ Returns: switch attributes dict and list of machine attributes dict.
+ """
+ under_monitoring = 'under_monitoring'
+ unreachable = 'unreachable'
+ polling_error = 'error'
+ hdmanager = HDManager()
+ vendor, state, err_msg = hdmanager.get_vendor(ip_addr, credentials)
+ if not vendor:
+ logging.info("*****error_msg: %s****", err_msg)
+ logging.error('no vendor found or matched for switch %s', ip_addr)
+ return (
+ {
+ 'vendor': vendor, 'state': state, 'err_msg': err_msg
+ }, {
+ }
+ )
+
+ logging.debug(
+ 'hdmanager learn switch from %s', ip_addr
+ )
+ results = []
+ try:
+ results = hdmanager.learn(
+ ip_addr, credentials, vendor, req_obj, oper
+ )
+ except Exception as error:
+ logging.exception(error)
+ state = unreachable
+ err_msg = (
+ 'SNMP walk for querying MAC addresses timed out'
+ )
+ return (
+ {
+ 'vendor': vendor, 'state': state, 'err_msg': err_msg
+ }, {
+ }
+ )
+
+ logging.info("pollswitch %s result: %s", ip_addr, results)
+ if not results:
+ logging.error(
+ 'no result learned from %s', ip_addr
+ )
+ state = polling_error
+ err_msg = 'No result learned from SNMP walk'
+ return (
+ {'vendor': vendor, 'state': state, 'err_msg': err_msg},
+ {}
+ )
+
+ logging.info('poll switch result: %s', results)
+ machine_dicts = {}
+ for machine in results:
+ mac = machine['mac']
+ port = machine['port']
+ vlan = int(machine['vlan'])
+ if vlan:
+ vlans = [vlan]
+ else:
+ vlans = []
+ if mac not in machine_dicts:
+ machine_dicts[mac] = {'mac': mac, 'port': port, 'vlans': vlans}
+ else:
+ machine_dicts[mac]['port'] = port
+ machine_dicts[mac]['vlans'].extend(vlans)
+
+ logging.debug('update switch %s state to under monitoring', ip_addr)
+ state = under_monitoring
+ return (
+ {'vendor': vendor, 'state': state, 'err_msg': err_msg},
+ machine_dicts.values()
+ )
+
+
+def poll_switch(poller_email, ip_addr, credentials,
+ req_obj='mac', oper="SCAN"):
+ """Query switch and update switch machines.
+
+ .. note::
+ When polling switch succeeds, for each mac it got from polling switch,
+ A Machine record associated with the switch is added to the database.
+
+ :param ip_addr: switch ip address.
+ :type ip_addr: str
+ :param credentials: switch credentials.
+ :type credentials: dict
+ :param req_obj: the object requested to query from switch.
+ :type req_obj: str
+ :param oper: the operation to query the switch.
+ :type oper: str, should be one of ['SCAN', 'GET', 'SET']
+
+ .. note::
+ The function should be called out of database session scope.
+ """
+ poller = user_api.get_user_object(poller_email)
+ ip_int = long(netaddr.IPAddress(ip_addr))
+ with util.lock('poll switch %s' % ip_addr, timeout=120) as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to poll switch %s' % ip_addr
+ )
+
+ # TODO(grace): before repoll the switch, set the state to repolling.
+ # and when the poll switch is timeout, set the state to error.
+ # the frontend should only consider some main states like INITIALIZED,
+ # ERROR and SUCCESSFUL. REPOLLING is an intermediate state to
+ # indicate the switch is still learning the macs of the machines
+ # connected to it.
+ logging.debug('poll switch: %s', ip_addr)
+ switch_dict, machine_dicts = _poll_switch(
+ ip_addr, credentials, req_obj=req_obj, oper=oper
+ )
+ switches = switch_api.list_switches(ip_int=ip_int, user=poller)
+ if not switches:
+ logging.error('no switch found for %s', ip_addr)
+ return
+
+ for switch in switches:
+ for machine_dict in machine_dicts:
+ logging.info('add machine: %s', machine_dict)
+ machine_dict['owner_id'] = poller.id
+ switch_api.add_switch_machine(
+ switch['id'], False, user=poller, **machine_dict
+ )
+ switch_api.update_switch(
+ switch['id'],
+ user=poller,
+ **switch_dict
+ )
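+
+# Hypothetical call (normally dispatched as a celery task); the credentials
+# dict shape shown here is an assumption based on the SNMP settings used
+# elsewhere in this patch:
+#
+#     poll_switch('admin@huawei.com', '172.29.8.40',
+#                 {'version': '2c', 'community': 'public'})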
diff --git a/compass-tasks/actions/reinstall.py b/compass-tasks/actions/reinstall.py
new file mode 100644
index 0000000..62d1bcb
--- /dev/null
+++ b/compass-tasks/actions/reinstall.py
@@ -0,0 +1,38 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to reinstall a given cluster
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import database
+
+
+def reinstall(cluster_hosts):
+ """Reinstall clusters.
+
+ :param cluster_hosts: clusters and hosts in each cluster to reinstall.
+ :type cluster_hosts: dict of int or str to list of int or str
+
+ .. note::
+ The function should be called out of database session.
+ """
+ with util.lock('serialized_action') as lock:
+ if not lock:
+ raise Exception(
+ 'failed to acquire lock to reinstall')
+ logging.debug('reinstall cluster_hosts: %s', cluster_hosts)
diff --git a/compass-tasks/actions/search.py b/compass-tasks/actions/search.py
new file mode 100644
index 0000000..73ce1d9
--- /dev/null
+++ b/compass-tasks/actions/search.py
@@ -0,0 +1,46 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to search configs of given clusters
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.config_management.utils.config_manager import ConfigManager
+from compass.db.api import database
+
+
+def search(cluster_hosts, cluster_propreties_match,
+ cluster_properties_name, host_properties_match,
+ host_properties_name):
+ """search clusters.
+
+ :param cluster_hosts: clusters and hosts in each cluster to search.
+ :type cluster_hosts: dict of int or str to list of int or str
+
+ .. note::
+ The function should be called out of database session.
+ """
+ logging.debug('search cluster_hosts: %s', cluster_hosts)
+ with database.session():
+ cluster_hosts, os_versions, target_systems = (
+ util.update_cluster_hosts(cluster_hosts))
+ manager = ConfigManager()
+ return manager.filter_cluster_and_hosts(
+ cluster_hosts, os_versions,
+ target_systems, cluster_propreties_match,
+ cluster_properties_name, host_properties_match,
+ host_properties_name)
diff --git a/compass-tasks/actions/update_progress.py b/compass-tasks/actions/update_progress.py
new file mode 100644
index 0000000..67a9963
--- /dev/null
+++ b/compass-tasks/actions/update_progress.py
@@ -0,0 +1,298 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to update status and installing progress of the given cluster.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import cluster as cluster_api
+from compass.db.api import host as host_api
+from compass.db.api import user as user_api
+from compass.log_analyzor import progress_calculator
+from compass.utils import setting_wrapper as setting
+
+
+def update_progress():
+ """Update status and installing progress of the given cluster.
+
+ .. note::
+ The function should be called out of the database session scope.
+ In the function, it will update the database cluster_state and
+ host_state table for the deploying cluster and hosts.
+
+ The function will also query the log_progressing_history table to get
+ the latest installing progress and the position in the log file it
+ processed in the last run. The function uses this information to
+ avoid recalculating the progress from the beginning of the log file.
+ After the progress is updated, this information is stored back
+ to log_progressing_history for the next run.
+ """
+ with util.lock('log_progressing', timeout=60, blocking=False) as lock:
+ if not lock:
+ logging.error(
+ 'failed to acquire lock to calculate installation progress'
+ )
+ return
+
+ logging.info('update installing progress')
+
+ user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
+ hosts = host_api.list_hosts(user=user)
+ host_mapping = {}
+ for host in hosts:
+ if 'id' not in host:
+ logging.error('id is not in host %s', host)
+ continue
+ host_id = host['id']
+ if 'os_name' not in host:
+ logging.error('os_name is not in host %s', host)
+ continue
+ if 'os_installer' not in host:
+ logging.error('os_installer is not in host %s', host)
+ continue
+ host_dirname = setting.HOST_INSTALLATION_LOGDIR_NAME
+ if host_dirname not in host:
+ logging.error(
+ '%s is not in host %s', host_dirname, host
+ )
+ continue
+ host_state = host_api.get_host_state(host_id, user=user)
+ if 'state' not in host_state:
+ logging.error('state is not in host state %s', host_state)
+ continue
+ if host_state['state'] == 'INSTALLING':
+ host_log_histories = host_api.get_host_log_histories(
+ host_id, user=user
+ )
+ host_log_history_mapping = {}
+ for host_log_history in host_log_histories:
+ if 'filename' not in host_log_history:
+ logging.error(
+ 'filename is not in host log history %s',
+ host_log_history
+ )
+ continue
+ host_log_history_mapping[
+ host_log_history['filename']
+ ] = host_log_history
+ host_mapping[host_id] = (
+ host, host_state, host_log_history_mapping
+ )
+ else:
+ logging.info(
+ 'ignore host state %s since it is not in installing',
+ host_state
+ )
+ adapters = adapter_api.list_adapters(user=user)
+ adapter_mapping = {}
+ for adapter in adapters:
+ if 'id' not in adapter:
+ logging.error(
+ 'id not in adapter %s', adapter
+ )
+ continue
+ if 'package_installer' not in adapter:
+ logging.info(
+ 'package_installer not in adapter %s', adapter
+ )
+ continue
+ adapter_id = adapter['id']
+ adapter_mapping[adapter_id] = adapter
+ clusters = cluster_api.list_clusters(user=user)
+ cluster_mapping = {}
+ for cluster in clusters:
+ if 'id' not in cluster:
+ logging.error('id not in cluster %s', cluster)
+ continue
+ cluster_id = cluster['id']
+ if 'adapter_id' not in cluster:
+ logging.error(
+ 'adapter_id not in cluster %s',
+ cluster
+ )
+ continue
+ cluster_state = cluster_api.get_cluster_state(
+ cluster_id,
+ user=user
+ )
+ if 'state' not in cluster_state:
+ logging.error('state not in cluster state %s', cluster_state)
+ continue
+ cluster_mapping[cluster_id] = (cluster, cluster_state)
+ clusterhosts = cluster_api.list_clusterhosts(user=user)
+ clusterhost_mapping = {}
+ for clusterhost in clusterhosts:
+ if 'clusterhost_id' not in clusterhost:
+ logging.error(
+ 'clusterhost_id not in clusterhost %s',
+ clusterhost
+ )
+ continue
+ clusterhost_id = clusterhost['clusterhost_id']
+ if 'cluster_id' not in clusterhost:
+ logging.error(
+ 'cluster_id not in clusterhost %s',
+ clusterhost
+ )
+ continue
+ cluster_id = clusterhost['cluster_id']
+ if cluster_id not in cluster_mapping:
+ logging.info(
+ 'ignore clusterhost %s '
+ 'since the cluster_id '
+ 'is not in cluster_mapping %s',
+ clusterhost, cluster_mapping
+ )
+ continue
+ cluster, _ = cluster_mapping[cluster_id]
+ if 'flavor_name' not in cluster:
+ logging.error(
+ 'flavor_name is not in clusterhost %s related cluster',
+ clusterhost
+ )
+ continue
+ clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME
+ if clusterhost_dirname not in clusterhost:
+ logging.error(
+ '%s is not in clusterhost %s',
+ clusterhost_dirname, clusterhost
+ )
+ continue
+ adapter_id = cluster['adapter_id']
+ if adapter_id not in adapter_mapping:
+ logging.info(
+ 'ignore clusterhost %s '
+ 'since the adapter_id %s '
+ 'is not in adapter_mapping %s',
+ clusterhost, adapter_id, adapter_mapping
+ )
+ continue
+ adapter = adapter_mapping[adapter_id]
+ if 'package_installer' not in adapter:
+ logging.info(
+ 'ignore clusterhost %s '
+ 'since the package_installer is not defined '
+ 'in adapter %s',
+ clusterhost, adapter
+ )
+ continue
+ package_installer = adapter['package_installer']
+ clusterhost['package_installer'] = package_installer
+ clusterhost['adapter_name'] = adapter['name']
+ clusterhost_state = cluster_api.get_clusterhost_self_state(
+ clusterhost_id, user=user
+ )
+ if 'state' not in clusterhost_state:
+ logging.error(
+ 'state not in clusterhost_state %s',
+ clusterhost_state
+ )
+ continue
+ if clusterhost_state['state'] == 'INSTALLING':
+ clusterhost_log_histories = (
+ cluster_api.get_clusterhost_log_histories(
+ clusterhost_id, user=user
+ )
+ )
+ clusterhost_log_history_mapping = {}
+ for clusterhost_log_history in clusterhost_log_histories:
+ if 'filename' not in clusterhost_log_history:
+ logging.error(
+ 'filename not in clusterhost_log_history %s',
+ clusterhost_log_history
+ )
+ continue
+ clusterhost_log_history_mapping[
+ clusterhost_log_history['filename']
+ ] = clusterhost_log_history
+ clusterhost_mapping[clusterhost_id] = (
+ clusterhost, clusterhost_state,
+ clusterhost_log_history_mapping
+ )
+ else:
+ logging.info(
+ 'ignore clusterhost state %s '
+ 'since it is not in installing',
+ clusterhost_state
+ )
+
+ progress_calculator.update_host_progress(
+ host_mapping)
+ for host_id, (host, host_state, host_log_history_mapping) in (
+ host_mapping.items()
+ ):
+ host_api.update_host_state(
+ host_id, user=user,
+ percentage=host_state.get('percentage', 0),
+ message=host_state.get('message', ''),
+ severity=host_state.get('severity', 'INFO')
+ )
+ for filename, host_log_history in (
+ host_log_history_mapping.items()
+ ):
+ host_api.add_host_log_history(
+ host_id, filename=filename, user=user,
+ position=host_log_history.get('position', 0),
+ percentage=host_log_history.get('percentage', 0),
+ partial_line=host_log_history.get('partial_line', ''),
+ message=host_log_history.get('message', ''),
+ severity=host_log_history.get('severity', 'INFO'),
+ line_matcher_name=host_log_history.get(
+ 'line_matcher_name', 'start'
+ )
+ )
+ progress_calculator.update_clusterhost_progress(
+ clusterhost_mapping)
+ for (
+ clusterhost_id,
+ (clusterhost, clusterhost_state, clusterhost_log_history_mapping)
+ ) in (
+ clusterhost_mapping.items()
+ ):
+ cluster_api.update_clusterhost_state(
+ clusterhost_id, user=user,
+ percentage=clusterhost_state.get('percentage', 0),
+ message=clusterhost_state.get('message', ''),
+ severity=clusterhost_state.get('severity', 'INFO')
+ )
+ for filename, clusterhost_log_history in (
+ clusterhost_log_history_mapping.items()
+ ):
+ cluster_api.add_clusterhost_log_history(
+ clusterhost_id, user=user, filename=filename,
+ position=clusterhost_log_history.get('position', 0),
+ percentage=clusterhost_log_history.get('percentage', 0),
+ partial_line=clusterhost_log_history.get(
+ 'partial_line', ''),
+ message=clusterhost_log_history.get('message', ''),
+ severity=clusterhost_log_history.get('severity', 'INFO'),
+ line_matcher_name=(
+ clusterhost_log_history.get(
+ 'line_matcher_name', 'start'
+ )
+ )
+ )
+ progress_calculator.update_cluster_progress(
+ cluster_mapping)
+ for cluster_id, (cluster, cluster_state) in cluster_mapping.items():
+ cluster_api.update_cluster_state(
+ cluster_id, user=user
+ )
diff --git a/compass-tasks/actions/util.py b/compass-tasks/actions/util.py
new file mode 100644
index 0000000..4d9f855
--- /dev/null
+++ b/compass-tasks/actions/util.py
@@ -0,0 +1,342 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provide util for actions
+
+ .. moduleauthor:: Xiaodong Wang ,xiaodongwang@huawei.com>
+"""
+import logging
+import redis
+
+from contextlib import contextmanager
+
+from compass.db.api import adapter_holder as adapter_db
+from compass.db.api import cluster as cluster_db
+from compass.db.api import host as host_db
+from compass.db.api import machine as machine_db
+from compass.deployment.utils import constants as const
+
+
+@contextmanager
+def lock(lock_name, blocking=True, timeout=10):
+ """acquire a lock to do some actions.
+
+ The lock is acquired by lock_name among the whole distributed
+ systems.
+ """
+ # TODO(xicheng): in future we should explicitly told which redis
+ # server we want to talk to make the lock works on distributed
+ # systems.
+ redis_instance = redis.Redis()
+ instance_lock = redis_instance.lock(lock_name, timeout=timeout)
+ owned = False
+ try:
+ locked = instance_lock.acquire(blocking=blocking)
+ if locked:
+ owned = True
+ logging.debug('acquired lock %s', lock_name)
+ yield instance_lock
+ else:
+ logging.info('lock %s is already held', lock_name)
+ yield None
+
+ except Exception as error:
+ logging.info(
+ 'redis fails to acquire the lock %s', lock_name)
+ logging.exception(error)
+ yield None
+
+ finally:
+ if owned:
+ instance_lock.acquired_until = 0
+ instance_lock.release()
+ logging.debug('released lock %s', lock_name)
+ else:
+ logging.debug('nothing to release %s', lock_name)
+
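+# A minimal usage sketch, assuming a redis server is reachable with the
+# default redis.Redis() connection settings (this mirrors how the action
+# modules in this patch use the lock):
+#
+#     with lock('serialized_action', timeout=60) as my_lock:
+#         if not my_lock:
+#             raise Exception('failed to acquire lock')
+#         # ... do the serialized work while the lock is held ...
+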
+
+class ActionHelper(object):
+
+ @staticmethod
+ def get_adapter_info(adapter_id, cluster_id, user):
+ """Get adapter information. Return a dictionary as below,
+
+ {
+ "id": 1,
+ "name": "xxx",
+ "flavors": [
+ {
+ "flavor_name": "xxx",
+ "roles": ['xxx', 'yyy', ...],
+ "template": "xxx.tmpl"
+ },
+ ...
+ ],
+ "metadata": {
+ "os_config": {
+ ...
+ },
+ "package_config": {
+ ...
+ }
+ },
+ "os_installer": {
+ "name": "cobbler",
+ "settings": {....}
+ },
+ "pk_installer": {
+ "name": "chef",
+ "settings": {....}
+ },
+ ...
+ }
+ To view a complete output, please refer to backend doc.
+ """
+
+ adapter_info = adapter_db.get_adapter(adapter_id, user=user)
+ metadata = cluster_db.get_cluster_metadata(cluster_id, user=user)
+ adapter_info.update({const.METADATA: metadata})
+
+ for flavor_info in adapter_info[const.FLAVORS]:
+ roles = flavor_info[const.ROLES]
+ flavor_info[const.ROLES] = ActionHelper._get_role_names(roles)
+
+ return adapter_info
+
+ @staticmethod
+ def _get_role_names(roles):
+ return [role[const.NAME] for role in roles]
+
+ @staticmethod
+ def get_cluster_info(cluster_id, user):
+ """Get cluster information.Return a dictionary as below,
+
+ {
+ "id": 1,
+ "adapter_id": 1,
+ "os_version": "CentOS-6.5-x86_64",
+ "name": "cluster_01",
+ "flavor": {
+ "flavor_name": "zzz",
+ "template": "xx.tmpl",
+ "roles": [...]
+ }
+ "os_config": {..},
+ "package_config": {...},
+ "deployed_os_config": {},
+ "deployed_package_config": {},
+ "owner": "xxx"
+ }
+ """
+
+ cluster_info = cluster_db.get_cluster(cluster_id, user=user)
+
+ # convert roles retrieved from db into a list of role names
+ roles_info = cluster_info.setdefault(
+ const.FLAVOR, {}).setdefault(const.ROLES, [])
+ cluster_info[const.FLAVOR][const.ROLES] = \
+ ActionHelper._get_role_names(roles_info)
+
+ # get cluster config info
+ cluster_config = cluster_db.get_cluster_config(cluster_id, user=user)
+ cluster_info.update(cluster_config)
+
+ deploy_config = cluster_db.get_cluster_deployed_config(cluster_id,
+ user=user)
+ cluster_info.update(deploy_config)
+
+ return cluster_info
+
+ @staticmethod
+ def get_hosts_info(cluster_id, hosts_id_list, user):
+ """Get hosts information. Return a dictionary as below,
+
+ {
+ "hosts": {
+ 1($host_id): {
+ "reinstall_os": True,
+ "mac": "xxx",
+ "name": "xxx",
+ "roles": [xxx, yyy]
+ },
+ "networks": {
+ "eth0": {
+ "ip": "192.168.1.1",
+ "netmask": "255.255.255.0",
+ "is_mgmt": True,
+ "is_promiscuous": False,
+ "subnet": "192.168.1.0/24"
+ },
+ "eth1": {...}
+ },
+ "os_config": {},
+ "package_config": {},
+ "deployed_os_config": {},
+ "deployed_package_config": {}
+ },
+ 2: {...},
+ ....
+ }
+ }
+ """
+
+ hosts_info = {}
+ for host_id in hosts_id_list:
+ info = cluster_db.get_cluster_host(cluster_id, host_id, user=user)
+ logging.debug("checking on info %r %r" % (host_id, info))
+
+ info[const.ROLES] = ActionHelper._get_role_names(info[const.ROLES])
+
+ # TODO(grace): Is following line necessary??
+ info.setdefault(const.ROLES, [])
+
+ config = cluster_db.get_cluster_host_config(cluster_id,
+ host_id,
+ user=user)
+ info.update(config)
+
+ networks = info[const.NETWORKS]
+ networks_dict = {}
+ # Convert networks from list to dictionary format
+ for entry in networks:
+ nic_info = {
+ entry[const.NIC]: {
+ const.IP_ADDR: entry[const.IP_ADDR],
+ const.NETMASK: entry[const.NETMASK],
+ const.MGMT_NIC_FLAG: entry[const.MGMT_NIC_FLAG],
+ const.PROMISCUOUS_FLAG: entry[const.PROMISCUOUS_FLAG],
+ const.SUBNET: entry[const.SUBNET]
+ }
+ }
+ networks_dict.update(nic_info)
+
+ info[const.NETWORKS] = networks_dict
+
+ hosts_info[host_id] = info
+
+ return hosts_info
+
+ @staticmethod
+ def save_deployed_config(deployed_config, user):
+ """Save deployed config."""
+ cluster_config = deployed_config[const.CLUSTER]
+ cluster_id = cluster_config[const.ID]
+ del cluster_config[const.ID]
+
+ cluster_db.update_cluster_deployed_config(cluster_id, user=user,
+ **cluster_config)
+
+ hosts_id_list = deployed_config[const.HOSTS].keys()
+ for host_id in hosts_id_list:
+ config = deployed_config[const.HOSTS][host_id]
+ cluster_db.update_cluster_host_deployed_config(cluster_id,
+ host_id,
+ user=user,
+ **config)
+
+ @staticmethod
+ def update_state(
+ cluster_id, host_id_list, user, **kwargs
+ ):
+ # update all clusterhosts state
+ for host_id in host_id_list:
+ cluster_db.update_cluster_host_state(
+ cluster_id,
+ host_id,
+ user=user,
+ **kwargs
+ )
+
+ # update cluster state
+ cluster_db.update_cluster_state(
+ cluster_id,
+ user=user,
+ **kwargs
+ )
+
+ @staticmethod
+ def delete_cluster(
+ cluster_id, host_id_list, user, delete_underlying_host=False
+ ):
+ """Delete cluster.
+
+ If delete_underlying_host is set, underlying hosts will also
+ be deleted.
+ """
+ if delete_underlying_host:
+ for host_id in host_id_list:
+ host_db.del_host(
+ host_id, True, True, user=user
+ )
+ cluster_db.del_cluster(
+ cluster_id, True, True, user=user
+ )
+
+ @staticmethod
+ def delete_cluster_host(
+ cluster_id, host_id, user, delete_underlying_host=False
+ ):
+ """Delete clusterhost.
+
+ If delete_underlying_host set, also delete underlying host.
+ """
+ if delete_underlying_host:
+ host_db.del_host(
+ host_id, True, True, user=user
+ )
+ cluster_db.del_cluster_host(
+ cluster_id, host_id, True, True, user=user
+ )
+
+ @staticmethod
+ def delete_host(host_id, user):
+ host_db.del_host(
+ host_id, True, True, user=user
+ )
+
+ @staticmethod
+ def host_ready(host_id, from_database_only, user):
+ """Trigger host ready."""
+ host_db.update_host_state_internal(
+ host_id, from_database_only=from_database_only,
+ user=user, ready=True
+ )
+
+ @staticmethod
+ def cluster_host_ready(
+ cluster_id, host_id, from_database_only, user
+ ):
+ """Trigger clusterhost ready."""
+ cluster_db.update_cluster_host_state_internal(
+ cluster_id, host_id, from_database_only=from_database_only,
+ user=user, ready=True
+ )
+
+ @staticmethod
+ def is_cluster_os_ready(cluster_id, user=None):
+ return cluster_db.is_cluster_os_ready(cluster_id, user=user)
+
+ @staticmethod
+ def cluster_ready(cluster_id, from_database_only, user):
+ """Trigger cluster ready."""
+ cluster_db.update_cluster_state_internal(
+ cluster_id, from_database_only=from_database_only,
+ user=user, ready=True
+ )
+
+ @staticmethod
+ def get_machine_IPMI(machine_id, user):
+ machine_info = machine_db.get_machine(machine_id, user=user)
+ return machine_info[const.IPMI_CREDS]
diff --git a/compass-tasks/apiclient/__init__.py b/compass-tasks/apiclient/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-tasks/apiclient/__init__.py
diff --git a/compass-tasks/apiclient/example.py b/compass-tasks/apiclient/example.py
new file mode 100755
index 0000000..4c01b98
--- /dev/null
+++ b/compass-tasks/apiclient/example.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Example code to deploy a cluster by compass client api."""
+import os
+import re
+import sys
+import time
+
+# from compass.apiclient.restful import Client
+from restful import Client
+
+COMPASS_SERVER_URL = 'http://localhost/api'
+COMPASS_LOGIN_EMAIL = 'admin@huawei.com'
+COMPASS_LOGIN_PASSWORD = 'admin'
+SWITCH_IP = '172.29.8.40'
+SWITCH_SNMP_VERSION = '2c'
+SWITCH_SNMP_COMMUNITY = 'public'
+CLUSTER_NAME = 'test_cluster'
+HOST_NAME_PREFIX = 'host'
+SERVICE_USERNAME = 'service'
+SERVICE_PASSWORD = 'service'
+CONSOLE_USERNAME = 'console'
+CONSOLE_PASSWORD = 'console'
+HA_VIP = ''
+
+MANAGEMENT_IP_START = '10.145.88.130'
+MANAGEMENT_IP_END = '10.145.88.254'
+MANAGEMENT_IP_GATEWAY = '10.145.88.1'
+MANAGEMENT_NETMASK = '255.255.255.0'
+MANAGEMENT_NIC = 'eth0'
+MANAGEMENT_PROMISC = 0
+TENANT_IP_START = '192.168.10.130'
+TENANT_IP_END = '192.168.10.255'
+TENANT_IP_GATEWAY = '192.168.10.1'
+TENANT_NETMASK = '255.255.255.0'
+TENANT_NIC = 'eth0'
+TENANT_PROMISC = 0
+PUBLIC_IP_START = '12.234.32.130'
+PUBLIC_IP_END = '12.234.32.255'
+PUBLIC_IP_GATEWAY = '12.234.32.1'
+PUBLIC_NETMASK = '255.255.255.0'
+PUBLIC_NIC = 'eth1'
+PUBLIC_PROMISC = 1
+STORAGE_IP_START = '172.16.100.130'
+STORAGE_IP_END = '172.16.100.255'
+STORAGE_NETMASK = '255.255.255.0'
+STORAGE_IP_GATEWAY = '172.16.100.1'
+STORAGE_NIC = 'eth0'
+STORAGE_PROMISC = 0
+HOME_PERCENTAGE = 5
+TMP_PERCENTAGE = 5
+VAR_PERCENTAGE = 10
+HOST_OS = 'CentOS-6.5-x86_64'
+
+
+PRESET_VALUES = {
+ 'LANGUAGE': 'EN',
+ 'TIMEZONE': 'GMT',
+ 'HTTPS_PROXY': 'http://10.145.89.100:3128',
+ 'NO_PROXY': ['127.0.0.1'],
+ 'DOMAIN': 'ods.com',
+ 'NAMESERVERS': ['10.145.89.100'],
+ 'NTP_SERVER': '10.145.89.100',
+ 'GATEWAY': '10.145.88.1',
+ 'PROXY': 'http://10.145.89.100:3128',
+ 'OS_NAME_PATTERN': 'CentOS.*',
+ 'ADAPTER_NAME': 'openstack_icehouse',
+ 'FLAVOR_PATTERN': 'allinone.*',
+ 'ROLES_LIST': ['allinone-compute'],
+ 'MACHINES_TO_ADD': ['00:0c:29:a7:ea:4b'],
+ 'BUILD_TIMEOUT': 60,
+ 'SEARCH_PATH': ['ods.com'],
+ 'SERVER_USERNAME': 'root',
+ 'SERVER_PASSWORD': 'root'
+}
+for v in PRESET_VALUES:
+ if v in os.environ.keys():
+ PRESET_VALUES[v] = os.environ.get(v)
+        print ('%s is set to %s by env variables' % (v, PRESET_VALUES[v]))
+ else:
+ print (PRESET_VALUES[v])
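+
+# Any of the presets above can be overridden from the environment before
+# running this script, e.g. (illustrative values):
+#   export ADAPTER_NAME=openstack_icehouse
+#   export NTP_SERVER=10.145.89.100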
+
+# instantiate a client
+client = Client(COMPASS_SERVER_URL)
+
+# login
+status, response = client.login(COMPASS_LOGIN_EMAIL, COMPASS_LOGIN_PASSWORD)
+print '============================================================'
+print 'login status: %s response: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+# list all switches
+status, response = client.list_switches()
+print '============================================================='
+print 'get all switches status: %s response: %s' % (status, response)
+
+# add a switch
+status, response = client.add_switch(
+ SWITCH_IP,
+ SWITCH_SNMP_VERSION,
+ SWITCH_SNMP_COMMUNITY
+)
+print '============================================'
+print 'adding a switch..status: %s, response: %s' % (status, response)
+
+# if switch already exists, get one from all switches
+switch = None
+if status < 400:
+ switch = response
+else:
+ status, response = client.list_switches()
+ print '========================================='
+ print 'list switches status %s response %s' % (status, response)
+ if status >= 400:
+ sys.exit(1)
+ for switch_ in response:
+ if switch_['ip'] == SWITCH_IP:
+ switch = switch_
+ break
+
+switch_id = switch['id']
+switch_ip = switch['ip']
+print '======================'
+print 'switch has been set as %s' % switch_ip
+
+# wait till switch state becomes under_monitoring
+while switch['state'] != 'under_monitoring':
+ print 'waiting for state to become under_monitoring'
+ client.poll_switch(switch_id)
+ status, resp = client.get_switch(switch_id)
+ print '====================================='
+ print 'poll switch status %s response %s' % (status, resp)
+ switch = resp
+ print 'switch is in state: %s' % switch['state']
+ time.sleep(5)
+
+print '========================================='
+print 'switch state now is %s' % (switch['state'])
+
+# create a machine list
+machine_macs = {}
+machines = []
+for machine in PRESET_VALUES['MACHINES_TO_ADD']:
+ status, response = client.list_machines(mac=machine)
+ print '============================================'
+ print 'list machines status %s response %s' % (status, response)
+ if status >= 400:
+ sys.exit(1)
+ if status == 200 and response != []:
+ machine_id = response[0]['id']
+ machine_macs[machine_id] = response[0]['mac']
+        machines.extend(response)
+
+print '================================='
+print 'found machines are : %s' % machines
+
+machines_to_add = PRESET_VALUES['MACHINES_TO_ADD']
+if set(machine_macs.values()) != set(machines_to_add):
+ print 'only found macs %s while expected are %s' % (
+ machine_macs.values(), machines_to_add)
+ sys.exit(1)
+
+# list all adapters
+status, response = client.list_adapters()
+print '==============================='
+print 'all adapters are: %s' % response
+if status >= 400:
+ sys.exit(1)
+
+adapters = response
+adapter_id = None
+os_id = None
+flavor_id = None
+adapter_name = PRESET_VALUES['ADAPTER_NAME']
+os_pattern = re.compile(PRESET_VALUES['OS_NAME_PATTERN'])
+flavor_pattern = re.compile(PRESET_VALUES['FLAVOR_PATTERN'])
+for adapter in adapters:
+ if adapter_name == adapter['name']:
+ adapter_id = adapter['id']
+ for supported_os in adapter['supported_oses']:
+ if os_pattern.match(supported_os['name']):
+ os_id = supported_os['id']
+ break
+ for flavor in adapter['flavors']:
+ if flavor_pattern.match(flavor['name']):
+ flavor_id = flavor['id']
+ if adapter_id and os_id and flavor_id:
+ break
+
+print '======================================================='
+print 'using adapter %s os %s flavor %s to deploy cluster' % (
+ adapter_id, os_id, flavor_id
+)
+
+# add a cluster
+status, response = client.add_cluster(
+ CLUSTER_NAME,
+ adapter_id,
+ os_id,
+ flavor_id
+)
+print '==============================================================='
+print 'add cluster %s status %s: %s' % (CLUSTER_NAME, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.list_clusters(name=CLUSTER_NAME)
+print '================================================================'
+print 'list clusters %s status %s: %s' % (CLUSTER_NAME, status, response)
+if status >= 400:
+ sys.exit(1)
+
+cluster = response[0]
+cluster_id = cluster['id']
+
+print '=================='
+print 'cluster is %s' % cluster
+
+# Add hosts to the cluster
+machines_dict = {}
+machine_id_list = []
+for machine in machines:
+ id_mapping = {}
+ id_mapping['machine_id'] = machine['id']
+ machine_id_list.append(id_mapping)
+
+machines_dict['machines'] = machine_id_list
+
+status, response = client.add_hosts_to_cluster(
+ cluster_id, machines_dict
+)
+print '==================================='
+print 'add hosts %s to cluster status %s response %s' % (
+ machines_dict, status, response)
+if status >= 400:
+ sys.exit(1)
+
+# Add two subnets
+subnet_1 = '10.145.89.0/24'
+subnet_2 = '192.168.100.0/24'
+
+status, response = client.add_subnet(subnet_1)
+print '=================='
+print 'add subnet %s status %s: %s' % (subnet_1, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.add_subnet(subnet_2)
+print '=================='
+print 'add subnet %s status %s: %s' % (subnet_2, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, subnet1 = client.list_subnets(subnet=subnet_1)
+print '==========================================================='
+print 'list subnet %s status %s: %s' % (subnet_1, status, subnet1)
+if status >= 400:
+ sys.exit(1)
+
+status, subnet2 = client.list_subnets(subnet=subnet_2)
+print '==========================================================='
+print 'list subnet %s status %s: %s' % (subnet_2, status, subnet2)
+if status >= 400:
+ sys.exit(1)
+
+subnet1_id = subnet1[0]['id']
+subnet2_id = subnet2[0]['id']
+print '========================'
+print 'subnet1 has id: %s, subnet is %s' % (subnet1_id, subnet1)
+print 'subnet2 has id: %s, subnet is %s' % (subnet2_id, subnet2)
+
+# Add host network
+status, response = client.list_cluster_hosts(cluster_id)
+print '================================================'
+print 'list cluster hosts status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+host = response[0]
+host_id = host['id']
+print '=================='
+print 'host is: %s' % host
+
+status, response = client.add_host_network(
+ host_id,
+ 'eth0',
+ '10.145.89.200',
+ subnet1_id,
+ is_mgmt=True
+)
+print '======================='
+print 'add eth0 network status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.add_host_network(
+ host_id,
+ 'eth1',
+ '192.168.100.200',
+ subnet2_id,
+ is_promiscuous=True
+)
+print '======================='
+print 'add eth1 network status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+# Update os config to cluster
+cluster_os_config = {
+ 'general': {
+ 'language': PRESET_VALUES['LANGUAGE'],
+ 'timezone': PRESET_VALUES['TIMEZONE'],
+ 'http_proxy': PRESET_VALUES['PROXY'],
+ 'https_proxy': PRESET_VALUES['HTTPS_PROXY'],
+ 'no_proxy': PRESET_VALUES['NO_PROXY'],
+ 'ntp_server': PRESET_VALUES['NTP_SERVER'],
+ 'dns_servers': PRESET_VALUES['NAMESERVERS'],
+ 'domain': PRESET_VALUES['DOMAIN'],
+ 'search_path': PRESET_VALUES['SEARCH_PATH'],
+ 'default_gateway': PRESET_VALUES['GATEWAY']
+ },
+ 'server_credentials': {
+ 'username': PRESET_VALUES['SERVER_USERNAME'],
+ 'password': PRESET_VALUES['SERVER_PASSWORD']
+ },
+ 'partition': {
+ '/var': {
+ 'percentage': VAR_PERCENTAGE,
+ },
+ '/home': {
+ 'percentage': HOME_PERCENTAGE,
+ }
+ }
+}
+
+
+cluster_package_config = {
+ 'security': {
+ 'service_credentials': {
+ 'image': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'compute': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'dashboard': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'identity': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'metering': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'rabbitmq': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'volume': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ },
+ 'mysql': {
+ 'username': SERVICE_USERNAME,
+ 'password': SERVICE_PASSWORD
+ }
+ },
+ 'console_credentials': {
+ 'admin': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'compute': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'dashboard': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'image': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'metering': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'network': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'object-store': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ },
+ 'volume': {
+ 'username': CONSOLE_USERNAME,
+ 'password': CONSOLE_PASSWORD
+ }
+ }
+ },
+ 'network_mapping': {
+ 'management': MANAGEMENT_NIC,
+ 'tenant': TENANT_NIC,
+ 'storage': STORAGE_NIC,
+ 'public': PUBLIC_NIC
+ }
+}
+
+status, response = client.update_cluster_config(
+ cluster_id,
+ cluster_os_config,
+ cluster_package_config
+)
+
+print '======================================='
+print 'cluster %s update status %s: %s' % (
+ cluster_id, status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.update_cluster_host(
+ cluster_id, host_id, roles=PRESET_VALUES['ROLES_LIST'])
+print '================================================='
+print 'update cluster host %s/%s status %s: %s' % (
+ cluster_id, host_id, status, response)
+if status >= 400:
+ sys.exit(1)
+
+# Review and deploy
+status, response = client.review_cluster(
+ cluster_id, review={'hosts': [host_id]})
+print '======================================='
+print 'reviewing cluster status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
+
+status, response = client.deploy_cluster(
+ cluster_id, deploy={'hosts': [host_id]})
+print '======================================='
+print 'deploy cluster status %s: %s' % (status, response)
+if status >= 400:
+ sys.exit(1)
diff --git a/compass-tasks/apiclient/restful.py b/compass-tasks/apiclient/restful.py
new file mode 100644
index 0000000..bb82922
--- /dev/null
+++ b/compass-tasks/apiclient/restful.py
@@ -0,0 +1,1102 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass api client library.
+"""
+
+import json
+import logging
+import requests
+
+
+class Client(object):
+ """compass restful api wrapper"""
+
+ def __init__(self, url, headers=None, proxies=None, stream=None):
+ logging.info('create api client %s', url)
+ self.url_ = url
+ self.session_ = requests.Session()
+
+ if headers:
+ self.session_.headers.update(headers)
+ self.session_.headers.update({
+ 'Accept': 'application/json'
+ })
+
+ if proxies is not None:
+ self.session_.proxies = proxies
+
+ if stream is not None:
+ self.session_.stream = stream
+
+ def __del__(self):
+ self.session_.close()
+
+ @classmethod
+ def _get_response(cls, resp):
+ response_object = {}
+ try:
+ response_object = resp.json()
+ except Exception as error:
+ logging.error('failed to load object from %s: %s',
+ resp.url, resp.content)
+ logging.exception(error)
+ response_object['status'] = 'Json Parsing Failed'
+ response_object['message'] = resp.content
+
+ return resp.status_code, response_object
+
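+    # Every request helper below returns the (status, body) pair produced
+    # by _get_response; a typical caller pattern (illustrative) is:
+    #   status, resp = client.list_switches()
+    #   if status >= 400:
+    #       ...  # resp carries 'status' and 'message' on failure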
+ def _get(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('get %s with data %s', url, data)
+ if data:
+ resp = self.session_.get(url, params=data)
+ else:
+ resp = self.session_.get(url)
+
+ return self._get_response(resp)
+
+ def _post(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('post %s with data %s', url, data)
+ if data:
+ resp = self.session_.post(url, json.dumps(data))
+ else:
+ resp = self.session_.post(url)
+
+ return self._get_response(resp)
+
+ def _put(self, req_url, data=None):
+ """encapsulate put method."""
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('put %s with data %s', url, data)
+ if data:
+ resp = self.session_.put(url, json.dumps(data))
+ else:
+ resp = self.session_.put(url)
+
+ return self._get_response(resp)
+
+ def _patch(self, req_url, data=None):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('patch %s with data %s', url, data)
+ if data:
+ resp = self.session_.patch(url, json.dumps(data))
+ else:
+ resp = self.session_.patch(url)
+
+ return self._get_response(resp)
+
+ def _delete(self, req_url):
+ url = '%s%s' % (self.url_, req_url)
+ logging.debug('delete %s', url)
+ return self._get_response(self.session_.delete(url))
+
+ def login(self, email, password):
+ credential = {}
+ credential['email'] = email
+ credential['password'] = password
+ return self._post('/users/login', data=credential)
+
+ def get_token(self, email, password):
+ credential = {}
+ credential['email'] = email
+ credential['password'] = password
+ status, resp = self._post('/users/token', data=credential)
+ if status < 400:
+ self.session_.headers.update({'X-Auth-Token': resp['token']})
+ return status, resp
+
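+    # Illustrative token flow (placeholder credentials): a successful
+    # get_token call attaches X-Auth-Token to the session, so subsequent
+    # requests are authenticated automatically:
+    #   status, resp = client.get_token('admin@huawei.com', 'admin')
+    #   status, users = client.get_users()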
+ def get_users(self):
+ users = self._get('/users')
+ return users
+
+ def list_switches(
+ self,
+ switch_ips=None,
+ switch_ip_networks=None):
+ """list switches."""
+ params = {}
+ if switch_ips:
+ params['switchIp'] = switch_ips
+
+ if switch_ip_networks:
+ params['switchIpNetwork'] = switch_ip_networks
+
+ switchlist = self._get('/switches', data=params)
+ return switchlist
+
+ def get_switch(self, switch_id):
+ return self._get('/switches/%s' % switch_id)
+
+ def add_switch(
+ self,
+ switch_ip,
+ version=None,
+ community=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['ip'] = switch_ip
+ data['credentials'] = {}
+ if version:
+ data['credentials']['version'] = version
+
+ if community:
+ data['credentials']['community'] = community
+
+ return self._post('/switches', data=data)
+
+    def update_switch(self, switch_id, state='initialized',
+                      version='2c', community='public', raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ data['credentials'] = {}
+ if version:
+ data['credentials']['version'] = version
+
+ if community:
+ data['credentials']['community'] = community
+
+ if state:
+ data['state'] = state
+
+ return self._put('/switches/%s' % switch_id, data=data)
+
+ def delete_switch(self, switch_id):
+ return self._delete('/switches/%s' % switch_id)
+
+ def list_switch_machines(self, switch_id, port=None, vlans=None,
+ tag=None, location=None):
+ data = {}
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/switches/%s/machines' % switch_id, data=data)
+
+ def get_switch_machine(self, switch_id, machine_id):
+ return self._get('/switches/%s/machines/%s' % (switch_id, machine_id))
+
+ def list_switch_machines_hosts(self, switch_id, port=None, vlans=None,
+ mac=None, tag=None, location=None,
+ os_name=None, os_id=None):
+
+ data = {}
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/switches/%s/machines-hosts' % switch_id, data=data)
+
+ def add_switch_machine(self, switch_id, mac=None, port=None,
+ vlans=None, ipmi_credentials=None,
+ tag=None, location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if mac:
+ data['mac'] = mac
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._post('/switches/%s/machines' % switch_id, data=data)
+
+ def update_switch_machine(self, switch_id, machine_id, port=None,
+ vlans=None, ipmi_credentials=None, tag=None,
+ location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._put('/switches/%s/machines/%s' %
+ (switch_id, machine_id), data=data)
+
+ def delete_switch_machine(self, switch_id, machine_id):
+ return self._delete('/switches/%s/machines/%s' %
+ (switch_id, machine_id))
+
+ # test these
+ def poll_switch(self, switch_id):
+ data = {}
+ data['find_machines'] = None
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def add_group_switch_machines(self, switch_id, group_machine_ids):
+ data = {}
+ data['add_machines'] = group_machine_ids
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def remove_group_switch_machines(self, switch_id, group_machine_ids):
+ data = {}
+ data['remove_machines'] = group_machine_ids
+ return self._post('/switches/%s/action' % switch_id, data=data)
+
+ def update_group_switch_machines(self, switch_id, group_machines):
+ data = {}
+ data['set_machines'] = group_machines
+ return self._post('/switches/%s/action' % switch_id, data=data)
+ # end
+
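+    # The /switches/<id>/action endpoint above multiplexes on the key in
+    # the posted body ('find_machines', 'add_machines', 'remove_machines'
+    # or 'set_machines'); poll_switch, for example, posts
+    # {'find_machines': None}.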
+ def list_switchmachines(self, switch_ip_int=None, port=None, vlans=None,
+ mac=None, tag=None, location=None):
+ data = {}
+ if switch_ip_int:
+ data['switch_ip_int'] = switch_ip_int
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/switch-machines', data=data)
+
+ def list_switchmachines_hosts(self, switch_ip_int=None, port=None,
+ vlans=None, mac=None, tag=None,
+ location=None, os_name=None, os_id=None):
+
+ data = {}
+ if switch_ip_int:
+ data['switch_ip_int'] = switch_ip_int
+
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/switches-machines-hosts', data=data)
+
+ def show_switchmachine(self, switchmachine_id):
+ return self._get('/switch-machines/%s' % switchmachine_id)
+
+ def update_switchmachine(self, switchmachine_id,
+ port=None, vlans=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if port:
+ data['port'] = port
+
+ if vlans:
+ data['vlans'] = vlans
+
+ return self._put('/switch-machines/%s' % switchmachine_id, data=data)
+
+ def patch_switchmachine(self, switchmachine_id,
+ vlans=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ elif vlans:
+ data['vlans'] = vlans
+
+ return self._patch('/switch-machines/%s' % switchmachine_id, data=data)
+
+ def delete_switchmachine(self, switchmachine_id):
+ return self._delete('/switch-machines/%s' % switchmachine_id)
+
+ def list_machines(self, mac=None, tag=None, location=None):
+ data = {}
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._get('/machines', data=data)
+
+    def get_machine(self, machine_id):
+        return self._get('/machines/%s' % machine_id)
+
+ def update_machine(self, machine_id, ipmi_credentials=None, tag=None,
+ location=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._put('/machines/%s' % machine_id, data=data)
+
+ def patch_machine(self, machine_id, ipmi_credentials=None,
+ tag=None, location=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ipmi_credentials:
+ data['ipmi_credentials'] = ipmi_credentials
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ return self._patch('/machines/%s' % machine_id, data=data)
+
+ def delete_machine(self, machine_id):
+        return self._delete('/machines/%s' % machine_id)
+
+ def list_subnets(self, subnet=None, name=None):
+ data = {}
+ if subnet:
+ data['subnet'] = subnet
+
+ if name:
+ data['name'] = name
+
+ return self._get('/subnets', data=data)
+
+ def get_subnet(self, subnet_id):
+ return self._get('/subnets/%s' % subnet_id)
+
+ def add_subnet(self, subnet, name=None, raw_data=None):
+ data = {}
+ data['subnet'] = subnet
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if name:
+ data['name'] = name
+
+ return self._post('/subnets', data=data)
+
+ def update_subnet(self, subnet_id, subnet=None,
+ name=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if subnet:
+ data['subnet'] = subnet
+
+ if name:
+ data['name'] = name
+ return self._put('/subnets/%s' % subnet_id, data=data)
+
+ def delete_subnet(self, subnet_id):
+ return self._delete('/subnets/%s' % subnet_id)
+
+ def list_adapters(self, name=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ return self._get('/adapters', data=data)
+
+ def get_adapter(self, adapter_id):
+ return self._get('/adapters/%s' % adapter_id)
+
+ def get_adapter_roles(self, adapter_id):
+ return self._get('/adapters/%s/roles' % adapter_id)
+
+ def get_adapter_metadata(self, adapter_id):
+ return self._get('/adapters/%s/metadata' % adapter_id)
+
+ def get_os_metadata(self, os_id):
+ return self._get('/oses/%s/metadata' % os_id)
+
+ def list_clusters(self, name=None, os_name=None,
+ owner=None,
+ adapter_id=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if owner:
+ data['owner'] = owner
+
+ if adapter_id:
+ data['adapter_id'] = adapter_id
+
+ return self._get('/clusters', data=data)
+
+ def get_cluster(self, cluster_id):
+ return self._get('/clusters/%s' % cluster_id)
+
+ def add_cluster(self, name, adapter_id, os_id,
+ flavor_id=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if flavor_id:
+ data['flavor_id'] = flavor_id
+ data['name'] = name
+ data['adapter_id'] = adapter_id
+ data['os_id'] = os_id
+
+ return self._post('/clusters', data=data)
+
+ def update_cluster(self, cluster_id, name=None,
+ reinstall_distributed_system=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_distributed_system:
+ data['reinstall_distributed_system'] = (
+ reinstall_distributed_system
+ )
+ return self._put('/clusters/%s' % cluster_id, data=data)
+
+ def delete_cluster(self, cluster_id):
+ return self._delete('/clusters/%s' % cluster_id)
+
+ def get_cluster_config(self, cluster_id):
+ return self._get('/clusters/%s/config' % cluster_id)
+
+ def get_cluster_metadata(self, cluster_id):
+ return self._get('/clusters/%s/metadata' % cluster_id)
+
+ def update_cluster_config(self, cluster_id, os_config=None,
+ package_config=None, config_step=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ if config_step:
+ data['config_step'] = config_step
+
+ return self._put('/clusters/%s/config' % cluster_id, data=data)
+
+ def patch_cluster_config(self, cluster_id, os_config=None,
+ package_config=None, config_step=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ if config_step:
+ data['config_step'] = config_step
+
+ return self._patch('/clusters/%s/config' % cluster_id, data=data)
+
+ def delete_cluster_config(self, cluster_id):
+ return self._delete('/clusters/%s/config' % cluster_id)
+
+ # test these
+ def add_hosts_to_cluster(self, cluster_id, hosts):
+ data = {}
+ data['add_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def set_hosts_in_cluster(self, cluster_id, hosts):
+ data = {}
+ data['set_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def remove_hosts_from_cluster(self, cluster_id, hosts):
+ data = {}
+ data['remove_hosts'] = hosts
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+    def review_cluster(self, cluster_id, review=None):
+        data = {}
+        data['review'] = review or {}
+        return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+    def deploy_cluster(self, cluster_id, deploy=None):
+        data = {}
+        data['deploy'] = deploy or {}
+        return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+    def redeploy_cluster(self, cluster_id, deploy=None):
+        data = {}
+        data['redeploy'] = deploy or {}
+        return self._post('/clusters/%s/action' % cluster_id, data=data)
+
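+    # Cluster-level actions follow the same /clusters/<id>/action pattern;
+    # a minimal review-then-deploy sketch (mirroring example.py above):
+    #   client.review_cluster(cluster_id, review={'hosts': [host_id]})
+    #   client.deploy_cluster(cluster_id, deploy={'hosts': [host_id]})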
+ def get_cluster_state(self, cluster_id):
+ return self._get('/clusters/%s/state' % cluster_id)
+
+ def list_cluster_hosts(self, cluster_id):
+ return self._get('/clusters/%s/hosts' % cluster_id)
+
+ def list_clusterhosts(self):
+ return self._get('/clusterhosts')
+
+ def get_cluster_host(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s' % (cluster_id, host_id))
+
+ def get_clusterhost(self, clusterhost_id):
+ return self._get('/clusterhosts/%s' % clusterhost_id)
+
+ def add_cluster_host(self, cluster_id, machine_id=None, name=None,
+ reinstall_os=None, raw_data=None):
+ data = {}
+ data['machine_id'] = machine_id
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_os:
+ data['reinstall_os'] = reinstall_os
+
+ return self._post('/clusters/%s/hosts' % cluster_id, data=data)
+
+ def delete_cluster_host(self, cluster_id, host_id):
+ return self._delete('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id))
+
+ def delete_clusterhost(self, clusterhost_id):
+ return self._delete('/clusterhosts/%s' % clusterhost_id)
+
+ def get_cluster_host_config(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id))
+
+ def get_clusterhost_config(self, clusterhost_id):
+ return self._get('/clusterhosts/%s/config' % clusterhost_id)
+
+ def update_cluster_host_config(self, cluster_id, host_id,
+ os_config=None,
+ package_config=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._put('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id), data=data)
+
+ def update_clusterhost_config(self, clusterhost_id, os_config=None,
+ package_config=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._put('/clusterhosts/%s/config' % clusterhost_id,
+ data=data)
+
+ def patch_cluster_host_config(self, cluster_id, host_id,
+ os_config=None,
+ package_config=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+ return self._patch('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id), data=data)
+
+ def patch_clusterhost_config(self, clusterhost_id, os_config=None,
+ package_config=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+
+ else:
+ if os_config:
+ data['os_config'] = os_config
+
+ if package_config:
+ data['package_config'] = package_config
+
+        return self._patch('/clusterhosts/%s/config' % clusterhost_id,
+                           data=data)
+
+ def delete_cluster_host_config(self, cluster_id, host_id):
+ return self._delete('/clusters/%s/hosts/%s/config' %
+ (cluster_id, host_id))
+
+ def delete_clusterhost_config(self, clusterhost_id):
+ return self._delete('/clusterhosts/%s/config' % clusterhost_id)
+
+ def get_cluster_host_state(self, cluster_id, host_id):
+ return self._get('/clusters/%s/hosts/%s/state' %
+ (cluster_id, host_id))
+
+ def update_cluster_host(self, cluster_id, host_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id), data=data)
+
+ def update_clusterhost(self, clusterhost_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusterhosts/%s' % clusterhost_id, data=data)
+
+ def patch_cluster_host(self, cluster_id, host_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._patch('/clusters/%s/hosts/%s' %
+ (cluster_id, host_id), data=data)
+
+ def patch_clusterhost(self, clusterhost_id,
+ roles=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if roles:
+ data['roles'] = roles
+
+ return self._patch('/clusterhosts/%s' % clusterhost_id, data=data)
+
+ def get_clusterhost_state(self, clusterhost_id):
+ return self._get('/clusterhosts/%s/state' % clusterhost_id)
+
+ def update_cluster_host_state(self, cluster_id, host_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+ return self._put('/clusters/%s/hosts/%s/state' % (cluster_id, host_id),
+ data=data)
+
+ def update_clusterhost_state(self, clusterhost_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+ return self._put('/clusterhosts/%s/state' % clusterhost_id, data=data)
+
+ def list_hosts(self, name=None, os_name=None, owner=None, mac=None):
+ data = {}
+ if name:
+ data['name'] = name
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if owner:
+ data['owner'] = owner
+
+ if mac:
+ data['mac'] = mac
+
+ return self._get('/hosts', data=data)
+
+ def get_host(self, host_id):
+ return self._get('/hosts/%s' % host_id)
+
+ def list_machines_or_hosts(self, mac=None, tag=None,
+ location=None, os_name=None,
+ os_id=None):
+ data = {}
+ if mac:
+ data['mac'] = mac
+
+ if tag:
+ data['tag'] = tag
+
+ if location:
+ data['location'] = location
+
+ if os_name:
+ data['os_name'] = os_name
+
+ if os_id:
+ data['os_id'] = os_id
+
+ return self._get('/machines-hosts', data=data)
+
+ def get_machine_or_host(self, host_id):
+ return self._get('/machines-hosts/%s' % host_id)
+
+ def update_host(self, host_id, name=None,
+ reinstall_os=None, raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if name:
+ data['name'] = name
+
+ if reinstall_os:
+ data['reinstall_os'] = reinstall_os
+
+ return self._put('/hosts/%s' % host_id, data=data)
+
+ def delete_host(self, host_id):
+ return self._delete('/hosts/%s' % host_id)
+
+ def get_host_clusters(self, host_id):
+ return self._get('/hosts/%s/clusters' % host_id)
+
+ def get_host_config(self, host_id):
+ return self._get('/hosts/%s/config' % host_id)
+
+ def update_host_config(self, host_id, os_config, raw_data=None):
+ data = {}
+ data['os_config'] = os_config
+ if raw_data:
+ data.update(raw_data)
+
+ return self._put('/hosts/%s/config' % host_id, data=data)
+
+ def patch_host_config(self, host_id, os_config, raw_data=None):
+ data = {}
+ data['os_config'] = os_config
+ if raw_data:
+ data.update(raw_data)
+
+ return self._patch('/hosts/%s/config' % host_id, data=data)
+
+ def delete_host_config(self, host_id):
+ return self._delete('/hosts/%s/config' % host_id)
+
+ def list_host_networks(self, host_id, interface=None, ip=None,
+ subnet=None, is_mgmt=None, is_promiscuous=None):
+ data = {}
+ if interface:
+ data['interface'] = interface
+
+ if ip:
+ data['ip'] = ip
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._get('/hosts/%s/networks' % host_id, data=data)
+
+ def list_all_host_networks(self, interface=None, ip=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None):
+ data = {}
+ if interface:
+ data['interface'] = interface
+
+ if ip:
+ data['ip'] = ip
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._get('/host-networks', data=data)
+
+ def get_host_network(self, host_id, host_network_id):
+ return self._get('/hosts/%s/networks/%s' %
+ (host_id, host_network_id))
+
+ def get_network_for_all_hosts(self, host_network_id):
+ return self._get('/host-networks/%s' % host_network_id)
+
+ def add_host_network(self, host_id, interface, ip, subnet_id,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ data['interface'] = interface
+ data['ip'] = ip
+ data['subnet_id'] = subnet_id
+ if raw_data:
+ data.update(raw_data)
+ else:
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._post('/hosts/%s/networks' % host_id, data=data)
+
+ def update_host_network(self, host_id, host_network_id,
+ ip=None, subnet_id=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ip:
+ data['ip'] = ip
+
+ if subnet_id:
+ data['subnet_id'] = subnet_id
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._put('/hosts/%s/networks/%s' %
+ (host_id, host_network_id), data=data)
+
+ def update_hostnetwork(self, host_network_id, ip=None,
+ subnet_id=None, subnet=None,
+ is_mgmt=None, is_promiscuous=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if ip:
+ data['ip'] = ip
+
+ if subnet_id:
+ data['subnet_id'] = subnet_id
+
+ if subnet:
+ data['subnet'] = subnet
+
+ if is_mgmt:
+ data['is_mgmt'] = is_mgmt
+
+ if is_promiscuous:
+ data['is_promiscuous'] = is_promiscuous
+
+ return self._put('/host-networks/%s' % host_network_id,
+ data=data)
+
+ def delete_host_network(self, host_id, host_network_id):
+        return self._delete('/hosts/%s/networks/%s' %
+                            (host_id, host_network_id))
+
+ def delete_hostnetwork(self, host_network_id):
+ return self._delete('/host-networks/%s' % host_network_id)
+
+ def get_host_state(self, host_id):
+ return self._get('/hosts/%s/state' % host_id)
+
+ def update_host_state(self, host_id, state=None,
+ percentage=None, message=None,
+ raw_data=None):
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if state:
+ data['state'] = state
+
+ if percentage:
+ data['percentage'] = percentage
+
+ if message:
+ data['message'] = message
+
+        return self._put('/hosts/%s/state' % host_id, data=data)
+
+ def poweron_host(self, host_id):
+ data = {}
+ data['poweron'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def poweroff_host(self, host_id):
+ data = {}
+ data['poweroff'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def reset_host(self, host_id):
+ data = {}
+ data['reset'] = True
+
+ return self._post('/hosts/%s/action' % host_id, data=data)
+
+ def clusterhost_ready(self, clusterhost_name):
+ data = {}
+ data['ready'] = True
+
+ return self._post('/clusterhosts/%s/state_internal' %
+ clusterhost_name, data=data)
diff --git a/compass-tasks/apiclient/v1/__init__.py b/compass-tasks/apiclient/v1/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-tasks/apiclient/v1/__init__.py
diff --git a/compass-tasks/apiclient/v1/example.py b/compass-tasks/apiclient/v1/example.py
new file mode 100755
index 0000000..6f7a7f7
--- /dev/null
+++ b/compass-tasks/apiclient/v1/example.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Example code to deploy a cluster by compass client api."""
+import os
+import re
+import requests
+import sys
+import time
+
+from compass.apiclient.restful import Client
+
+
+COMPASS_SERVER_URL = 'http://127.0.0.1/api'
+SWITCH_IP = '10.145.81.220'
+SWITCH_SNMP_VERSION = 'v2c'
+SWITCH_SNMP_COMMUNITY = 'public'
+# MACHINES_TO_ADD = ['00:11:20:30:40:01']
+CLUSTER_NAME = 'cluster2'
+HOST_NAME_PREFIX = 'host'
+SERVER_USERNAME = 'root'
+SERVER_PASSWORD = 'root'
+SERVICE_USERNAME = 'service'
+SERVICE_PASSWORD = 'service'
+CONSOLE_USERNAME = 'console'
+CONSOLE_PASSWORD = 'console'
+HA_VIP = ''
+# NAMESERVERS = '192.168.10.6'
+SEARCH_PATH = 'ods.com'
+# GATEWAY = '192.168.10.6'
+# PROXY = 'http://192.168.10.6:3128'
+# NTP_SERVER = '192.168.10.6'
+MANAGEMENT_IP_START = '192.168.10.130'
+MANAGEMENT_IP_END = '192.168.10.254'
+MANAGEMENT_IP_GATEWAY = '192.168.10.1'
+MANAGEMENT_NETMASK = '255.255.255.0'
+MANAGEMENT_NIC = 'eth0'
+MANAGEMENT_PROMISC = 0
+TENANT_IP_START = '192.168.10.100'
+TENANT_IP_END = '192.168.10.255'
+TENANT_IP_GATEWAY = '192.168.10.1'
+TENANT_NETMASK = '255.255.255.0'
+TENANT_NIC = 'eth0'
+TENANT_PROMISC = 0
+PUBLIC_IP_START = '12.234.32.100'
+PUBLIC_IP_END = '12.234.32.255'
+PUBLIC_IP_GATEWAY = '12.234.32.1'
+PUBLIC_NETMASK = '255.255.255.0'
+PUBLIC_NIC = 'eth1'
+PUBLIC_PROMISC = 1
+STORAGE_IP_START = '172.16.100.100'
+STORAGE_IP_END = '172.16.100.255'
+STORAGE_NETMASK = '255.255.255.0'
+STORAGE_IP_GATEWAY = '172.16.100.1'
+STORAGE_NIC = 'eth0'
+STORAGE_PROMISC = 0
+HOME_PERCENTAGE = 5
+TMP_PERCENTAGE = 5
+VAR_PERCENTAGE = 10
+# ROLES_LIST = [['os-dashboard']]
+
+PRESET_VALUES = {
+ 'NAMESERVERS': '192.168.10.1',
+ 'NTP_SERVER': '192.168.10.1',
+ 'GATEWAY': '192.168.10.1',
+ 'PROXY': 'http://192.168.10.1:3128',
+ 'ROLES_LIST': 'os-dashboard',
+ 'MACHINES_TO_ADD': '00:11:20:30:40:01',
+ 'BUILD_TIMEOUT': 60
+}
+for v in PRESET_VALUES:
+ if v in os.environ.keys():
+ PRESET_VALUES[v] = os.environ.get(v)
+        print ('%s is set to %s by env variables' % (v, PRESET_VALUES[v]))
+ else:
+ print (PRESET_VALUES[v])
+
+# get apiclient object.
+client = Client(COMPASS_SERVER_URL)
+
+
+# get all switches.
+status, resp = client.get_switches()
+print 'get all switches status: %s resp: %s' % (status, resp)
+
+# add a switch.
+status, resp = client.add_switch(
+ SWITCH_IP, version=SWITCH_SNMP_VERSION,
+ community=SWITCH_SNMP_COMMUNITY)
+
+print 'add a switch status: %s resp: %s' % (status, resp)
+
+if status < 400:
+ switch = resp['switch']
+else:
+ status, resp = client.get_switches()
+ print 'get all switches status: %s resp: %s' % (status, resp)
+ switch = None
+ for switch in resp['switches']:
+ if switch['ip'] == SWITCH_IP:
+ break
+
+switch_id = switch['id']
+switch_ip = switch['ip']
+
+
+# if the switch is not under_monitoring, wait for the poll switch task to
+# update the switch information and change the switch state.
+while switch['state'] != 'under_monitoring':
+ print 'waiting for the switch into under_monitoring'
+ status, resp = client.get_switch(switch_id)
+ print 'get switch %s status: %s, resp: %s' % (switch_id, status, resp)
+ switch = resp['switch']
+ time.sleep(10)
+
+
+# get machines connected to the switch.
+status, resp = client.get_machines(switch_id=switch_id)
+print 'get all machines under switch %s status: %s, resp: %s' % (
+ switch_id, status, resp)
+machines = {}
+MACHINES_TO_ADD = PRESET_VALUES['MACHINES_TO_ADD'].split()
+for machine in resp['machines']:
+ mac = machine['mac']
+ if mac in MACHINES_TO_ADD:
+ machines[machine['id']] = mac
+
+print 'machine to add: %s' % machines
+
+if set(machines.values()) != set(MACHINES_TO_ADD):
+ print 'only found macs %s while expected are %s' % (
+ machines.values(), MACHINES_TO_ADD)
+ sys.exit(1)
+
+
+# get adapters.
+status, resp = client.get_adapters()
+print 'get all adapters status: %s, resp: %s' % (status, resp)
+adapter_ids = []
+for adapter in resp['adapters']:
+ adapter_ids.append(adapter['id'])
+
+adapter_id = adapter_ids[0]
+print 'adapter for deploying a cluster: %s' % adapter_id
+
+
+# add a cluster.
+status, resp = client.add_cluster(
+ cluster_name=CLUSTER_NAME, adapter_id=adapter_id)
+print 'add cluster %s status: %s, resp: %s' % (CLUSTER_NAME, status, resp)
+cluster = resp['cluster']
+cluster_id = cluster['id']
+
+# add hosts to the cluster.
+status, resp = client.add_hosts(
+ cluster_id=cluster_id,
+ machine_ids=machines.keys())
+print 'add hosts to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+host_ids = []
+for host in resp['cluster_hosts']:
+ host_ids.append(host['id'])
+
+print 'added hosts: %s' % host_ids
+
+
+# set cluster security
+status, resp = client.set_security(
+ cluster_id, server_username=SERVER_USERNAME,
+ server_password=SERVER_PASSWORD,
+ service_username=SERVICE_USERNAME,
+ service_password=SERVICE_PASSWORD,
+ console_username=CONSOLE_USERNAME,
+ console_password=CONSOLE_PASSWORD)
+print 'set security config to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+
+
+# set cluster networking
+status, resp = client.set_networking(
+ cluster_id,
+ nameservers=PRESET_VALUES["NAMESERVERS"],
+ search_path=SEARCH_PATH,
+ gateway=PRESET_VALUES["GATEWAY"],
+ proxy=PRESET_VALUES["PROXY"],
+ ntp_server=PRESET_VALUES["NTP_SERVER"],
+ ha_vip=HA_VIP,
+ management_ip_start=MANAGEMENT_IP_START,
+ management_ip_end=MANAGEMENT_IP_END,
+ management_netmask=MANAGEMENT_NETMASK,
+ management_nic=MANAGEMENT_NIC,
+ management_gateway=MANAGEMENT_IP_GATEWAY,
+ management_promisc=MANAGEMENT_PROMISC,
+ tenant_ip_start=TENANT_IP_START,
+ tenant_ip_end=TENANT_IP_END,
+ tenant_netmask=TENANT_NETMASK,
+ tenant_nic=TENANT_NIC,
+ tenant_gateway=TENANT_IP_GATEWAY,
+ tenant_promisc=TENANT_PROMISC,
+ public_ip_start=PUBLIC_IP_START,
+ public_ip_end=PUBLIC_IP_END,
+ public_netmask=PUBLIC_NETMASK,
+ public_nic=PUBLIC_NIC,
+ public_gateway=PUBLIC_IP_GATEWAY,
+ public_promisc=PUBLIC_PROMISC,
+ storage_ip_start=STORAGE_IP_START,
+ storage_ip_end=STORAGE_IP_END,
+ storage_netmask=STORAGE_NETMASK,
+ storage_nic=STORAGE_NIC,
+ storage_gateway=STORAGE_IP_GATEWAY,
+ storage_promisc=STORAGE_PROMISC)
+print 'set networking config to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+
+
+# set partiton of each host in cluster
+status, resp = client.set_partition(
+ cluster_id,
+ home_percentage=HOME_PERCENTAGE,
+ tmp_percentage=TMP_PERCENTAGE,
+ var_percentage=VAR_PERCENTAGE)
+print 'set partition config to cluster %s status: %s, resp: %s' % (
+ cluster_id, status, resp)
+
+
+# set each host config in cluster.
+ROLES_LIST = [PRESET_VALUES['ROLES_LIST'].split()]
+for host_id in host_ids:
+ if ROLES_LIST:
+ roles = ROLES_LIST.pop(0)
+ else:
+ roles = []
+ status, resp = client.update_host_config(
+ host_id, hostname='%s%s' % (HOST_NAME_PREFIX, host_id),
+ roles=roles)
+ print 'set roles to host %s status: %s, resp: %s' % (
+ host_id, status, resp)
+
+
+# deploy cluster.
+status, resp = client.deploy_hosts(cluster_id)
+print 'deploy cluster %s status: %s, resp: %s' % (cluster_id, status, resp)
+
+
+# get installing progress.
+BUILD_TIMEOUT = float(PRESET_VALUES['BUILD_TIMEOUT'])
+timeout = time.time() + BUILD_TIMEOUT * 60
+while True:
+ status, resp = client.get_cluster_installing_progress(cluster_id)
+ print 'get cluster %s installing progress status: %s, resp: %s' % (
+ cluster_id, status, resp)
+ progress = resp['progress']
+ if (
+ progress['state'] not in ['UNINITIALIZED', 'INSTALLING'] or
+ progress['percentage'] >= 1.0
+ ):
+ break
+ if (
+ time.time() > timeout
+ ):
+ raise Exception("Timeout! The system is not ready in time.")
+
+ for host_id in host_ids:
+ status, resp = client.get_host_installing_progress(host_id)
+ print 'get host %s installing progress status: %s, resp: %s' % (
+ host_id, status, resp)
+
+ time.sleep(60)
+
+
+status, resp = client.get_dashboard_links(cluster_id)
+print 'get cluster %s dashboardlinks status: %s, resp: %s' % (
+ cluster_id, status, resp)
+dashboardlinks = resp['dashboardlinks']
+if not dashboardlinks.keys():
+ raise Exception("Dashboard link is not found!")
+for x in dashboardlinks.keys():
+ if x in ("os-dashboard", "os-controller"):
+ dashboardurl = dashboardlinks.get(x)
+ if dashboardurl is None:
+ raise Exception("No dashboard link is found")
+ r = requests.get(dashboardurl, verify=False)
+ r.raise_for_status()
+ match = re.search(
+ r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)', r.text)
+ if match:
+ print 'dashboard login page can be downloaded'
+ break
+ print (
+ 'dashboard login page failed to be downloaded\n'
+ 'the context is:\n%s\n') % r.text
+ raise Exception("os-dashboard is not properly installed!")
diff --git a/compass-tasks/apiclient/v1/restful.py b/compass-tasks/apiclient/v1/restful.py
new file mode 100644
index 0000000..3fb235c
--- /dev/null
+++ b/compass-tasks/apiclient/v1/restful.py
@@ -0,0 +1,655 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass api client library.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import json
+import logging
+import requests
+
+
+class Client(object):
+ """wrapper for compass restful api.
+
+ .. note::
+ Every api client method returns (status as int, resp as dict).
+ If the api succeeds, the status is 2xx, the resp includes
+ {'status': 'OK'} and other keys depend on method.
+ If the api fails, the status is 4xx, the resp includes {
+ 'status': '...', 'message': '...'}
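+
+    Example (illustrative)::
+
+        client = Client('http://127.0.0.1/api')
+        status, resp = client.get_switches()
+        if status >= 400:
+            pass  # resp carries 'status' and 'message' on failure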
+ """
+
+ def __init__(self, url, headers=None, proxies=None, stream=None):
+ """Restful api client initialization.
+
+ :param url: url to the compass web service.
+ :type url: str.
+ :param headers: http header sent in each restful request.
+        :type headers: dict of header name (str) to header value (str).
+ :param proxies: the proxy address for each protocol.
+ :type proxies: dict of protocol (str) to proxy url (str).
+        :param stream: whether the restful response should be streamed.
+ :type stream: bool.
+ """
+ self.url_ = url
+ self.session_ = requests.Session()
+ if headers:
+ self.session_.headers = headers
+
+ if proxies is not None:
+ self.session_.proxies = proxies
+
+ if stream is not None:
+ self.session_.stream = stream
+
+ def __del__(self):
+ self.session_.close()
+
+ @classmethod
+ def _get_response(cls, resp):
+ """decapsulate the resp to status code and python formatted data."""
+ resp_obj = {}
+ try:
+ resp_obj = resp.json()
+ except Exception as error:
+ logging.error('failed to load object from %s: %s',
+ resp.url, resp.content)
+ logging.exception(error)
+ resp_obj['status'] = 'Json Parsing Failure'
+ resp_obj['message'] = resp.content
+
+ return resp.status_code, resp_obj
+
+ def _get(self, relative_url, params=None):
+ """encapsulate get method."""
+ url = '%s%s' % (self.url_, relative_url)
+ if params:
+ resp = self.session_.get(url, params=params)
+ else:
+ resp = self.session_.get(url)
+
+ return self._get_response(resp)
+
+ def _post(self, relative_url, data=None):
+ """encapsulate post method."""
+ url = '%s%s' % (self.url_, relative_url)
+ if data:
+ resp = self.session_.post(url, json.dumps(data))
+ else:
+ resp = self.session_.post(url)
+
+ return self._get_response(resp)
+
+ def _put(self, relative_url, data=None):
+ """encapsulate put method."""
+ url = '%s%s' % (self.url_, relative_url)
+ if data:
+ resp = self.session_.put(url, json.dumps(data))
+ else:
+ resp = self.session_.put(url)
+
+ return self._get_response(resp)
+
+ def _delete(self, relative_url):
+ """encapsulate delete method."""
+ url = '%s%s' % (self.url_, relative_url)
+ return self._get_response(self.session_.delete(url))
+
+ def get_switches(self, switch_ips=None, switch_networks=None, limit=None):
+ """List details for switches.
+
+ .. note::
+            The switches can be filtered by switch_ips, switch_networks and
+ limit. These params can be None or missing. If the param is None
+ or missing, that filter will be ignored.
+
+ :param switch_ips: Filter switch(es) with IP(s).
+ :type switch_ips: list of str. Each is as 'xxx.xxx.xxx.xxx'.
+        :param switch_networks: Filter switch(es) with network(s).
+ :type switch_networks: list of str. Each is as 'xxx.xxx.xxx.xxx/xx'.
+        :param limit: the maximum number of switches to return.
+ :type limit: int. 0 means unlimited.
+ """
+ params = {}
+ if switch_ips:
+ params['switchIp'] = switch_ips
+
+ if switch_networks:
+ params['switchIpNetwork'] = switch_networks
+
+ if limit:
+ params['limit'] = limit
+ return self._get('/switches', params=params)
+
+ def get_switch(self, switch_id):
+ """Lists details for a specified switch.
+
+ :param switch_id: switch id.
+ :type switch_id: int.
+ """
+ return self._get('/switches/%s' % switch_id)
+
+ def add_switch(self, switch_ip, version=None, community=None,
+ username=None, password=None, raw_data=None):
+ """Create a switch with specified details.
+
+ .. note::
+ It will trigger switch polling if successful. During
+ the polling, MAC address of the devices connected to the
+ switch will be learned by SNMP or SSH.
+
+ :param switch_ip: the switch IP address.
+ :type switch_ip: str, as xxx.xxx.xxx.xxx.
+ :param version: SNMP version when using SNMP to poll switch.
+ :type version: str, one in ['v1', 'v2c', 'v3']
+ :param community: SNMP community when using SNMP to poll switch.
+ :type community: str, usually 'public'.
+ :param username: SSH username when using SSH to poll switch.
+ :type username: str.
+ :param password: SSH password when using SSH to poll switch.
+ :type password: str.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['switch'] = {}
+ data['switch']['ip'] = switch_ip
+ data['switch']['credential'] = {}
+ if version:
+ data['switch']['credential']['version'] = version
+
+ if community:
+ data['switch']['credential']['community'] = community
+
+ if username:
+ data['switch']['credential']['username'] = username
+
+ if password:
+ data['switch']['credential']['password'] = password
+
+ return self._post('/switches', data=data)
+
+ def update_switch(self, switch_id, ip_addr=None,
+ version=None, community=None,
+ username=None, password=None,
+ raw_data=None):
+ """Updates a switch with specified details.
+
+ .. note::
+ It will trigger switch polling if successful. During
+ the polling, MAC address of the devices connected to the
+ switch will be learned by SNMP or SSH.
+
+ :param switch_id: switch id
+ :type switch_id: int.
+ :param ip_addr: the switch ip address.
+ :type ip_addr: str, as 'xxx.xxx.xxx.xxx' format.
+ :param version: SNMP version when using SNMP to poll switch.
+ :type version: str, one in ['v1', 'v2c', 'v3'].
+ :param community: SNMP community when using SNMP to poll switch.
+        :type community: str, usually 'public'.
+ :param username: username when using SSH to poll switch.
+ :type username: str.
+ :param password: password when using SSH to poll switch.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['switch'] = {}
+ if ip_addr:
+ data['switch']['ip'] = ip_addr
+
+ data['switch']['credential'] = {}
+ if version:
+ data['switch']['credential']['version'] = version
+
+ if community:
+ data['switch']['credential']['community'] = community
+
+ if username:
+ data['switch']['credential']['username'] = username
+
+ if password:
+ data['switch']['credential']['password'] = password
+
+ return self._put('/switches/%s' % switch_id, data=data)
+
+ def delete_switch(self, switch_id):
+ """Not implemented in api."""
+ return self._delete('/switches/%s' % switch_id)
+
+ def get_machines(self, switch_id=None, vlan_id=None,
+ port=None, limit=None):
+ """Get the details of machines.
+
+ .. note::
+ The machines can be filtered by switch_id, vlan_id, port
+ and limit. These params can be None or missing. If the param
+ is None or missing, the filter will be ignored.
+
+ :param switch_id: Return machine(s) connected to the switch.
+ :type switch_id: int.
+ :param vlan_id: Return machine(s) belonging to the vlan.
+ :type vlan_id: int.
+ :param port: Return machine(s) connect to the port.
+ :type port: int.
+        :param limit: the maximum number of machines to return.
+ :type limit: int. 0 means no limit.
+ """
+ params = {}
+ if switch_id:
+ params['switchId'] = switch_id
+
+ if vlan_id:
+ params['vlanId'] = vlan_id
+
+ if port:
+ params['port'] = port
+
+ if limit:
+ params['limit'] = limit
+
+ return self._get('/machines', params=params)
+
+ def get_machine(self, machine_id):
+ """Lists the details for a specified machine.
+
+ :param machine_id: Return machine with the id.
+ :type machine_id: int.
+ """
+ return self._get('/machines/%s' % machine_id)
+
+ def get_clusters(self):
+ """Lists the details for all clusters."""
+ return self._get('/clusters')
+
+ def get_cluster(self, cluster_id):
+ """Lists the details of the specified cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ """
+ return self._get('/clusters/%d' % cluster_id)
+
+ def add_cluster(self, cluster_name, adapter_id, raw_data=None):
+ """Creates a cluster by specified name and given adapter id.
+
+ :param cluster_name: cluster name.
+ :type cluster_name: str.
+ :param adapter_id: adapter id.
+ :type adapter_id: int.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['cluster'] = {}
+ data['cluster']['name'] = cluster_name
+ data['cluster']['adapter_id'] = adapter_id
+ return self._post('/clusters', data=data)
+
+ def add_hosts(self, cluster_id, machine_ids, raw_data=None):
+ """add the specified machine(s) as the host(s) to the cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param machine_ids: machine ids to add to cluster.
+ :type machine_ids: list of int, each is the id of one machine.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['addHosts'] = machine_ids
+ return self._post('/clusters/%d/action' % cluster_id, data=data)
+
+ def remove_hosts(self, cluster_id, host_ids, raw_data=None):
+ """remove the specified host(s) from the cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param host_ids: host ids to remove from cluster.
+ :type host_ids: list of int, each is the id of one host.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['removeHosts'] = host_ids
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def replace_hosts(self, cluster_id, machine_ids, raw_data=None):
+ """replace the cluster hosts with the specified machine(s).
+
+ :param cluster_id: int, The unique identifier of the cluster.
+ :type cluster_id: int.
+ :param machine_ids: the machine ids to replace the hosts in cluster.
+ :type machine_ids: list of int, each is the id of one machine.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['replaceAllHosts'] = machine_ids
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
+ def deploy_hosts(self, cluster_id, raw_data=None):
+ """Deploy the cluster.
+
+ :param cluster_id: The unique identifier of the cluster
+ :type cluster_id: int.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ data['deploy'] = []
+ return self._post('/clusters/%d/action' % cluster_id, data=data)
+
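+ # End-to-end usage sketch for the cluster calls above; the client
+ # object and all ids are hypothetical:
+ #   client.add_cluster('demo', adapter_id=1)
+ #   client.add_hosts(1, [1, 2, 3])
+ #   client.deploy_hosts(1)
+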
+ @classmethod
+ def parse_security(cls, kwargs):
+ """parse the arguments to security data."""
+ data = {}
+ for key, value in kwargs.items():
+ if '_' not in key:
+ continue
+ key_name, key_value = key.split('_', 1)
+ data.setdefault(
+ '%s_credentials' % key_name, {})[key_value] = value
+
+ return data
+
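+ # Hedged sketch of how parse_security above flattens keyword
+ # arguments: each key splits on its first '_'; the prefix selects a
+ # '<prefix>_credentials' bucket and the remainder becomes a key in
+ # it. The values are hypothetical:
+ #   parse_security({'server_username': 'root', 'server_password': 's3'})
+ #   => {'server_credentials': {'username': 'root', 'password': 's3'}}
+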
+ def set_security(self, cluster_id, **kwargs):
+ """Update the cluster security configuration.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param <security_name>_username: username of the security name.
+ :type <security_name>_username: str.
+ :param <security_name>_password: password of the security name.
+ :type <security_name>_password: str.
+
+ .. note::
+ security_name should be one of ['server', 'service', 'console'].
+ """
+ data = {}
+ data['security'] = self.parse_security(kwargs)
+ return self._put('/clusters/%d/security' % cluster_id, data=data)
+
+ @classmethod
+ def parse_networking(cls, kwargs):
+ """parse arguments to network data."""
+ data = {}
+ global_keys = [
+ 'nameservers', 'search_path', 'gateway',
+ 'proxy', 'ntp_server', 'ha_vip']
+ for key, value in kwargs.items():
+ if key in global_keys:
+ data.setdefault('global', {})[key] = value
+ else:
+ if '_' not in key:
+ continue
+
+ key_name, key_value = key.split('_', 1)
+ data.setdefault(
+ 'interfaces', {}
+ ).setdefault(
+ key_name, {}
+ )[key_value] = value
+
+ return data
+
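+ # Hedged sketch for parse_networking above; the values are
+ # hypothetical. Global keys land in 'global' while '<interface>_<attr>'
+ # keys are grouped under 'interfaces':
+ #   parse_networking({'gateway': '10.0.0.1',
+ #                     'management_ip_start': '10.0.0.100'})
+ #   => {'global': {'gateway': '10.0.0.1'},
+ #       'interfaces': {'management': {'ip_start': '10.0.0.100'}}}
+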
+ def set_networking(self, cluster_id, **kwargs):
+ """Update the cluster network configuration.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param nameservers: comma separated nameserver ip addresses.
+ :type nameservers: str.
+ :param search_path: comma separated dns name search path.
+ :type search_path: str.
+ :param gateway: gateway ip address for routing to outside.
+ :type gateway: str.
+ :param proxy: proxy url for downloading packages.
+ :type proxy: str.
+ :param ntp_server: ntp server ip address to sync timestamp.
+ :type ntp_server: str.
+ :param ha_vip: ha vip address on which ha proxy runs.
+ :type ha_vip: str.
+ :param <interface>_ip_start: start ip address for the host's interface.
+ :type <interface>_ip_start: str.
+ :param <interface>_ip_end: end ip address for the host's interface.
+ :type <interface>_ip_end: str.
+ :param <interface>_netmask: netmask of the host's interface.
+ :type <interface>_netmask: str.
+ :param <interface>_nic: host physical interface name.
+ :type <interface>_nic: str.
+ :param <interface>_promisc: whether the interface is in promiscuous mode.
+ :type <interface>_promisc: int, 0 or 1.
+
+ .. note::
+ interface should be one of ['management', 'tenant',
+ 'public', 'storage'].
+ """
+ data = {}
+ data['networking'] = self.parse_networking(kwargs)
+ return self._put('/clusters/%d/networking' % cluster_id, data=data)
+
+ @classmethod
+ def parse_partition(cls, kwargs):
+ """parse arguments to partition data."""
+ data = {}
+ for key, value in kwargs.items():
+ if key.endswith('_percentage'):
+ key_name = key[:-len('_percentage')]
+ data[key_name] = '%s%%' % value
+ elif key.endswith('_mbytes'):
+ key_name = key[:-len('_mbytes')]
+ data[key_name] = str(value)
+
+ return ';'.join([
+ '/%s %s' % (key, value) for key, value in data.items()
+ ])
+
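+ # Hedged sketch for parse_partition above; ordering follows dict
+ # iteration and the values are hypothetical:
+ #   parse_partition({'home_percentage': 40, 'var_mbytes': 1024})
+ #   => '/home 40%;/var 1024'
+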
+ def set_partition(self, cluster_id, **kwargs):
+ """Update the cluster partition configuration.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ :param <partition>_percentage: the partition percentage.
+ :type <partition>_percentage: float between 0 and 100.
+ :param <partition>_mbytes: the partition mbytes.
+ :type <partition>_mbytes: int.
+
+ .. note::
+ partition should be one of ['home', 'var', 'tmp'].
+ """
+ data = {}
+ data['partition'] = self.parse_partition(kwargs)
+ return self._put('/clusters/%s/partition' % cluster_id, data=data)
+
+ def get_hosts(self, hostname=None, clustername=None):
+ """Lists the details of hosts.
+
+ .. note::
+ The hosts can be filtered by hostname, clustername.
+ These params can be None or missing. If the param
+ is None or missing, the filter will be ignored.
+
+ :param hostname: The name of a host.
+ :type hostname: str.
+ :param clustername: The name of a cluster.
+ :type clustername: str.
+ """
+ params = {}
+ if hostname:
+ params['hostname'] = hostname
+
+ if clustername:
+ params['clustername'] = clustername
+
+ return self._get('/clusterhosts', params=params)
+
+ def get_host(self, host_id):
+ """Lists the details for the specified host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ """
+ return self._get('/clusterhosts/%s' % host_id)
+
+ def get_host_config(self, host_id):
+ """Lists the details of the config for the specified host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ """
+ return self._get('/clusterhosts/%s/config' % host_id)
+
+ def update_host_config(self, host_id, hostname=None,
+ roles=None, raw_data=None, **kwargs):
+ """Updates config for the host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ :param hostname: host name.
+ :type hostname: str.
+ :param security_<security>_username: username of the security name.
+ :type security_<security>_username: str.
+ :param security_<security>_password: password of the security name.
+ :type security_<security>_password: str.
+ :param networking_nameservers: comma separated nameserver ip addresses.
+ :type networking_nameservers: str.
+ :param networking_search_path: comma separated dns name search path.
+ :type networking_search_path: str.
+ :param networking_gateway: gateway ip address for routing to outside.
+ :type networking_gateway: str.
+ :param networking_proxy: proxy url for downloading packages.
+ :type networking_proxy: str.
+ :param networking_ntp_server: ntp server ip address to sync timestamp.
+ :type networking_ntp_server: str.
+ :param networking_<interface>_ip: ip address of the host's interface.
+ :type networking_<interface>_ip: str.
+ :param networking_<interface>_netmask: netmask of the host's interface.
+ :type networking_<interface>_netmask: str.
+ :param networking_<interface>_nic: host physical interface name.
+ :type networking_<interface>_nic: str.
+ :param networking_<interface>_promisc: whether the interface is promiscuous.
+ :type networking_<interface>_promisc: int, 0 or 1.
+ :param partition_<partition>_percentage: the partition percentage.
+ :type partition_<partition>_percentage: float between 0 and 100.
+ :param partition_<partition>_mbytes: the partition mbytes.
+ :type partition_<partition>_mbytes: int.
+ :param roles: host assigned roles in the cluster.
+ :type roles: list of str.
+ """
+ data = {}
+ if raw_data:
+ data = raw_data
+ else:
+ if hostname:
+ data['hostname'] = hostname
+
+ sub_kwargs = {}
+ for key, value in kwargs.items():
+ key_name, key_value = key.split('_', 1)
+ sub_kwargs.setdefault(key_name, {})[key_value] = value
+
+ if 'security' in sub_kwargs:
+ data['security'] = self.parse_security(sub_kwargs['security'])
+
+ if 'networking' in sub_kwargs:
+ data['networking'] = self.parse_networking(
+ sub_kwargs['networking'])
+ if 'partition' in sub_kwargs:
+ data['partition'] = self.parse_partition(
+ sub_kwargs['partition'])
+
+ if roles:
+ data['roles'] = roles
+
+ return self._put('/clusterhosts/%s/config' % host_id, data)
+
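+ # Hedged usage sketch for update_host_config above; every value is
+ # hypothetical. Prefixed kwargs are split on the first '_' and routed
+ # to parse_security/parse_networking/parse_partition:
+ #   client.update_host_config(
+ #       1, hostname='node-1',
+ #       networking_management_ip='10.0.0.11',
+ #       partition_var_percentage=20,
+ #       roles=['os-controller'])
+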
+ def delete_from_host_config(self, host_id, delete_key):
+ """Deletes one key in config for the host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ :param delete_key: the key in host config to be deleted.
+ :type delete_key: str.
+ """
+ return self._delete('/clusterhosts/%s/config/%s' % (
+ host_id, delete_key))
+
+ def get_adapters(self, name=None):
+ """Lists details of adapters.
+
+ .. note::
+ the adapters can be filtered by name if name is given and not None.
+
+ :param name: adapter name.
+ :type name: str.
+ """
+ params = {}
+ if name:
+ params['name'] = name
+
+ return self._get('/adapters', params=params)
+
+ def get_adapter(self, adapter_id):
+ """Lists details for the specified adapter.
+
+ :param adapter_id: adapter id.
+ :type adapter_id: int.
+ """
+ return self._get('/adapters/%s' % adapter_id)
+
+ def get_adapter_roles(self, adapter_id):
+ """Lists roles to assign to hosts for the specified adapter.
+
+ :param adapter_id: adapter id.
+ :type adapter_id: int.
+ """
+ return self._get('/adapters/%s/roles' % adapter_id)
+
+ def get_host_installing_progress(self, host_id):
+ """Lists progress details for the specified host.
+
+ :param host_id: host id.
+ :type host_id: int.
+ """
+ return self._get('/clusterhosts/%s/progress' % host_id)
+
+ def get_cluster_installing_progress(self, cluster_id):
+ """Lists progress details for the specified cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ """
+ return self._get('/clusters/%s/progress' % cluster_id)
+
+ def get_dashboard_links(self, cluster_id):
+ """Lists links for dashboards of deployed cluster.
+
+ :param cluster_id: cluster id.
+ :type cluster_id: int.
+ """
+ params = {}
+ params['cluster_id'] = cluster_id
+ return self._get('/dashboardlinks', params=params)
diff --git a/compass-tasks/build.sh b/compass-tasks/build.sh
new file mode 100755
index 0000000..e0dceea
--- /dev/null
+++ b/compass-tasks/build.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2016-2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -x
+COMPASS_DIR=${BASH_SOURCE[0]%/*}
+
+rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+sed -i 's/^mirrorlist=https/mirrorlist=http/g' /etc/yum.repos.d/epel.repo
+yum update -y
+
+yum --nogpgcheck install -y python python-devel git amqp python-pip libffi-devel openssl-devel gcc python-setuptools MySQL-python supervisor redis sshpass python-keyczar vim ansible-2.2.1.0 libyaml-devel make
+
+mkdir -p $COMPASS_DIR/compass
+touch $COMPASS_DIR/compass/__init__.py
+mv $COMPASS_DIR/actions $COMPASS_DIR/compass/
+mv $COMPASS_DIR/apiclient $COMPASS_DIR/compass/
+mv $COMPASS_DIR/tasks $COMPASS_DIR/compass/
+mv $COMPASS_DIR/utils $COMPASS_DIR/compass/
+mv $COMPASS_DIR/deployment $COMPASS_DIR/compass/
+mv $COMPASS_DIR/db $COMPASS_DIR/compass/
+mv $COMPASS_DIR/hdsdiscovery $COMPASS_DIR/compass/
+mv $COMPASS_DIR/log_analyzor $COMPASS_DIR/compass/
+
+easy_install --upgrade pip
+pip install --upgrade pip
+pip install --upgrade setuptools
+pip install --upgrade Flask
+
+mkdir -p /etc/compass/
+mkdir -p /etc/compass/machine_list
+mkdir -p /etc/compass/switch_list
+mkdir -p /var/log/compass
+mkdir -p /opt/ansible_callbacks
+mkdir -p /root/.ssh;
+echo "UserKnownHostsFile /dev/null" >> /root/.ssh/config;
+echo "StrictHostKeyChecking no" >> /root/.ssh/config
+
+cd $COMPASS_DIR
+python setup.py install
+cp supervisord.conf /etc/supervisord.conf
+cp start.sh /usr/local/bin/start.sh
+
+yum clean all
+
+set +x
diff --git a/compass-tasks/db/__init__.py b/compass-tasks/db/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/db/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/db/api/__init__.py b/compass-tasks/db/api/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-tasks/db/api/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/db/api/adapter.py b/compass-tasks/db/api/adapter.py
new file mode 100644
index 0000000..c3ad48d
--- /dev/null
+++ b/compass-tasks/db/api/adapter.py
@@ -0,0 +1,313 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Adapter related database operations."""
+import logging
+import re
+
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+OSES = None
+OS_INSTALLERS = None
+PACKAGE_INSTALLERS = None
+ADAPTERS = None
+ADAPTERS_FLAVORS = None
+ADAPTERS_ROLES = None
+
+
+def _get_oses_from_configuration():
+ """Get all os configs from os configuration dir.
+
+ Example: {
+ <os_name>: {
+ 'name': <os_name>,
+ 'id': <os_name>,
+ 'os_id': <os_name>,
+ 'deployable': True
+ }
+ }
+ """
+ configs = util.load_configs(setting.OS_DIR)
+ systems = {}
+ for config in configs:
+ logging.info('get config %s', config)
+ system_name = config['NAME']
+ parent_name = config.get('PARENT', None)
+ system = {
+ 'name': system_name,
+ 'id': system_name,
+ 'os_id': system_name,
+ 'parent': parent_name,
+ 'parent_id': parent_name,
+ 'deployable': config.get('DEPLOYABLE', False)
+ }
+ systems[system_name] = system
+ parents = {}
+ for name, system in systems.items():
+ parent = system.get('parent', None)
+ parents[name] = parent
+ for name, system in systems.items():
+ util.recursive_merge_dict(name, systems, parents)
+ return systems
+
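+# Hedged sketch of an os config dict this loader consumes; the keys
+# match those read above and the values are hypothetical:
+#
+#   {'NAME': 'CentOS-7', 'PARENT': 'CentOS', 'DEPLOYABLE': True}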
+
+def _get_installers_from_configuration(configs):
+ """Get installers from configurations.
+
+ Example: {
+ <installer_instance>: {
+ 'alias': <instance_name>,
+ 'id': <instance_name>,
+ 'name': <name>,
+ 'settings': <dict pass to installer plugin>
+ }
+ }
+ """
+ installers = {}
+ for config in configs:
+ name = config['NAME']
+ instance_name = config.get('INSTANCE_NAME', name)
+ installers[instance_name] = {
+ 'alias': instance_name,
+ 'id': instance_name,
+ 'name': name,
+ 'settings': config.get('SETTINGS', {})
+ }
+ return installers
+
+
+def _get_os_installers_from_configuration():
+ """Get os installers from os installer config dir."""
+ configs = util.load_configs(setting.OS_INSTALLER_DIR)
+ return _get_installers_from_configuration(configs)
+
+
+def _get_package_installers_from_configuration():
+ """Get package installers from package installer config dir."""
+ configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
+ return _get_installers_from_configuration(configs)
+
+
+def _get_adapters_from_configuration():
+ """Get adapters from adapter config dir."""
+ configs = util.load_configs(setting.ADAPTER_DIR)
+ adapters = {}
+ for config in configs:
+ logging.info('add config %s to adapter', config)
+ if 'OS_INSTALLER' in config:
+ os_installer = OS_INSTALLERS[config['OS_INSTALLER']]
+ else:
+ os_installer = None
+
+ if 'PACKAGE_INSTALLER' in config:
+ package_installer = PACKAGE_INSTALLERS[
+ config['PACKAGE_INSTALLER']
+ ]
+ else:
+ package_installer = None
+
+ adapter_name = config['NAME']
+ parent_name = config.get('PARENT', None)
+ adapter = {
+ 'name': adapter_name,
+ 'id': adapter_name,
+ 'parent': parent_name,
+ 'parent_id': parent_name,
+ 'display_name': config.get('DISPLAY_NAME', adapter_name),
+ 'os_installer': os_installer,
+ 'package_installer': package_installer,
+ 'deployable': config.get('DEPLOYABLE', False),
+ 'health_check_cmd': config.get('HEALTH_CHECK_COMMAND', None),
+ 'supported_oses': [],
+ 'roles': [],
+ 'flavors': []
+ }
+ supported_os_patterns = [
+ re.compile(supported_os_pattern)
+ for supported_os_pattern in config.get('SUPPORTED_OS_PATTERNS', [])
+ ]
+ for os_name, os in OSES.items():
+ if not os.get('deployable', False):
+ continue
+ for supported_os_pattern in supported_os_patterns:
+ if supported_os_pattern.match(os_name):
+ adapter['supported_oses'].append(os)
+ break
+ adapters[adapter_name] = adapter
+
+ parents = {}
+ for name, adapter in adapters.items():
+ parent = adapter.get('parent', None)
+ parents[name] = parent
+ for name, adapter in adapters.items():
+ util.recursive_merge_dict(name, adapters, parents)
+ return adapters
+
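+# Hedged sketch of the SUPPORTED_OS_PATTERNS matching above; the
+# pattern and os name are hypothetical:
+#
+#   import re
+#   re.compile('(?i)centos.*').match('CentOS-7')
+#   # matches, so a deployable 'CentOS-7' os would be appended to
+#   # adapter['supported_oses'].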
+
+def _add_roles_from_configuration():
+ """Get roles from roles config dir and update to adapters."""
+ configs = util.load_configs(setting.ADAPTER_ROLE_DIR)
+ for config in configs:
+ logging.info(
+ 'add config %s to role', config
+ )
+ adapter_name = config['ADAPTER_NAME']
+ adapter = ADAPTERS[adapter_name]
+ adapter_roles = ADAPTERS_ROLES.setdefault(adapter_name, {})
+ for role_dict in config['ROLES']:
+ role_name = role_dict['role']
+ display_name = role_dict.get('display_name', role_name)
+ adapter_roles[role_name] = {
+ 'name': role_name,
+ 'id': '%s:%s' % (adapter_name, role_name),
+ 'adapter_id': adapter_name,
+ 'adapter_name': adapter_name,
+ 'display_name': display_name,
+ 'description': role_dict.get('description', display_name),
+ 'optional': role_dict.get('optional', False)
+ }
+ parents = {}
+ for name, adapter in ADAPTERS.items():
+ parent = adapter.get('parent', None)
+ parents[name] = parent
+ for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
+ util.recursive_merge_dict(adapter_name, ADAPTERS_ROLES, parents)
+ for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
+ adapter = ADAPTERS[adapter_name]
+ adapter['roles'] = adapter_roles.values()
+
+
+def _add_flavors_from_configuration():
+ """Get flavors from flavor config dir and update to adapters."""
+ configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR)
+ for config in configs:
+ logging.info('add config %s to flavor', config)
+ adapter_name = config['ADAPTER_NAME']
+ adapter = ADAPTERS[adapter_name]
+ adapter_flavors = ADAPTERS_FLAVORS.setdefault(adapter_name, {})
+ adapter_roles = ADAPTERS_ROLES[adapter_name]
+ for flavor_dict in config['FLAVORS']:
+ flavor_name = flavor_dict['flavor']
+ flavor_id = '%s:%s' % (adapter_name, flavor_name)
+ flavor = {
+ 'name': flavor_name,
+ 'id': flavor_id,
+ 'adapter_id': adapter_name,
+ 'adapter_name': adapter_name,
+ 'display_name': flavor_dict.get('display_name', flavor_name),
+ 'template': flavor_dict.get('template', None)
+ }
+ flavor_roles = flavor_dict.get('roles', [])
+ roles_in_flavor = []
+ for flavor_role in flavor_roles:
+ if isinstance(flavor_role, basestring):
+ role_name = flavor_role
+ role_in_flavor = {
+ 'name': role_name,
+ 'flavor_id': flavor_id
+ }
+ else:
+ role_in_flavor = flavor_role
+ role_in_flavor['flavor_id'] = flavor_id
+ if 'role' in role_in_flavor:
+ role_in_flavor['name'] = role_in_flavor['role']
+ del role_in_flavor['role']
+ role_name = role_in_flavor['name']
+ role = adapter_roles[role_name]
+ util.merge_dict(role_in_flavor, role, override=False)
+ roles_in_flavor.append(role_in_flavor)
+ flavor['roles'] = roles_in_flavor
+ adapter_flavors[flavor_name] = flavor
+ parents = {}
+ for name, adapter in ADAPTERS.items():
+ parent = adapter.get('parent', None)
+ parents[name] = parent
+ for adapter_name, adapter_roles in ADAPTERS_FLAVORS.items():
+ util.recursive_merge_dict(adapter_name, ADAPTERS_FLAVORS, parents)
+ for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
+ adapter = ADAPTERS[adapter_name]
+ adapter['flavors'] = adapter_flavors.values()
+
+
+def load_adapters_internal(force_reload=False):
+ """Load adapter related configurations into memory.
+
+ If force_reload is set, reload all configurations even if they are
+ already loaded.
+ """
+ global OSES
+ if force_reload or OSES is None:
+ OSES = _get_oses_from_configuration()
+ global OS_INSTALLERS
+ if force_reload or OS_INSTALLERS is None:
+ OS_INSTALLERS = _get_os_installers_from_configuration()
+ global PACKAGE_INSTALLERS
+ if force_reload or PACKAGE_INSTALLERS is None:
+ PACKAGE_INSTALLERS = _get_package_installers_from_configuration()
+ global ADAPTERS
+ if force_reload or ADAPTERS is None:
+ ADAPTERS = _get_adapters_from_configuration()
+ global ADAPTERS_ROLES
+ if force_reload or ADAPTERS_ROLES is None:
+ ADAPTERS_ROLES = {}
+ _add_roles_from_configuration()
+ global ADAPTERS_FLAVORS
+ if force_reload or ADAPTERS_FLAVORS is None:
+ ADAPTERS_FLAVORS = {}
+ _add_flavors_from_configuration()
+
+
+def get_adapters_internal(force_reload=False):
+ """Get all deployable adapters."""
+ load_adapters_internal(force_reload=force_reload)
+ adapter_mapping = {}
+ for adapter_name, adapter in ADAPTERS.items():
+ if adapter.get('deployable'):
+ # TODO(xicheng): adapter should be filtered before
+ # return to caller.
+ adapter_mapping[adapter_name] = adapter
+ else:
+ logging.info(
+ 'ignore adapter %s since it is not deployable',
+ adapter_name
+ )
+ return adapter_mapping
+
+
+def get_flavors_internal(force_reload=False):
+ """Get all deployable flavors."""
+ load_adapters_internal(force_reload=force_reload)
+ adapter_flavor_mapping = {}
+ for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
+ adapter = ADAPTERS.get(adapter_name, {})
+ for flavor_name, flavor in adapter_flavors.items():
+ if adapter.get('deployable'):
+ # TODO(xicheng): flavor dict should be filtered before
+ # return to caller.
+ adapter_flavor_mapping.setdefault(
+ adapter_name, {}
+ )[flavor_name] = flavor
+ else:
+ logging.info(
+ 'ignore flavors of adapter %s since it is not deployable',
+ adapter_name
+ )
+
+ return adapter_flavor_mapping
diff --git a/compass-tasks/db/api/adapter_holder.py b/compass-tasks/db/api/adapter_holder.py
new file mode 100644
index 0000000..91c65c4
--- /dev/null
+++ b/compass-tasks/db/api/adapter_holder.py
@@ -0,0 +1,155 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Adapter related object holder."""
+import logging
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+
+
+SUPPORTED_FIELDS = [
+ 'name',
+]
+RESP_FIELDS = [
+ 'id', 'name', 'roles', 'flavors',
+ 'os_installer', 'package_installer',
+ 'supported_oses', 'display_name', 'health_check_cmd'
+]
+RESP_OS_FIELDS = [
+ 'id', 'name', 'os_id'
+]
+RESP_ROLES_FIELDS = [
+ 'id', 'name', 'display_name', 'description', 'optional'
+]
+RESP_FLAVORS_FIELDS = [
+ 'id', 'adapter_id', 'adapter_name', 'name', 'display_name',
+ 'template', 'roles'
+]
+
+
+ADAPTER_MAPPING = None
+FLAVOR_MAPPING = None
+
+
+def load_adapters(force_reload=False):
+ global ADAPTER_MAPPING
+ if force_reload or ADAPTER_MAPPING is None:
+ logging.info('load adapters into memory')
+ ADAPTER_MAPPING = adapter_api.get_adapters_internal(
+ force_reload=force_reload
+ )
+
+
+def load_flavors(force_reload=False):
+ global FLAVOR_MAPPING
+ if force_reload or FLAVOR_MAPPING is None:
+ logging.info('load flavors into memory')
+ FLAVOR_MAPPING = {}
+ adapters_flavors = adapter_api.get_flavors_internal(
+ force_reload=force_reload
+ )
+ for adapter_name, adapter_flavors in adapters_flavors.items():
+ for flavor_name, flavor in adapter_flavors.items():
+ FLAVOR_MAPPING['%s:%s' % (adapter_name, flavor_name)] = flavor
+
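+# FLAVOR_MAPPING keys take the '<adapter_name>:<flavor_name>' form, so
+# a hypothetical adapter 'openstack' with flavor 'allinone' is stored
+# under 'openstack:allinone'.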
+
+def _filter_adapters(adapter_config, filter_name, filter_value):
+ if filter_name not in adapter_config:
+ return False
+ if isinstance(filter_value, list):
+ return bool(
+ adapter_config[filter_name] in filter_value
+ )
+ elif isinstance(filter_value, dict):
+ return all([
+ _filter_adapters(
+ adapter_config[filter_name],
+ sub_filter_key, sub_filter_value
+ )
+ for sub_filter_key, sub_filter_value in filter_value.items()
+ ])
+ else:
+ return adapter_config[filter_name] == filter_value
+
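+# Hedged examples of the filter semantics above; the adapter configs
+# and values are hypothetical:
+#
+#   _filter_adapters({'name': 'openstack'}, 'name', ['openstack', 'ceph'])
+#   => True  (list filter: membership test)
+#   _filter_adapters({'os_installer': {'name': 'cobbler'}},
+#                    'os_installer', {'name': 'cobbler'})
+#   => True  (dict filter: recursive match on sub keys)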
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_ADAPTERS
+)
+@utils.output_filters(name=utils.general_filter_callback)
+@utils.wrap_to_dict(
+ RESP_FIELDS,
+ supported_oses=RESP_OS_FIELDS,
+ roles=RESP_ROLES_FIELDS,
+ flavors=RESP_FLAVORS_FIELDS
+)
+def list_adapters(user=None, session=None, **filters):
+ """list adapters."""
+ load_adapters()
+ return ADAPTER_MAPPING.values()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_ADAPTERS
+)
+@utils.wrap_to_dict(
+ RESP_FIELDS,
+ supported_oses=RESP_OS_FIELDS,
+ roles=RESP_ROLES_FIELDS,
+ flavors=RESP_FLAVORS_FIELDS
+)
+def get_adapter(adapter_id, user=None, session=None, **kwargs):
+ """get adapter."""
+ load_adapters()
+ if adapter_id not in ADAPTER_MAPPING:
+ raise exception.RecordNotExists(
+ 'adapter %s does not exist' % adapter_id
+ )
+ return ADAPTER_MAPPING[adapter_id]
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
+def list_flavors(user=None, session=None, **filters):
+ """List flavors."""
+ load_flavors()
+ return FLAVOR_MAPPING.values()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
+def get_flavor(flavor_id, user=None, session=None, **kwargs):
+ """Get flavor."""
+ load_flavors()
+ if flavor_id not in FLAVOR_MAPPING:
+ raise exception.RecordNotExists(
+ 'flavor %s does not exist' % flavor_id
+ )
+ return FLAVOR_MAPPING[flavor_id]
diff --git a/compass-tasks/db/api/cluster.py b/compass-tasks/db/api/cluster.py
new file mode 100644
index 0000000..7a7022c
--- /dev/null
+++ b/compass-tasks/db/api/cluster.py
@@ -0,0 +1,2444 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cluster database operations."""
+import copy
+import functools
+import logging
+import re
+
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = [
+ 'name', 'os_name', 'owner',
+ 'adapter_name', 'flavor_name'
+]
+SUPPORTED_CLUSTERHOST_FIELDS = []
+RESP_FIELDS = [
+ 'id', 'name', 'os_name', 'os_id', 'adapter_id', 'flavor_id',
+ 'reinstall_distributed_system', 'flavor',
+ 'distributed_system_installed',
+ 'owner', 'adapter_name', 'flavor_name',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTERHOST_FIELDS = [
+ 'id', 'host_id', 'clusterhost_id', 'machine_id',
+ 'name', 'hostname', 'roles', 'os_installer',
+ 'cluster_id', 'clustername', 'location', 'tag',
+ 'networks', 'mac', 'switch_ip', 'port', 'switches',
+ 'os_installed', 'distributed_system_installed',
+ 'os_name', 'os_id', 'ip',
+ 'reinstall_os', 'reinstall_distributed_system',
+ 'owner',
+ 'created_at', 'updated_at',
+ 'patched_roles'
+]
+RESP_CONFIG_FIELDS = [
+ 'os_config',
+ 'package_config',
+ 'config_step',
+ 'config_validated',
+ 'created_at',
+ 'updated_at'
+]
+RESP_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config',
+ 'created_at',
+ 'updated_at'
+]
+RESP_METADATA_FIELDS = [
+ 'os_config', 'package_config'
+]
+RESP_CLUSTERHOST_CONFIG_FIELDS = [
+ 'package_config',
+ 'os_config',
+ 'config_step',
+ 'config_validated',
+ 'networks',
+ 'created_at',
+ 'updated_at'
+]
+RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config',
+ 'created_at',
+ 'updated_at'
+]
+RESP_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity',
+ 'status', 'ready',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTERHOST_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity',
+ 'ready', 'created_at', 'updated_at'
+]
+RESP_REVIEW_FIELDS = [
+ 'cluster', 'hosts'
+]
+RESP_DEPLOY_FIELDS = [
+ 'status', 'cluster', 'hosts'
+]
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+ADDED_FIELDS = ['name', 'adapter_id', 'os_id']
+OPTIONAL_ADDED_FIELDS = ['flavor_id']
+UPDATED_FIELDS = ['name', 'reinstall_distributed_system']
+ADDED_HOST_FIELDS = ['machine_id']
+UPDATED_HOST_FIELDS = ['name', 'reinstall_os']
+UPDATED_CLUSTERHOST_FIELDS = ['roles', 'patched_roles']
+PATCHED_CLUSTERHOST_FIELDS = ['patched_roles']
+UPDATED_CONFIG_FIELDS = [
+ 'put_os_config', 'put_package_config', 'config_step'
+]
+UPDATED_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config', 'deployed_package_config'
+]
+PATCHED_CONFIG_FIELDS = [
+ 'patched_os_config', 'patched_package_config', 'config_step'
+]
+UPDATED_CLUSTERHOST_CONFIG_FIELDS = [
+ 'put_os_config',
+ 'put_package_config'
+]
+PATCHED_CLUSTERHOST_CONFIG_FIELDS = [
+ 'patched_os_config',
+ 'patched_package_config'
+]
+UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config',
+ 'deployed_package_config'
+]
+UPDATED_CLUSTERHOST_STATE_FIELDS = [
+ 'state', 'percentage', 'message', 'severity'
+]
+UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS = [
+ 'ready'
+]
+UPDATED_CLUSTER_STATE_FIELDS = ['state']
+IGNORE_UPDATED_CLUSTER_STATE_FIELDS = ['percentage', 'message', 'severity']
+UPDATED_CLUSTER_STATE_INTERNAL_FIELDS = ['ready']
+RESP_CLUSTERHOST_LOG_FIELDS = [
+ 'clusterhost_id', 'id', 'host_id', 'cluster_id',
+ 'filename', 'position', 'partial_line',
+ 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+ADDED_CLUSTERHOST_LOG_FIELDS = [
+ 'filename'
+]
+UPDATED_CLUSTERHOST_LOG_FIELDS = [
+ 'position', 'partial_line', 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_clusters(user=None, session=None, **filters):
+ """List clusters."""
+ clusters = utils.list_db_objects(
+ session, models.Cluster, **filters
+ )
+ logging.info('user is %s', user.email)
+ if not user.is_admin and len(clusters):
+ clusters = [c for c in clusters if c.owner == user.email]
+ return clusters
+
+
+def _get_cluster(cluster_id, session=None, **kwargs):
+ """Get cluster by id."""
+ if isinstance(cluster_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Cluster, id=cluster_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'cluster id %s type is not int compatible' % cluster_id
+ )
+
+
+def get_cluster_internal(cluster_id, session=None, **kwargs):
+ """Helper function to get cluster.
+
+ Should be only used by other files under db/api.
+ """
+ return _get_cluster(cluster_id, session=session, **kwargs)
+
+
+def _get_cluster_host(
+ cluster_id, host_id, session=None, **kwargs
+):
+ """Get clusterhost by cluster id and host id."""
+ cluster = _get_cluster(cluster_id, session=session, **kwargs)
+ from compass.db.api import host as host_api
+ host = host_api.get_host_internal(host_id, session=session, **kwargs)
+ return utils.get_db_object(
+ session, models.ClusterHost,
+ cluster_id=cluster.id,
+ host_id=host.id,
+ **kwargs
+ )
+
+
+def _get_clusterhost(clusterhost_id, session=None, **kwargs):
+ """Get clusterhost by clusterhost id."""
+ if isinstance(clusterhost_id, (int, long)):
+ return utils.get_db_object(
+ session, models.ClusterHost,
+ clusterhost_id=clusterhost_id,
+ **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'clusterhost id %s type is not int compatible' % clusterhost_id
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_cluster(
+ cluster_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get cluster info."""
+ return _get_cluster(
+ cluster_id,
+ session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERS)
+def is_cluster_os_ready(
+ cluster_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ cluster = utils.get_db_object(
+ session, models.Cluster, exception_when_missing, id=cluster_id)
+
+ all_states = [i.host.state.ready for i in cluster.clusterhosts]
+
+ logging.info("is_cluster_os_ready: all_states %s", all_states)
+
+ return all(all_states)
+
+
+def check_cluster_validated(cluster):
+ """Check cluster is validated."""
+ if not cluster.config_validated:
+ raise exception.Forbidden(
+ 'cluster %s is not validated' % cluster.name
+ )
+
+
+def check_clusterhost_validated(clusterhost):
+ """Check clusterhost is validated."""
+ if not clusterhost.config_validated:
+ raise exception.Forbidden(
+ 'clusterhost %s is not validated' % clusterhost.name
+ )
+
+
+def check_cluster_editable(
+ cluster, user=None,
+ check_in_installing=False
+):
+ """Check if cluster is editable.
+
+ If we try to set cluster
+ reinstall_distributed_system attribute or any
+ checking to make sure the cluster is not in installing state,
+ we can set check_in_installing to True.
+ Otherwise we will make sure the cluster is not in deploying or
+ deployed.
+ If user is not admin or not the owner of the cluster, the check
+ will fail to make sure he can not update the cluster attributes.
+ """
+ if check_in_installing:
+ if cluster.state.state == 'INSTALLING':
+ raise exception.Forbidden(
+ 'cluster %s is not editable '
+ 'when state is installing' % cluster.name
+ )
+# elif (
+# cluster.flavor_name and
+# not cluster.reinstall_distributed_system
+# ):
+# raise exception.Forbidden(
+# 'cluster %s is not editable '
+# 'when not to be reinstalled' % cluster.name
+# )
+ if user and not user.is_admin and cluster.creator_id != user.id:
+ raise exception.Forbidden(
+ 'cluster %s is not editable '
+ 'when user is not admin or cluster owner' % cluster.name
+ )
+
+
+def is_cluster_editable(
+ cluster, user=None,
+ check_in_installing=False
+):
+ """Get if cluster is editble."""
+ try:
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=check_in_installing
+ )
+ return True
+ except exception.Forbidden:
+ return False
+
+
+@utils.supported_filters(
+ ADDED_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_cluster(
+ exception_when_existing=True,
+ name=None, adapter_id=None, flavor_id=None,
+ user=None, session=None, **kwargs
+):
+ """Create a cluster."""
+ adapter = adapter_api.get_adapter(
+ adapter_id, user=user, session=session
+ )
+ # if flavor_id is not None, also set flavor field.
+ # In future maybe we can move the use of flavor from
+ # models.py to db/api and explictly get flavor when
+ # needed instead of setting flavor into cluster record.
+ flavor = {}
+ if flavor_id:
+ flavor = adapter_api.get_flavor(
+ flavor_id,
+ user=user, session=session
+ )
+ if flavor['adapter_id'] != adapter['id']:
+ raise exception.InvalidParameter(
+ 'flavor %s is not of adapter %s' % (
+ flavor_id, adapter_id
+ )
+ )
+
+ cluster = utils.add_db_object(
+ session, models.Cluster, exception_when_existing,
+ name, user.id, adapter_id=adapter_id,
+ flavor_id=flavor_id, flavor=flavor, **kwargs
+ )
+ return cluster
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_cluster(cluster_id, user=None, session=None, **kwargs):
+ """Update a cluster."""
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=(
+ kwargs.get('reinstall_distributed_system', False)
+ )
+ )
+ return utils.update_db_object(session, cluster, **kwargs)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_FIELDS + ['status', 'cluster', 'hosts'],
+ cluster=RESP_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def del_cluster(
+ cluster_id, force=False, from_database_only=False,
+ delete_underlying_host=False, user=None, session=None, **kwargs
+):
+ """Delete a cluster.
+
+ If force, the cluster will be deleted anyway. It is used by cli to
+ force clean a cluster in any case.
+ If from_database_only, the cluster record will only be removed from
+ the database. Otherwise, a del task is sent to celery to do clean
+ deletion.
+ If delete_underlying_host, all hosts under this cluster will also be
+ deleted.
+ The backend will call del_cluster again with from_database_only set
+ when it has done the deletion work on os installer/package installer.
+ """
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ logging.debug(
+ 'delete cluster %s with force=%s '
+ 'from_database_only=%s delete_underlying_host=%s',
+ cluster.id, force, from_database_only, delete_underlying_host
+ )
+ # force set cluster state to ERROR and the state of any clusterhost
+ # in the cluster to ERROR when we want to delete the cluster anyway
+ # even the cluster is in installing or already installed.
+ # It let the api know the deleting is in doing when backend is doing
+ # the real deleting.
+ # In future we may import a new state like INDELETE to indicate
+ # the deleting is processing.
+ # We need discuss about if we can delete a cluster when it is already
+ # installed by api.
+ for clusterhost in cluster.clusterhosts:
+ if clusterhost.state.state != 'UNINITIALIZED' and force:
+ clusterhost.state.state = 'ERROR'
+ if delete_underlying_host:
+ host = clusterhost.host
+ if host.state.state != 'UNINITIALIZED' and force:
+ host.state.state = 'ERROR'
+ if cluster.state.state != 'UNINITIALIZED' and force:
+ cluster.state.state = 'ERROR'
+
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+
+ # delete underlying host if delete_underlying_host is set.
+ if delete_underlying_host:
+ for clusterhost in cluster.clusterhosts:
+ # delete underlying host only user has permission.
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ if host_api.is_host_editable(
+ host, user=user, check_in_installing=True
+ ):
+ # Delete host record directly in database when there is no need
+ # to do the deletion in backend or from_database_only is set.
+ if host.state.state == 'UNINITIALIZED' or from_database_only:
+ utils.del_db_object(
+ session, host
+ )
+
+ # Delete cluster record directly in database when there
+ # is no need to do the deletion in backend or from_database_only is set.
+ if cluster.state.state == 'UNINITIALIZED' or from_database_only:
+ return utils.del_db_object(
+ session, cluster
+ )
+ else:
+ from compass.tasks import client as celery_client
+ logging.info('send del cluster %s task to celery', cluster_id)
+ celery_client.celery.send_task(
+ 'compass.tasks.delete_cluster',
+ (
+ user.email, cluster.id,
+ [
+ clusterhost.host_id
+ for clusterhost in cluster.clusterhosts
+ ],
+ delete_underlying_host
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'delete action is sent',
+ 'cluster': cluster,
+ 'hosts': cluster.clusterhosts
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def get_cluster_config(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster config."""
+ return _get_cluster(cluster_id, session=session)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
+def get_cluster_deployed_config(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster deployed config."""
+ return _get_cluster(cluster_id, session=session)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_cluster_metadata(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster metadata.
+
+ If no flavor in the cluster, it means this is a os only cluster.
+ We ignore package metadata for os only cluster.
+ """
+ cluster = _get_cluster(cluster_id, session=session)
+ metadatas = {}
+ os_name = cluster.os_name
+ if os_name:
+ metadatas.update(
+ metadata_api.get_os_metadata(
+ os_name, session=session
+ )
+ )
+ flavor_id = cluster.flavor_id
+ if flavor_id:
+ metadatas.update(
+ metadata_api.get_flavor_metadata(
+ flavor_id,
+ user=user, session=session
+ )
+ )
+
+ return metadatas
+
+
+def _cluster_os_config_validates(
+ config, cluster, session=None, user=None, **kwargs
+):
+ """Check cluster os config validation."""
+ metadata_api.validate_os_config(
+ config, cluster.os_id
+ )
+
+
+def _cluster_package_config_validates(
+ config, cluster, session=None, user=None, **kwargs
+):
+ """Check cluster package config validation."""
+ metadata_api.validate_flavor_config(
+ config, cluster.flavor_id
+ )
+
+
+@utils.input_validates_with_args(
+ put_os_config=_cluster_os_config_validates,
+ put_package_config=_cluster_package_config_validates
+)
+@utils.output_validates_with_args(
+ os_config=_cluster_os_config_validates,
+ package_config=_cluster_package_config_validates
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def _update_cluster_config(cluster, session=None, user=None, **kwargs):
+ """Update a cluster config."""
+ check_cluster_editable(cluster, user=user)
+ return utils.update_db_object(
+ session, cluster, **kwargs
+ )
+
+
+# replace os_config to deployed_os_config,
+# package_config to deployed_package_config
+@utils.replace_filters(
+ os_config='deployed_os_config',
+ package_config='deployed_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_DEPLOYED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
+def update_cluster_deployed_config(
+ cluster_id, user=None, session=None, **kwargs
+):
+ """Update cluster deployed config."""
+ cluster = _get_cluster(cluster_id, session=session)
+ check_cluster_editable(cluster, user=user)
+ check_cluster_validated(cluster)
+ return utils.update_db_object(
+ session, cluster, **kwargs
+ )
+
+
+# replace os_config to put_os_config,
+# package_config to put_package_config in kwargs.
+# It tells db these fields will be updated not patched.
+@utils.replace_filters(
+ os_config='put_os_config',
+ package_config='put_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER_CONFIG
+)
+def update_cluster_config(cluster_id, user=None, session=None, **kwargs):
+ """Update cluster config."""
+ cluster = _get_cluster(cluster_id, session=session)
+ return _update_cluster_config(
+ cluster, session=session, user=user, **kwargs
+ )
+
+
+# replace os_config to patched_os_config and
+# package_config to patched_package_config in kwargs.
+# It tells db these fields will be patched not updated.
+@utils.replace_filters(
+ os_config='patched_os_config',
+ package_config='patched_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTER_CONFIG
+)
+def patch_cluster_config(cluster_id, user=None, session=None, **kwargs):
+ """patch cluster config."""
+ cluster = _get_cluster(cluster_id, session=session)
+ return _update_cluster_config(
+ cluster, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTER_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def del_cluster_config(cluster_id, user=None, session=None):
+ """Delete a cluster config."""
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ check_cluster_editable(cluster, user=user)
+ return utils.update_db_object(
+ session, cluster, os_config={},
+ package_config={}, config_validated=False
+ )
+
+
+def _roles_validates(roles, cluster, session=None, user=None):
+ """Check roles is validated to a cluster's roles."""
+ if roles:
+ if not cluster.flavor_name:
+ raise exception.InvalidParameter(
+ 'no flavor in cluster %s' % cluster.name
+ )
+ cluster_roles = [role['name'] for role in cluster.flavor['roles']]
+ for role in roles:
+ if role not in cluster_roles:
+ raise exception.InvalidParameter(
+ 'role %s is not in cluster roles %s' % (
+ role, cluster_roles
+ )
+ )
+
+
+def _cluster_host_roles_validates(
+ value, cluster, host, session=None, user=None, **kwargs
+):
+ """Check clusterhost roles is validated by cluster and host."""
+ _roles_validates(value, cluster, session=session, user=user)
+
+
+def _clusterhost_roles_validates(
+ value, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost roles is validated by clusterhost."""
+ _roles_validates(
+ value, clusterhost.cluster, session=session, user=user
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_HOST_FIELDS,
+ ignore_support_keys=UPDATED_CLUSTERHOST_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+def _add_host_if_not_exist(
+ machine_id, cluster, session=None, user=None, **kwargs
+):
+ """Add underlying host if it does not exist."""
+ from compass.db.api import host as host_api
+ host = host_api.get_host_internal(
+ machine_id, session=session, exception_when_missing=False
+ )
+ if host:
+ if kwargs:
+ # ignore update underlying host if host is not editable.
+ from compass.db.api import host as host_api
+ if host_api.is_host_editable(
+ host, user=cluster.creator,
+ check_in_installing=kwargs.get('reinstall_os', False),
+ ):
+ utils.update_db_object(
+ session, host,
+ **kwargs
+ )
+ else:
+ logging.debug(
+ 'ignore update host %s '
+ 'since it is not editable' % host.name
+ )
+ else:
+ logging.debug('nothing to update for host %s', host.name)
+ else:
+ from compass.db.api import adapter_holder as adapter_api
+ adapter = adapter_api.get_adapter(
+ cluster.adapter_name, user=user, session=session
+ )
+ host = utils.add_db_object(
+ session, models.Host, False, machine_id,
+ os_name=cluster.os_name,
+ os_installer=adapter['os_installer'],
+ creator=cluster.creator,
+ **kwargs
+ )
+ return host
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=UPDATED_HOST_FIELDS
+)
+@utils.input_validates_with_args(
+ roles=_cluster_host_roles_validates
+)
+def _add_clusterhost_only(
+ cluster, host,
+ exception_when_existing=False,
+ session=None, user=None,
+ **kwargs
+):
+ """Get clusterhost only."""
+ if not cluster.state.state == "UNINITIALIZED":
+ cluster.state.ready = False
+ cluster.state.state = "UNINITIALIZED"
+ cluster.state.percentage = 0.0
+ utils.update_db_object(session, cluster.state, state="UNINITIALIZED")
+
+ return utils.add_db_object(
+ session, models.ClusterHost, exception_when_existing,
+ cluster.id, host.id, **kwargs
+ )
+
+
+@utils.supported_filters(
+ ADDED_HOST_FIELDS,
+ optional_support_keys=UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+def _add_clusterhost(
+ cluster,
+ exception_when_existing=False,
+ session=None, user=None, machine_id=None, **kwargs
+):
+ """Add clusterhost and add underlying host if it does not exist."""
+ host = _add_host_if_not_exist(
+ machine_id, cluster, session=session,
+ user=user, **kwargs
+ )
+
+ return _add_clusterhost_only(
+ cluster, host, exception_when_existing=exception_when_existing,
+ session=session, user=user, **kwargs
+ )
+
+
+def _add_clusterhosts(cluster, machines, session=None, user=None):
+ """Add machines to cluster.
+
+ Args:
+ machines: list of dicts, each containing clusterhost attrs to update.
+
+ Examples:
+ [{'machine_id': 1, 'name': 'host1'}]
+ """
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ if cluster.state.state == 'SUCCESSFUL':
+ cluster.state.state = 'UPDATE_PREPARING'
+ for machine_dict in machines:
+ _add_clusterhost(
+ cluster, session=session, user=user, **machine_dict
+ )
+
+
+def _remove_clusterhosts(cluster, hosts, session=None, user=None):
+ """Remove hosts from cluster.
+
+ Args:
+ hosts: list of host id.
+ """
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ utils.del_db_objects(
+ session, models.ClusterHost,
+ cluster_id=cluster.id, host_id=hosts
+ )
+
+
+def _set_clusterhosts(cluster, machines, session=None, user=None):
+ """set machines to cluster.
+
+ Args:
+ machines: list of dict which contains clusterost attr to update.
+
+ Examples:
+ [{'machine_id': 1, 'name': 'host1'}]
+ """
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ utils.del_db_objects(
+ session, models.ClusterHost,
+ cluster_id=cluster.id
+ )
+ if cluster.state.state == 'SUCCESSFUL':
+ cluster.state.state = 'UPDATE_PREPARING'
+ for machine_dict in machines:
+ _add_clusterhost(
+ cluster, True, session=session, user=user, **machine_dict
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def list_cluster_hosts(cluster_id, user=None, session=None, **filters):
+ """List clusterhosts of a cluster."""
+ cluster = _get_cluster(cluster_id, session=session)
+ return utils.list_db_objects(
+ session, models.ClusterHost, cluster_id=cluster.id,
+ **filters
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def list_clusterhosts(user=None, session=None, **filters):
+ """List all clusterhosts."""
+ return utils.list_db_objects(
+ session, models.ClusterHost, **filters
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def get_cluster_host(
+ cluster_id, host_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get clusterhost info by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session,
+ exception_when_missing=exception_when_missing,
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def get_clusterhost(
+ clusterhost_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get clusterhost info by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session,
+ exception_when_missing=exception_when_missing,
+ user=user
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def add_cluster_host(
+ cluster_id, exception_when_existing=True,
+ user=None, session=None, **kwargs
+):
+ """Add a host to a cluster."""
+ cluster = _get_cluster(cluster_id, session=session)
+ check_cluster_editable(
+ cluster, user=user,
+ check_in_installing=True
+ )
+ if cluster.state.state == 'SUCCESSFUL':
+ cluster.state.state = 'UPDATE_PREPARING'
+ return _add_clusterhost(
+ cluster, exception_when_existing,
+ session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_HOST_FIELDS,
+ ignore_support_keys=(
+ UPDATED_CLUSTERHOST_FIELDS +
+ PATCHED_CLUSTERHOST_FIELDS
+ )
+)
+def _update_host_if_necessary(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """Update underlying host if there is something to update."""
+ host = clusterhost.host
+ if kwargs:
+ # ignore update underlying host if the host is not editable.
+ from compass.db.api import host as host_api
+ if host_api.is_host_editable(
+ host, user=clusterhost.cluster.creator,
+ check_in_installing=kwargs.get('reinstall_os', False),
+ ):
+ utils.update_db_object(
+ session, host,
+ **kwargs
+ )
+ else:
+            logging.debug(
+                'ignoring update to host %s since it is not editable',
+                host.name
+            )
+ else:
+ logging.debug(
+ 'nothing to update for host %s', host.name
+ )
+ return host
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_CLUSTERHOST_FIELDS +
+ PATCHED_CLUSTERHOST_FIELDS
+ ),
+ ignore_support_keys=UPDATED_HOST_FIELDS
+)
+@utils.input_validates_with_args(
+ roles=_clusterhost_roles_validates,
+ patched_roles=_clusterhost_roles_validates
+)
+def _update_clusterhost_only(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """Update clusterhost only."""
+ check_cluster_editable(clusterhost.cluster, user=user)
+ return utils.update_db_object(
+ session, clusterhost, **kwargs
+ )
+
+
+@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
+def _update_clusterhost(clusterhost, session=None, user=None, **kwargs):
+ """Update clusterhost and underlying host if necessary."""
+ _update_host_if_necessary(
+ clusterhost, session=session, user=user, **kwargs
+ )
+ return _update_clusterhost_only(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def update_cluster_host(
+ cluster_id, host_id, user=None,
+ session=None, **kwargs
+):
+ """Update clusterhost by cluster id and host id."""
+ logging.info('updating kwargs: %s', kwargs)
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(UPDATED_HOST_FIELDS + UPDATED_CLUSTERHOST_FIELDS),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def update_clusterhost(
+ clusterhost_id, user=None,
+ session=None, **kwargs
+):
+ """Update clusterhost by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace 'roles' with 'patched_roles' in kwargs.
+# It tells the db layer that the roles field will be patched, not replaced.
+@utils.replace_filters(
+ roles='patched_roles'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def patch_cluster_host(
+ cluster_id, host_id, user=None,
+ session=None, **kwargs
+):
+ """Patch clusterhost by cluster id and host id."""
+ logging.info("kwargs are %s", kwargs)
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ updated_clusterhost = _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+ return updated_clusterhost
+
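+# Example (illustrative): with the replace_filters above,
+#     patch_cluster_host(1, 2, roles=['allinone-compute'])
+# appends the given roles to the clusterhost instead of overwriting
+# them, since 'roles' is rewritten to 'patched_roles' before reaching
+# the db layer. The role name here is hypothetical.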
+
+# Replace 'roles' with 'patched_roles' in kwargs.
+# It tells the db layer that the roles field will be patched, not replaced.
+@utils.replace_filters(
+ roles='patched_roles'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+def patch_clusterhost(
+ clusterhost_id, user=None, session=None,
+ **kwargs
+):
+ """Patch clusterhost by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTER_HOST
+)
+@utils.wrap_to_dict(
+ RESP_CLUSTERHOST_FIELDS + ['status', 'host'],
+ host=RESP_CLUSTERHOST_FIELDS
+)
+def _del_cluster_host(
+ clusterhost,
+ force=False, from_database_only=False,
+ delete_underlying_host=False, user=None,
+ session=None, **kwargs
+):
+ """delete clusterhost.
+
+ If force, the cluster host will be deleted anyway.
+ If from_database_only, the cluster host recored will only be
+ deleted from database. Otherwise a celery task sent to do
+ clean deletion.
+ If delete_underlying_host, the underlying host will also be deleted.
+ The backend will call _del_cluster_host again when the clusterhost is
+ deleted from os installer/package installer with from_database_only
+ set.
+ """
+    # Force the clusterhost state to ERROR when we want to delete the
+    # clusterhost anyway, even if it is installing or already installed.
+    # This lets the api know a deletion is in progress while the backend
+    # does the real deleting. In the future we may introduce a new state
+    # such as INDELETE to indicate that deletion is in progress.
+    # Whether the api should allow deleting an already installed
+    # clusterhost is still open for discussion.
+ if clusterhost.state.state != 'UNINITIALIZED' and force:
+ clusterhost.state.state = 'ERROR'
+ if not force:
+ check_cluster_editable(
+ clusterhost.cluster, user=user,
+ check_in_installing=True
+ )
+ # delete underlying host if delete_underlying_host is set.
+ if delete_underlying_host:
+ host = clusterhost.host
+ if host.state.state != 'UNINITIALIZED' and force:
+ host.state.state = 'ERROR'
+        # only delete the host when the user has permission to delete it.
+ import compass.db.api.host as host_api
+ if host_api.is_host_editable(
+ host, user=user,
+ check_in_installing=True
+ ):
+            # if the backend does not need to do the deletion, or
+            # from_database_only is set, only delete the record
+            # in the database.
+ if host.state.state == 'UNINITIALIZED' or from_database_only:
+ utils.del_db_object(
+ session, host
+ )
+
+    # if the backend does not need to do the deletion, or
+    # from_database_only is set, only delete the record in the database.
+ if clusterhost.state.state == 'UNINITIALIZED' or from_database_only:
+ return utils.del_db_object(
+ session, clusterhost
+ )
+ else:
+ logging.info(
+ 'send del cluster %s host %s task to celery',
+ clusterhost.cluster_id, clusterhost.host_id
+ )
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.delete_cluster_host',
+ (
+ user.email, clusterhost.cluster_id, clusterhost.host_id,
+ delete_underlying_host
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'delete action sent',
+ 'host': clusterhost,
+ }
+
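+# Example (illustrative): force-delete a clusterhost together with its
+# underlying host, touching only the database:
+#     _del_cluster_host(
+#         clusterhost, force=True, from_database_only=True,
+#         delete_underlying_host=True, user=user, session=session
+#     )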
+
+@utils.supported_filters([])
+@database.run_in_session()
+def del_cluster_host(
+ cluster_id, host_id,
+ force=False, from_database_only=False,
+ delete_underlying_host=False, user=None,
+ session=None, **kwargs
+):
+ """Delete clusterhost by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _del_cluster_host(
+ clusterhost, force=force, from_database_only=from_database_only,
+ delete_underlying_host=delete_underlying_host, user=user,
+ session=session, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+def del_clusterhost(
+ clusterhost_id,
+ force=False, from_database_only=False,
+ delete_underlying_host=False, user=None,
+ session=None, **kwargs
+):
+ """Delete clusterhost by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _del_cluster_host(
+ clusterhost, force=force, from_database_only=from_database_only,
+ delete_underlying_host=delete_underlying_host, user=user,
+ session=session, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def get_cluster_host_config(
+ cluster_id, host_id, user=None,
+ session=None, **kwargs
+):
+ """Get clusterhost config by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
+def get_cluster_host_deployed_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost deployed config by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def get_clusterhost_config(clusterhost_id, user=None, session=None, **kwargs):
+ """Get clusterhost config by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
+def get_clusterhost_deployed_config(
+ clusterhost_id, user=None,
+ session=None, **kwargs
+):
+ """Get clusterhost deployed config by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ )
+
+
+def _clusterhost_os_config_validates(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Validate clusterhost's underlying host os config."""
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ host_api.check_host_editable(host, user=user)
+ metadata_api.validate_os_config(
+ config, host.os_id
+ )
+
+
+def _clusterhost_package_config_validates(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Validate clusterhost's cluster package config."""
+ cluster = clusterhost.cluster
+ check_cluster_editable(cluster, user=user)
+ metadata_api.validate_flavor_config(
+ config, cluster.flavor_id
+ )
+
+
+def _filter_clusterhost_host_editable(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Filter fields if the underlying host is not editable."""
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ return host_api.is_host_editable(host, user=user)
+
+
+@utils.input_filters(
+ put_os_config=_filter_clusterhost_host_editable,
+ patched_os_config=_filter_clusterhost_host_editable
+)
+@utils.input_validates_with_args(
+ put_os_config=_clusterhost_os_config_validates,
+ put_package_config=_clusterhost_package_config_validates
+)
+@utils.output_validates_with_args(
+ os_config=_clusterhost_os_config_validates,
+ package_config=_clusterhost_package_config_validates
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def _update_clusterhost_config(clusterhost, session=None, user=None, **kwargs):
+ """Update clusterhost config."""
+ return utils.update_db_object(
+ session, clusterhost, **kwargs
+ )
+
+
+def _clusterhost_host_validated(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost's underlying host is validated."""
+ from compass.db.api import host as host_api
+ host = clusterhost.host
+ host_api.check_host_editable(host, user=user)
+ host_api.check_host_validated(host)
+
+
+def _clusterhost_cluster_validated(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost's cluster is validated."""
+ cluster = clusterhost.cluster
+ check_cluster_editable(cluster, user=user)
+ check_clusterhost_validated(clusterhost)
+
+
+@utils.input_filters(
+ deployed_os_config=_filter_clusterhost_host_editable,
+)
+@utils.input_validates_with_args(
+ deployed_os_config=_clusterhost_host_validated,
+ deployed_package_config=_clusterhost_cluster_validated
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS)
+def _update_clusterhost_deployed_config(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """Update clusterhost deployed config."""
+ return utils.update_db_object(
+ session, clusterhost, **kwargs
+ )
+
+
+# Replace 'os_config' with 'put_os_config' and
+# 'package_config' with 'put_package_config' in kwargs.
+# It tells the db layer these fields will be updated, not patched.
+@utils.replace_filters(
+ os_config='put_os_config',
+ package_config='put_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_cluster_host_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, user=user, session=session, **kwargs
+ )
+
+
+# Replace 'os_config' with 'deployed_os_config' and
+# 'package_config' with 'deployed_package_config' in kwargs.
+@utils.replace_filters(
+ os_config='deployed_os_config',
+ package_config='deployed_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_cluster_host_deployed_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost deployed config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_deployed_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace 'os_config' with 'put_os_config' and
+# 'package_config' with 'put_package_config' in kwargs.
+# It tells the db layer these fields will be updated, not patched.
+@utils.replace_filters(
+ os_config='put_os_config',
+ package_config='put_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_clusterhost_config(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace 'os_config' with 'deployed_os_config' and
+# 'package_config' with 'deployed_package_config' in kwargs.
+@utils.replace_filters(
+ os_config='deployed_os_config',
+ package_config='deployed_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def update_clusterhost_deployed_config(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Update clusterhost deployed config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost_deployed_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace 'os_config' with 'patched_os_config' and
+# 'package_config' with 'patched_package_config' in kwargs.
+# It tells the db layer these fields will be patched, not updated.
+@utils.replace_filters(
+ os_config='patched_os_config',
+ package_config='patched_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def patch_cluster_host_config(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """patch clusterhost config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+# Replace 'os_config' with 'patched_os_config' and
+# 'package_config' with 'patched_package_config' in kwargs.
+# It tells the db layer these fields will be patched, not updated.
+@utils.replace_filters(
+ os_config='patched_os_config',
+ package_config='patched_package_config'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_CLUSTERHOST_CONFIG_FIELDS,
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_CLUSTERHOST_CONFIG
+)
+def patch_clusterhost_config(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """patch clusterhost config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _update_clusterhost_config(
+ clusterhost, session=session, user=user, **kwargs
+ )
+
+
+def _clusterhost_host_editable(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost underlying host is editable."""
+ from compass.db.api import host as host_api
+ host_api.check_host_editable(clusterhost.host, user=user)
+
+
+def _clusterhost_cluster_editable(
+ config, clusterhost, session=None, user=None, **kwargs
+):
+ """Check clusterhost's cluster is editable."""
+ check_cluster_editable(clusterhost.cluster, user=user)
+
+
+@utils.supported_filters(
+ optional_support_keys=['os_config', 'package_config']
+)
+@utils.input_filters(
+ os_config=_filter_clusterhost_host_editable,
+)
+@utils.output_validates_with_args(
+ package_config=_clusterhost_cluster_editable
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def _delete_clusterhost_config(
+ clusterhost, session=None, user=None, **kwargs
+):
+ """delete clusterhost config."""
+ return utils.update_db_object(
+ session, clusterhost, config_validated=False,
+ **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
+)
+def delete_cluster_host_config(
+ cluster_id, host_id, user=None, session=None
+):
+ """Delete a clusterhost config by cluster id and host id."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _delete_clusterhost_config(
+ clusterhost, session=session, user=user,
+ os_config={}, package_config={}
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_CLUSTERHOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
+def delete_clusterhost_config(clusterhost_id, user=None, session=None):
+ """Delet a clusterhost config by clusterhost id."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+ return _delete_clusterhost_config(
+ clusterhost, session=session, user=user,
+ os_config={}, package_config={}
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=['add_hosts', 'remove_hosts', 'set_hosts']
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_HOSTS
+)
+@utils.wrap_to_dict(
+ ['hosts'],
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def update_cluster_hosts(
+ cluster_id, add_hosts={}, set_hosts=None,
+ remove_hosts={}, user=None, session=None
+):
+ """Update cluster hosts."""
+ cluster = _get_cluster(cluster_id, session=session)
+ if remove_hosts:
+ _remove_clusterhosts(
+ cluster, session=session, user=user, **remove_hosts
+ )
+ if add_hosts:
+ _add_clusterhosts(
+ cluster, session=session, user=user, **add_hosts
+ )
+ if set_hosts is not None:
+ _set_clusterhosts(
+ cluster, session=session, user=user, **set_hosts
+ )
+
+ return {
+ 'hosts': list_cluster_hosts(cluster_id, session=session)
+ }
+
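+# Example (illustrative) payloads for update_cluster_hosts, assuming
+# _add_clusterhosts accepts the same 'machines' payload as
+# _set_clusterhosts, and _remove_clusterhosts accepts a 'hosts' id list:
+#     add_hosts={'machines': [{'machine_id': 1, 'name': 'host1'}]}
+#     remove_hosts={'hosts': [2]}
+#     set_hosts={'machines': [{'machine_id': 3, 'name': 'host3'}]}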
+
+def validate_clusterhost(clusterhost, session=None):
+ """validate clusterhost."""
+ roles = clusterhost.roles
+ if not roles:
+ if clusterhost.cluster.flavor_name:
+ raise exception.InvalidParameter(
+ 'empty roles for clusterhost %s' % clusterhost.name
+ )
+
+
+def validate_cluster(cluster, session=None):
+ """Validate cluster."""
+ if not cluster.clusterhosts:
+ raise exception.InvalidParameter(
+ 'cluster %s does not have any hosts' % cluster.name
+ )
+ if cluster.flavor_name:
+ cluster_roles = cluster.flavor['roles']
+ else:
+ cluster_roles = []
+ necessary_roles = set([
+ role['name'] for role in cluster_roles if not role.get('optional')
+ ])
+ clusterhost_roles = set([])
+ interface_subnets = {}
+ for clusterhost in cluster.clusterhosts:
+ roles = clusterhost.roles
+ for role in roles:
+ clusterhost_roles.add(role['name'])
+ host = clusterhost.host
+ for host_network in host.host_networks:
+ interface_subnets.setdefault(
+ host_network.interface, set([])
+ ).add(host_network.subnet.subnet)
+ missing_roles = necessary_roles - clusterhost_roles
+ if missing_roles:
+ raise exception.InvalidParameter(
+            'cluster %s has roles %s that are not assigned to any host' % (
+ cluster.name, list(missing_roles)
+ )
+ )
+ for interface, subnets in interface_subnets.items():
+ if len(subnets) > 1:
+ raise exception.InvalidParameter(
+                'cluster %s has multiple subnets %s on interface %s' % (
+ cluster.name, list(subnets), interface
+ )
+ )
+
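+# Example (illustrative): for a flavor whose non-optional roles are
+# ['controller', 'compute'] (hypothetical names), validate_cluster
+# raises InvalidParameter if either role is assigned to no clusterhost,
+# or if a single host interface carries networks in more than one
+# subnet.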
+
+@utils.supported_filters(optional_support_keys=['review'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_REVIEW_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_REVIEW_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_CONFIG_FIELDS
+)
+def review_cluster(cluster_id, review={}, user=None, session=None, **kwargs):
+ """review cluster.
+
+ Args:
+ cluster_id: the cluster id.
+ review: dict contains hosts to be reviewed. either contains key
+ hosts or clusterhosts. where hosts is a list of host id,
+ clusterhosts is a list of clusterhost id.
+ """
+ from compass.db.api import host as host_api
+ cluster = _get_cluster(cluster_id, session=session)
+ check_cluster_editable(cluster, user=user)
+ host_ids = review.get('hosts', [])
+ clusterhost_ids = review.get('clusterhosts', [])
+ clusterhosts = []
+    # Get the clusterhosts that need to be reviewed.
+ for clusterhost in cluster.clusterhosts:
+ if (
+ clusterhost.clusterhost_id in clusterhost_ids or
+ clusterhost.host_id in host_ids
+ ):
+ clusterhosts.append(clusterhost)
+
+ os_config = copy.deepcopy(cluster.os_config)
+ os_config = metadata_api.autofill_os_config(
+ os_config, cluster.os_id, cluster=cluster
+ )
+ metadata_api.validate_os_config(
+ os_config, cluster.os_id, True
+ )
+ for clusterhost in clusterhosts:
+ host = clusterhost.host
+ # ignore underlying host os config validation
+ # since the host is not editable
+ if not host_api.is_host_editable(
+ host, user=user, check_in_installing=False
+ ):
+            logging.info(
+                'ignoring host %s config update '
+                'since it is not editable', host.name
+            )
+ continue
+ host_os_config = copy.deepcopy(host.os_config)
+ host_os_config = metadata_api.autofill_os_config(
+ host_os_config, host.os_id,
+ host=host
+ )
+ deployed_os_config = util.merge_dict(
+ os_config, host_os_config
+ )
+ metadata_api.validate_os_config(
+ deployed_os_config, host.os_id, True
+ )
+ host_api.validate_host(host)
+ utils.update_db_object(
+ session, host, os_config=host_os_config, config_validated=True
+ )
+
+ package_config = copy.deepcopy(cluster.package_config)
+ if cluster.flavor_name:
+ package_config = metadata_api.autofill_flavor_config(
+ package_config, cluster.flavor_id,
+ cluster=cluster
+ )
+ metadata_api.validate_flavor_config(
+ package_config, cluster.flavor_id, True
+ )
+ for clusterhost in clusterhosts:
+ clusterhost_package_config = copy.deepcopy(
+ clusterhost.package_config
+ )
+ clusterhost_package_config = (
+ metadata_api.autofill_flavor_config(
+ clusterhost_package_config,
+ cluster.flavor_id,
+ clusterhost=clusterhost
+ )
+ )
+ deployed_package_config = util.merge_dict(
+ package_config, clusterhost_package_config
+ )
+ metadata_api.validate_flavor_config(
+ deployed_package_config,
+ cluster.flavor_id, True
+ )
+ validate_clusterhost(clusterhost, session=session)
+ utils.update_db_object(
+ session, clusterhost,
+ package_config=clusterhost_package_config,
+ config_validated=True
+ )
+
+ validate_cluster(cluster, session=session)
+ utils.update_db_object(
+ session, cluster, os_config=os_config, package_config=package_config,
+ config_validated=True
+ )
+ return {
+ 'cluster': cluster,
+ 'hosts': clusterhosts
+ }
+
+
+@utils.supported_filters(optional_support_keys=['deploy'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def deploy_cluster(
+ cluster_id, deploy={}, user=None, session=None, **kwargs
+):
+ """deploy cluster.
+
+ Args:
+ cluster_id: cluster id.
+ deploy: dict contains key either hosts or clusterhosts.
+ deploy['hosts'] is a list of host id,
+ deploy['clusterhosts'] is a list of clusterhost id.
+ """
+ from compass.db.api import host as host_api
+ from compass.tasks import client as celery_client
+ cluster = _get_cluster(cluster_id, session=session)
+ host_ids = deploy.get('hosts', [])
+ clusterhost_ids = deploy.get('clusterhosts', [])
+ clusterhosts = []
+    # get the clusterhosts to deploy.
+ for clusterhost in cluster.clusterhosts:
+ if (
+ clusterhost.clusterhost_id in clusterhost_ids or
+ clusterhost.host_id in host_ids
+ ):
+ clusterhosts.append(clusterhost)
+ check_cluster_editable(cluster, user=user)
+ check_cluster_validated(cluster)
+ utils.update_db_object(session, cluster.state, state='INITIALIZED')
+ for clusterhost in clusterhosts:
+ host = clusterhost.host
+        # skip checking whether the underlying host is validated
+        # if the host is not editable.
+ if host_api.is_host_editable(host, user=user):
+ host_api.check_host_validated(host)
+ utils.update_db_object(session, host.state, state='INITIALIZED')
+ if cluster.flavor_name:
+ check_clusterhost_validated(clusterhost)
+ utils.update_db_object(
+ session, clusterhost.state, state='INITIALIZED'
+ )
+
+ celery_client.celery.send_task(
+ 'compass.tasks.deploy_cluster',
+ (
+ user.email, cluster_id,
+ [clusterhost.host_id for clusterhost in clusterhosts]
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'deploy action sent',
+ 'cluster': cluster,
+ 'hosts': clusterhosts
+ }
+
+
+@utils.supported_filters(optional_support_keys=['redeploy'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def redeploy_cluster(
+ cluster_id, deploy={}, user=None, session=None, **kwargs
+):
+ """redeploy cluster.
+
+ Args:
+ cluster_id: cluster id.
+ """
+ from compass.db.api import host as host_api
+ from compass.tasks import client as celery_client
+ cluster = _get_cluster(cluster_id, session=session)
+
+ check_cluster_editable(cluster, user=user)
+ check_cluster_validated(cluster)
+ utils.update_db_object(
+ session, cluster.state,
+ state='INITIALIZED',
+ percentage=0,
+ ready=False
+ )
+ for clusterhost in cluster.clusterhosts:
+ host = clusterhost.host
+        # the underlying host must have been validated before redeploying.
+        host_api.check_host_validated(host)
+ utils.update_db_object(
+ session, host.state,
+ state='INITIALIZED',
+ percentage=0,
+ ready=False
+ )
+ if cluster.flavor_name:
+ check_clusterhost_validated(clusterhost)
+ utils.update_db_object(
+ session,
+ clusterhost.state,
+ state='INITIALIZED',
+ percentage=0,
+ ready=False
+ )
+
+ celery_client.celery.send_task(
+ 'compass.tasks.redeploy_cluster',
+ (
+ user.email, cluster_id
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'redeploy action sent',
+ 'cluster': cluster
+ }
+
+
+@utils.supported_filters(optional_support_keys=['apply_patch'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_CLUSTER
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ cluster=RESP_CONFIG_FIELDS,
+ hosts=RESP_CLUSTERHOST_FIELDS
+)
+def patch_cluster(cluster_id, user=None, session=None, **kwargs):
+    """Patch cluster by cluster id."""
+ from compass.tasks import client as celery_client
+
+ cluster = _get_cluster(cluster_id, session=session)
+ celery_client.celery.send_task(
+ 'compass.tasks.patch_cluster',
+ (
+ user.email, cluster_id,
+ ),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'patch action sent',
+ 'cluster': cluster
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTER_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def get_cluster_state(cluster_id, user=None, session=None, **kwargs):
+ """Get cluster state info."""
+ return _get_cluster(cluster_id, session=session).state_dict()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_cluster_host_state(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost state merged with underlying host state."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ ).state_dict()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_cluster_host_self_state(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost itself state."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ ).state
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_clusterhost_state(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost state merged with underlying host state."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ ).state_dict()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def get_clusterhost_self_state(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost itself state."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ ).state
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def update_cluster_host_state(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Update a clusterhost itself state."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+    # Modify(harry): progress_update.py no longer updates the cluster
+    # state, so update the cluster state here as well.
+ cluster = _get_cluster(clusterhost.cluster_id, session=session)
+ utils.update_db_object(session, clusterhost.state, **kwargs)
+ utils.update_db_object(session, cluster.state, **kwargs)
+ return clusterhost.state_dict()
+
+
+def _update_clusterhost_state(
+ clusterhost, from_database_only=False,
+ session=None, user=None, **kwargs
+):
+ """Update clusterhost state.
+
+    If from_database_only, the state is only updated in the database.
+    Otherwise a task is sent to celery, and the os installer/package
+    installer also update their state if needed.
+ """
+ if 'ready' in kwargs and kwargs['ready'] and not clusterhost.state.ready:
+ ready_triggered = True
+ else:
+ ready_triggered = False
+ cluster_ready = False
+ host = clusterhost.host
+ cluster = clusterhost.cluster
+    # host_ready is True when the underlying host has not been marked
+    # ready yet, i.e. its ready flag still needs to be set by the backend.
+    host_ready = not host.state.ready
+ if ready_triggered:
+ cluster_ready = True
+ for clusterhost_in_cluster in cluster.clusterhosts:
+ if (
+ clusterhost_in_cluster.clusterhost_id
+ == clusterhost.clusterhost_id
+ ):
+ continue
+ if not clusterhost_in_cluster.state.ready:
+ cluster_ready = False
+
+ logging.info(
+ 'clusterhost %s ready: %s',
+ clusterhost.name, ready_triggered
+ )
+ logging.info('cluster ready: %s', cluster_ready)
+ logging.info('host ready: %s', host_ready)
+ if not ready_triggered or from_database_only:
+ logging.info('%s state is set to %s', clusterhost.name, kwargs)
+ utils.update_db_object(session, clusterhost.state, **kwargs)
+ if not clusterhost.state.ready:
+ logging.info('%s state ready is set to False', cluster.name)
+ utils.update_db_object(session, cluster.state, ready=False)
+ status = '%s state is updated' % clusterhost.name
+ else:
+ if not user:
+ user_id = cluster.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.package_installed',
+ (
+ clusterhost.cluster_id, clusterhost.host_id,
+ cluster_ready, host_ready
+ ),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ status = '%s: cluster ready %s host ready %s' % (
+ clusterhost.name, cluster_ready, host_ready
+ )
+ logging.info('action status: %s', status)
+ return {
+ 'status': status,
+ 'clusterhost': clusterhost.state_dict()
+ }
+
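+# Illustrative scenario: when the last not-yet-ready clusterhost reports
+# ready=True, ready_triggered and cluster_ready are both True, so a
+# compass.tasks.package_installed task is sent to the backend instead of
+# writing the state to the database directly.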
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(['status', 'clusterhost'])
+def update_cluster_host_state_internal(
+ cluster_id, host_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a clusterhost state by installation process."""
+ # TODO(xicheng): it should be merged into update_cluster_host_state
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return _update_clusterhost_state(
+ clusterhost, from_database_only=from_database_only,
+        session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
+def update_clusterhost_state(
+ clusterhost_id, user=None, session=None, **kwargs
+):
+ """Update a clusterhost itself state."""
+ clusterhost = _get_clusterhost(
+ clusterhost_id, session=session
+ )
+    # Modify(harry): progress_update.py no longer updates the cluster
+    # state, so update the cluster state here as well.
+ cluster = _get_cluster(clusterhost.cluster_id, session=session)
+ utils.update_db_object(session, clusterhost.state, **kwargs)
+ utils.update_db_object(session, cluster.state, **kwargs)
+ return clusterhost.state_dict()
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
+)
+@utils.wrap_to_dict(['status', 'clusterhost'])
+def update_clusterhost_state_internal(
+ clusterhost_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a clusterhost state by installation process."""
+ # TODO(xicheng): it should be merged into update_clusterhost_state
+ clusterhost = _get_clusterhost(clusterhost_id, session=session)
+ return _update_clusterhost_state(
+ clusterhost, from_database_only=from_database_only,
+ session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTER_STATE_FIELDS,
+ ignore_support_keys=(IGNORE_FIELDS + IGNORE_UPDATED_CLUSTER_STATE_FIELDS)
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def update_cluster_state(
+ cluster_id, user=None, session=None, **kwargs
+):
+ """Update a cluster state."""
+ cluster = _get_cluster(
+ cluster_id, session=session
+ )
+ utils.update_db_object(session, cluster.state, **kwargs)
+ return cluster.state_dict()
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTER_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_CLUSTER_STATE
+)
+@utils.wrap_to_dict(['status', 'cluster'])
+def update_cluster_state_internal(
+ cluster_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a cluster state by installation process.
+
+    If from_database_only, the state is only updated in the database.
+    Otherwise a task is sent to update the state in the os installer
+    and package installer.
+ """
+ # TODO(xicheng): it should be merged into update_cluster_state
+ cluster = _get_cluster(cluster_id, session=session)
+ if 'ready' in kwargs and kwargs['ready'] and not cluster.state.ready:
+ ready_triggered = True
+ else:
+ ready_triggered = False
+ clusterhost_ready = {}
+ if ready_triggered:
+ for clusterhost in cluster.clusterhosts:
+ clusterhost_ready[clusterhost.host_id] = (
+ not clusterhost.state.ready
+ )
+
+ logging.info('cluster %s ready: %s', cluster_id, ready_triggered)
+ logging.info('clusterhost ready: %s', clusterhost_ready)
+
+ if not ready_triggered or from_database_only:
+ logging.info('%s state is set to %s', cluster.name, kwargs)
+ utils.update_db_object(session, cluster.state, **kwargs)
+ if not cluster.state.ready:
+ for clusterhost in cluster.clusterhosts:
+                logging.info('%s state ready is set to False', clusterhost.name)
+ utils.update_db_object(
+ session, clusterhost.state, ready=False
+ )
+ status = '%s state is updated' % cluster.name
+ else:
+ if not user:
+ user_id = cluster.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.cluster_installed',
+            (cluster_id, clusterhost_ready),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ status = '%s installed action set clusterhost ready %s' % (
+ cluster.name, clusterhost_ready
+ )
+ logging.info('action status: %s', status)
+ return {
+ 'status': status,
+ 'cluster': cluster.state_dict()
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_cluster_host_log_histories(
+ cluster_id, host_id, user=None, session=None, **kwargs
+):
+ """Get clusterhost log history by cluster id and host id."""
+ return _get_cluster_host(
+ cluster_id, host_id, session=session
+ ).log_histories
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_clusterhost_log_histories(
+ clusterhost_id, user=None,
+ session=None, **kwargs
+):
+ """Get clusterhost log history by clusterhost id."""
+ return _get_clusterhost(
+ clusterhost_id, session=session
+ ).log_histories
+
+
+def _get_cluster_host_log_history(
+ cluster_id, host_id, filename, session=None, **kwargs
+):
+ """Get clusterhost log history by cluster id, host id and filename."""
+ clusterhost = _get_cluster_host(cluster_id, host_id, session=session)
+ return utils.get_db_object(
+ session, models.ClusterHostLogHistory,
+ clusterhost_id=clusterhost.clusterhost_id, filename=filename,
+ **kwargs
+ )
+
+
+def _get_clusterhost_log_history(
+ clusterhost_id, filename, session=None, **kwargs
+):
+ """Get clusterhost log history by clusterhost id and filename."""
+ clusterhost = _get_clusterhost(clusterhost_id, session=session)
+ return utils.get_db_object(
+ session, models.ClusterHostLogHistory,
+ clusterhost_id=clusterhost.clusterhost_id, filename=filename,
+ **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_cluster_host_log_history(
+ cluster_id, host_id, filename, user=None, session=None, **kwargs
+):
+ """Get clusterhost log history by cluster id, host id and filename."""
+ return _get_cluster_host_log_history(
+ cluster_id, host_id, filename, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def get_clusterhost_log_history(
+ clusterhost_id, filename, user=None, session=None, **kwargs
+):
+ """Get host log history by clusterhost id and filename."""
+ return _get_clusterhost_log_history(
+ clusterhost_id, filename, session=session
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def update_cluster_host_log_history(
+ cluster_id, host_id, filename, user=None, session=None, **kwargs
+):
+ """Update a host log history by cluster id, host id and filename."""
+ cluster_host_log_history = _get_cluster_host_log_history(
+ cluster_id, host_id, filename, session=session
+ )
+ return utils.update_db_object(
+ session, cluster_host_log_history, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def update_clusterhost_log_history(
+ clusterhost_id, filename, user=None, session=None, **kwargs
+):
+ """Update a host log history by clusterhost id and filename."""
+ clusterhost_log_history = _get_clusterhost_log_history(
+ clusterhost_id, filename, session=session
+ )
+ return utils.update_db_object(session, clusterhost_log_history, **kwargs)
+
+
+@utils.supported_filters(
+ ADDED_CLUSTERHOST_LOG_FIELDS,
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def add_clusterhost_log_history(
+ clusterhost_id, exception_when_existing=False,
+ filename=None, user=None, session=None, **kwargs
+):
+ """add a host log history by clusterhost id and filename."""
+ clusterhost = _get_clusterhost(clusterhost_id, session=session)
+ return utils.add_db_object(
+ session, models.ClusterHostLogHistory,
+ exception_when_existing,
+ clusterhost.clusterhost_id, filename, **kwargs
+ )
+
+
+@utils.supported_filters(
+ ADDED_CLUSTERHOST_LOG_FIELDS,
+ optional_support_keys=UPDATED_CLUSTERHOST_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)
+def add_cluster_host_log_history(
+ cluster_id, host_id, exception_when_existing=False,
+ filename=None, user=None, session=None, **kwargs
+):
+ """add a host log history by cluster id, host id and filename."""
+ clusterhost = _get_cluster_host(
+ cluster_id, host_id, session=session
+ )
+ return utils.add_db_object(
+ session, models.ClusterHostLogHistory, exception_when_existing,
+ clusterhost.clusterhost_id, filename, **kwargs
+ )
diff --git a/compass-tasks/db/api/database.py b/compass-tasks/db/api/database.py
new file mode 100644
index 0000000..49769d7
--- /dev/null
+++ b/compass-tasks/db/api/database.py
@@ -0,0 +1,264 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provider interface to manipulate database."""
+import functools
+import logging
+import netaddr
+
+from contextlib import contextmanager
+from sqlalchemy import create_engine
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.exc import OperationalError
+from sqlalchemy.orm import scoped_session
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import NullPool
+from sqlalchemy.pool import QueuePool
+from sqlalchemy.pool import SingletonThreadPool
+from sqlalchemy.pool import StaticPool
+from threading import local
+
+from compass.db import exception
+from compass.db import models
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+ENGINE = None
+SESSION = sessionmaker(autocommit=False, autoflush=False)
+SCOPED_SESSION = None
+SESSION_HOLDER = local()
+
+POOL_MAPPING = {
+ 'instant': NullPool,
+ 'static': StaticPool,
+ 'queued': QueuePool,
+ 'thread_single': SingletonThreadPool
+}
+
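+# Illustrative note: with SQLALCHEMY_DATABASE_POOL_TYPE set to 'instant'
+# in settings, init() builds the engine with NullPool, i.e. no connection
+# pooling; 'thread_single' selects SingletonThreadPool, which is mainly
+# useful for SQLite.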
+
+def init(database_url=None):
+ """Initialize database.
+
+ Adjust sqlalchemy logging if necessary.
+
+ :param database_url: string, database url.
+ """
+ global ENGINE
+ global SCOPED_SESSION
+ if not database_url:
+ database_url = setting.SQLALCHEMY_DATABASE_URI
+ logging.info('init database %s', database_url)
+ root_logger = logging.getLogger()
+ fine_debug = root_logger.isEnabledFor(logsetting.LOGLEVEL_MAPPING['fine'])
+ if fine_debug:
+ logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
+ finest_debug = root_logger.isEnabledFor(
+ logsetting.LOGLEVEL_MAPPING['finest']
+ )
+ if finest_debug:
+ logging.getLogger('sqlalchemy.dialects').setLevel(logging.INFO)
+ logging.getLogger('sqlalchemy.pool').setLevel(logging.INFO)
+ logging.getLogger('sqlalchemy.orm').setLevel(logging.INFO)
+ poolclass = POOL_MAPPING[setting.SQLALCHEMY_DATABASE_POOL_TYPE]
+ ENGINE = create_engine(
+ database_url, convert_unicode=True,
+ poolclass=poolclass
+ )
+ SESSION.configure(bind=ENGINE)
+ SCOPED_SESSION = scoped_session(SESSION)
+ models.BASE.query = SCOPED_SESSION.query_property()
+
+
+def in_session():
+ """check if in database session scope."""
+ bool(hasattr(SESSION_HOLDER, 'session'))
+
+
+@contextmanager
+def session(exception_when_in_session=True):
+ """database session scope.
+
+    To operate on the database, code should run inside a database session.
+    If exception_when_in_session is False, the with-session statement
+    supports nesting, and only the outermost session commits or rolls
+    back the transaction.
+ """
+ if not ENGINE:
+ init()
+
+ nested_session = False
+ if hasattr(SESSION_HOLDER, 'session'):
+ if exception_when_in_session:
+ logging.error('we are already in session')
+            raise exception.DatabaseException('session already exists')
+ else:
+ new_session = SESSION_HOLDER.session
+ nested_session = True
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'reuse session %s', nested_session
+ )
+ else:
+ new_session = SCOPED_SESSION()
+ setattr(SESSION_HOLDER, 'session', new_session)
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'enter session %s', new_session
+ )
+ try:
+ yield new_session
+ if not nested_session:
+ new_session.commit()
+ except Exception as error:
+ if not nested_session:
+ new_session.rollback()
+ logging.error('failed to commit session')
+ logging.exception(error)
+        if isinstance(error, IntegrityError):
+            # guess the table name from the first lowercase token in the
+            # failed statement; fall back to 'unknown' if none is found.
+            table_name = 'unknown'
+            for item in error.statement.split():
+                if item.islower():
+                    table_name = item
+                    break
+            raise exception.DuplicatedRecord(
+                '%s in %s' % (error.orig, table_name)
+            )
+ elif isinstance(error, OperationalError):
+ raise exception.DatabaseException(
+ 'operation error in database'
+ )
+ elif isinstance(error, exception.DatabaseException):
+ raise error
+ else:
+ raise exception.DatabaseException(str(error))
+ finally:
+ if not nested_session:
+ new_session.close()
+ SCOPED_SESSION.remove()
+ delattr(SESSION_HOLDER, 'session')
+ logging.log(
+ logsetting.getLevelByName('fine'),
+ 'exit session %s', new_session
+ )
+
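+# Example (illustrative) of the nested-session behavior described above:
+#     with session(exception_when_in_session=False) as outer:
+#         with session(exception_when_in_session=False) as inner:
+#             # the inner block reuses the outer session; only the
+#             # outermost block commits or rolls back
+#             assert inner is outer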
+
+def current_session():
+ """Get the current session scope when it is called.
+
+ :return: database session.
+ :raises: DatabaseException when it is not in session.
+ """
+ try:
+ return SESSION_HOLDER.session
+ except Exception as error:
+ logging.error('It is not in the session scope')
+ logging.exception(error)
+ if isinstance(error, exception.DatabaseException):
+ raise error
+ else:
+ raise exception.DatabaseException(str(error))
+
+
+def run_in_session(exception_when_in_session=True):
+ """Decorator to make sure the decorated function run in session.
+
+ When not exception_when_in_session, the run_in_session can be
+ decorated several times.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ my_session = kwargs.get('session')
+ if my_session is not None:
+ return func(*args, **kwargs)
+ else:
+ with session(
+ exception_when_in_session=exception_when_in_session
+ ) as my_session:
+ kwargs['session'] = my_session
+ return func(*args, **kwargs)
+ except Exception as error:
+ logging.error(
+ 'got exception with func %s args %s kwargs %s',
+ func, args, kwargs
+ )
+ logging.exception(error)
+ raise error
+ return wrapper
+ return decorator
+
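+# Example (illustrative): a function decorated with run_in_session()
+# can be called with or without an explicit session:
+#     @run_in_session()
+#     def count_clusters(session=None):
+#         return session.query(models.Cluster).count()
+#     count_clusters()               # opens and commits its own session
+#     count_clusters(session=other)  # reuses the caller's session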
+
+def _setup_user_table(user_session):
+ """Initialize user table with default user."""
+ logging.info('setup user table')
+ from compass.db.api import user
+ user.add_user(
+ session=user_session,
+ email=setting.COMPASS_ADMIN_EMAIL,
+ password=setting.COMPASS_ADMIN_PASSWORD,
+ is_admin=True
+ )
+
+
+def _setup_permission_table(permission_session):
+ """Initialize permission table."""
+ logging.info('setup permission table.')
+ from compass.db.api import permission
+ permission.add_permissions_internal(
+ session=permission_session
+ )
+
+
+def _setup_switch_table(switch_session):
+ """Initialize switch table."""
+ # TODO(xicheng): deprecate setup default switch.
+ logging.info('setup switch table')
+ from compass.db.api import switch
+ switch.add_switch(
+ True, setting.DEFAULT_SWITCH_IP,
+ session=switch_session,
+ machine_filters=['allow ports all']
+ )
+
+
+def _update_others(other_session):
+ """Update other tables."""
+ logging.info('update other tables')
+ from compass.db.api import utils
+ from compass.db import models
+ utils.update_db_objects(
+ other_session, models.Cluster
+ )
+ utils.update_db_objects(
+ other_session, models.Host
+ )
+ utils.update_db_objects(
+ other_session, models.ClusterHost
+ )
+
+
+@run_in_session()
+def create_db(session=None):
+ """Create database."""
+ models.BASE.metadata.create_all(bind=ENGINE)
+ _setup_permission_table(session)
+ _setup_user_table(session)
+ _setup_switch_table(session)
+ _update_others(session)
+
+
+def drop_db():
+ """Drop database."""
+ models.BASE.metadata.drop_all(bind=ENGINE)
diff --git a/compass-tasks/db/api/health_check_report.py b/compass-tasks/db/api/health_check_report.py
new file mode 100644
index 0000000..aaea7a7
--- /dev/null
+++ b/compass-tasks/db/api/health_check_report.py
@@ -0,0 +1,190 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cluster health check report."""
+import logging
+
+from compass.db.api import cluster as cluster_api
+from compass.db.api import database
+from compass.db.api import host as host_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+
+REQUIRED_INSERT_FIELDS = ['name']
+OPTIONAL_INSERT_FIELDS = [
+ 'display_name', 'report', 'category', 'state', 'error_message'
+]
+UPDATE_FIELDS = ['report', 'state', 'error_message']
+RESP_FIELDS = [
+ 'cluster_id', 'name', 'display_name', 'report',
+ 'category', 'state', 'error_message'
+]
+RESP_ACTION_FIELDS = ['cluster_id', 'status']
+
+
+@utils.supported_filters(REQUIRED_INSERT_FIELDS, OPTIONAL_INSERT_FIELDS)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_report_record(cluster_id, name=None, report={},
+ state='verifying', session=None, **kwargs):
+ """Create a health check report record."""
+    # Replace any whitespace in the name with '-'
+    name = '-'.join(name.split())
+ cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+ return utils.add_db_object(
+ session, models.HealthCheckReport, True, cluster.id, name,
+ report=report, state=state, **kwargs
+ )
+
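+# Example (illustrative): add_report_record(1, name='ping check') stores
+# the report under the name 'ping-check', since whitespace in the name
+# is replaced with '-'.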
+
+def _get_report(cluster_id, name, session=None):
+    """Get a health check report by cluster id and report name."""
+ cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+ return utils.get_db_object(
+ session, models.HealthCheckReport, cluster_id=cluster.id, name=name
+ )
+
+
+@utils.supported_filters(UPDATE_FIELDS)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_report(cluster_id, name, session=None, **kwargs):
+ """Update health check report."""
+ report = _get_report(cluster_id, name, session=session)
+ if report.state == 'finished':
+ err_msg = 'Report cannot be updated if state is in "finished"'
+ raise exception.Forbidden(err_msg)
+
+ return utils.update_db_object(session, report, **kwargs)
+
+
+@utils.supported_filters(UPDATE_FIELDS)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_multi_reports(cluster_id, session=None, **kwargs):
+ """Bulk update reports."""
+    # TODO(grace): rename the function if needed to reflect what it does.
+ return set_error(cluster_id, session=session, **kwargs)
+
+
+def set_error(cluster_id, report={}, session=None,
+ state='error', error_message=None):
+    """Set all health check reports of the cluster to the given state."""
+    cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+    logging.debug(
+        "updating all reports to state %s in cluster %s",
+        state, cluster_id
+    )
+    return utils.update_db_objects(
+        session, models.HealthCheckReport,
+        updates={
+            'report': report,
+            'state': state,
+            'error_message': error_message
+        }, cluster_id=cluster.id
+    )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HEALTH_REPORT
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_health_reports(cluster_id, user=None, session=None):
+ """List all reports in the specified cluster."""
+ cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+ return utils.list_db_objects(
+ session, models.HealthCheckReport, cluster_id=cluster.id
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_HEALTH_REPORT
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_health_report(cluster_id, name, user=None, session=None):
+    """Get one health check report of the cluster by name."""
+ return _get_report(
+ cluster_id, name, session=session
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DELETE_REPORT
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def delete_reports(cluster_id, name=None, user=None, session=None):
+    """Delete one report by name, or all reports of the cluster."""
+    # TODO(grace): better to separate this function into two.
+    # One is to delete a report of a cluster, the other to delete all
+    # reports under a cluster.
+ if name:
+ report = _get_report(cluster_id, name, session=session)
+ return utils.del_db_object(session, report)
+ else:
+ cluster = cluster_api.get_cluster_internal(
+ cluster_id, session=session
+ )
+ return utils.del_db_objects(
+ session, models.HealthCheckReport, cluster_id=cluster.id
+ )
+
+
+@utils.supported_filters(optional_support_keys=['check_health'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_CHECK_CLUSTER_HEALTH
+)
+@utils.wrap_to_dict(RESP_ACTION_FIELDS)
+def start_check_cluster_health(cluster_id, send_report_url,
+ user=None, session=None, check_health={}):
+ """Start to check cluster health."""
+ cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
+
+ if cluster.state.state != 'SUCCESSFUL':
+ logging.debug("state is %s" % cluster.state.state)
+ err_msg = "Healthcheck starts only after cluster finished deployment!"
+ raise exception.Forbidden(err_msg)
+
+ reports = utils.list_db_objects(
+ session, models.HealthCheckReport,
+ cluster_id=cluster.id, state='verifying'
+ )
+ if reports:
+ err_msg = 'Healthcheck in progress, please wait for it to complete!'
+ raise exception.Forbidden(err_msg)
+
+    # Clear all previous reports
+ # TODO(grace): the delete should be moved into celery task.
+ # We should consider the case that celery task is down.
+ utils.del_db_objects(
+ session, models.HealthCheckReport, cluster_id=cluster.id
+ )
+
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.cluster_health',
+ (cluster.id, send_report_url, user.email),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ "cluster_id": cluster.id,
+ "status": "start to check cluster health."
+ }
diff --git a/compass-tasks/db/api/host.py b/compass-tasks/db/api/host.py
new file mode 100644
index 0000000..15e0bb6
--- /dev/null
+++ b/compass-tasks/db/api/host.py
@@ -0,0 +1,1120 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Host database operations."""
+import functools
+import logging
+import netaddr
+import re
+
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac', 'id']
+SUPPORTED_MACHINE_HOST_FIELDS = [
+ 'mac', 'tag', 'location', 'os_name', 'os_id'
+]
+SUPPORTED_NETWORK_FIELDS = [
+ 'interface', 'ip', 'is_mgmt', 'is_promiscuous'
+]
+RESP_FIELDS = [
+ 'id', 'name', 'hostname', 'os_name', 'owner', 'mac',
+ 'switch_ip', 'port', 'switches', 'os_installer', 'os_id', 'ip',
+ 'reinstall_os', 'os_installed', 'tag', 'location', 'networks',
+ 'created_at', 'updated_at'
+]
+RESP_CLUSTER_FIELDS = [
+ 'id', 'name', 'os_name', 'reinstall_distributed_system',
+ 'owner', 'adapter_name', 'flavor_name',
+ 'distributed_system_installed', 'created_at', 'updated_at'
+]
+RESP_NETWORK_FIELDS = [
+ 'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous',
+ 'created_at', 'updated_at'
+]
+RESP_CONFIG_FIELDS = [
+ 'os_config',
+ 'config_step',
+ 'config_validated',
+ 'networks',
+ 'created_at',
+ 'updated_at'
+]
+RESP_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config'
+]
+RESP_DEPLOY_FIELDS = [
+ 'status', 'host'
+]
+UPDATED_FIELDS = ['name', 'reinstall_os']
+UPDATED_CONFIG_FIELDS = [
+ 'put_os_config'
+]
+PATCHED_CONFIG_FIELDS = [
+ 'patched_os_config'
+]
+UPDATED_DEPLOYED_CONFIG_FIELDS = [
+ 'deployed_os_config'
+]
+ADDED_NETWORK_FIELDS = [
+ 'interface', 'ip', 'subnet_id'
+]
+OPTIONAL_ADDED_NETWORK_FIELDS = ['is_mgmt', 'is_promiscuous']
+UPDATED_NETWORK_FIELDS = [
+ 'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
+ 'is_promiscuous'
+]
+IGNORE_FIELDS = [
+ 'id', 'created_at', 'updated_at'
+]
+RESP_STATE_FIELDS = [
+ 'id', 'state', 'percentage', 'message', 'severity', 'ready'
+]
+UPDATED_STATE_FIELDS = [
+ 'state', 'percentage', 'message', 'severity'
+]
+UPDATED_STATE_INTERNAL_FIELDS = [
+ 'ready'
+]
+RESP_LOG_FIELDS = [
+ 'id', 'filename', 'position', 'partial_line', 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+ADDED_LOG_FIELDS = [
+ 'filename'
+]
+UPDATED_LOG_FIELDS = [
+ 'position', 'partial_line', 'percentage',
+ 'message', 'severity', 'line_matcher_name'
+]
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOSTS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_hosts(user=None, session=None, **filters):
+ """List hosts."""
+ return utils.list_db_objects(
+ session, models.Host, **filters
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_MACHINE_HOST_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOSTS
+)
+@utils.output_filters(
+ missing_ok=True,
+ tag=utils.general_filter_callback,
+ location=utils.general_filter_callback,
+ os_name=utils.general_filter_callback,
+ os_id=utils.general_filter_callback
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_machines_or_hosts(user=None, session=None, **filters):
+ """List machines or hosts if possible."""
+ machines = utils.list_db_objects(
+ session, models.Machine, **filters
+ )
+ machines_or_hosts = []
+ for machine in machines:
+ host = machine.host
+ if host:
+ machines_or_hosts.append(host)
+ else:
+ machines_or_hosts.append(machine)
+ return machines_or_hosts
+
+
+def _get_host(host_id, session=None, **kwargs):
+ """Get host by id."""
+ if isinstance(host_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Host,
+ id=host_id, **kwargs
+ )
+ else:
+ raise exception.InvalidParameter(
+ 'host id %s type is not int compatible' % host_id
+ )
+
+
+def get_host_internal(host_id, session=None, **kwargs):
+ """Helper function to get host.
+
+ Used by other files under db/api.
+ """
+ return _get_host(host_id, session=session, **kwargs)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOSTS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_host(
+ host_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get host info."""
+ return _get_host(
+ host_id,
+ exception_when_missing=exception_when_missing,
+ session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOSTS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_machine_or_host(
+ host_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get machine or host if possible."""
+ from compass.db.api import machine as machine_api
+ machine = machine_api.get_machine_internal(
+ host_id,
+ exception_when_missing=exception_when_missing,
+ session=session
+ )
+ if machine.host:
+ return machine.host
+ else:
+ return machine
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_CLUSTERS
+)
+@utils.wrap_to_dict(RESP_CLUSTER_FIELDS)
+def get_host_clusters(host_id, user=None, session=None, **kwargs):
+ """get host clusters."""
+ host = _get_host(host_id, session=session)
+ return [clusterhost.cluster for clusterhost in host.clusterhosts]
+
+
+def check_host_validated(host):
+ """Check host is validated."""
+ if not host.config_validated:
+ raise exception.Forbidden(
+ 'host %s is not validated' % host.name
+ )
+
+
+def check_host_editable(
+ host, user=None,
+ check_in_installing=False
+):
+ """Check host is editable.
+
+ If we try to set reinstall_os or to check that the host is not in the
+ installing state, we should set check_in_installing to True.
+ Otherwise we check that the host is not installing or installed.
+ We also make sure the user is an admin or the owner of the host, to
+ prevent unauthorized users from updating host attributes.
+ """
+ if check_in_installing:
+ if host.state.state == 'INSTALLING':
+ raise exception.Forbidden(
+ 'host %s is not editable '
+ 'when state is in installing' % host.name
+ )
+ elif not host.reinstall_os:
+ raise exception.Forbidden(
+ 'host %s is not editable '
+ 'when not to be reinstalled' % host.name
+ )
+ if user and not user.is_admin and host.creator_id != user.id:
+ raise exception.Forbidden(
+ 'host %s is not editable '
+ 'when user is not admin or the owner of the host' % host.name
+ )
+
+
+def is_host_editable(
+ host, user=None,
+ check_in_installing=False
+):
+ """Get if host is editable."""
+ try:
+ check_host_editable(
+ host, user=user,
+ check_in_installing=check_in_installing
+ )
+ return True
+ except exception.Forbidden:
+ return False
+
+
+def validate_host(host):
+ """Validate host.
+
+ Make sure the hostname is not empty, there is only one mgmt network,
+ and the mgmt network is not in promiscuous mode.
+ """
+ if not host.hostname:
+ raise exception.InvalidParameter(
+ 'host %s does not set hostname' % host.name
+ )
+ if not host.host_networks:
+ raise exception.InvalidParameter(
+ 'host %s does not have any network' % host.name
+ )
+ mgmt_interface_set = False
+ for host_network in host.host_networks:
+ if host_network.is_mgmt:
+ if mgmt_interface_set:
+ raise exception.InvalidParameter(
+ 'host %s has multiple mgmt interfaces set' % host.name
+ )
+ if host_network.is_promiscuous:
+ raise exception.InvalidParameter(
+ 'host %s interface %s is mgmt but promiscuous' % (
+ host.name, host_network.interface
+ )
+ )
+ mgmt_interface_set = True
+ if not mgmt_interface_set:
+ raise exception.InvalidParameter(
+ 'host %s has no mgmt interface' % host.name
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(name=utils.check_name)
+@utils.wrap_to_dict(RESP_FIELDS)
+def _update_host(host_id, session=None, user=None, **kwargs):
+ """Update a host internal."""
+ host = _get_host(host_id, session=session)
+ if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
+ logging.info("ignoring successful host: %s", host_id)
+ return {}
+ check_host_editable(
+ host, user=user,
+ check_in_installing=kwargs.get('reinstall_os', False)
+ )
+ return utils.update_db_object(session, host, **kwargs)
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_HOST
+)
+def update_host(host_id, user=None, session=None, **kwargs):
+ """Update a host."""
+ return _update_host(host_id, session=session, user=user, **kwargs)
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_HOST
+)
+def update_hosts(data=[], user=None, session=None):
+ """Update hosts."""
+ # TODO(xicheng): this batch function is not consistent with the
+ # others. Try to make its output similar to theirs, and make batch
+ # update tolerate partial failure.
+ hosts = []
+ for host_data in data:
+ hosts.append(_update_host(session=session, user=user, **host_data))
+ return hosts
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_HOST
+)
+@utils.wrap_to_dict(
+ RESP_FIELDS + ['status', 'host'],
+ host=RESP_FIELDS
+)
+def del_host(
+ host_id, force=False, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Delete a host.
+
+ If force, we delete the host anyway.
+ If from_database_only, we only delete the host record in the database.
+ Otherwise we send a delete host task to celery to delete the host
+ record in the os installer and package installer, clean installation
+ logs and finally clean the database record.
+ The backend will call this function again with from_database_only set
+ after it deletes the record in the os installer and package installer.
+ """
+ from compass.db.api import cluster as cluster_api
+ host = _get_host(host_id, session=session)
+ # Force the host state to ERROR when we want to delete the host
+ # anyway, even if the host is installing or already installed.
+ # It lets the api know the deletion is in progress while the backend
+ # does the real deleting. In the future we may introduce a new state
+ # like INDELETE to indicate that deletion is in progress.
+ # We still need to discuss whether the api should allow deleting a
+ # host that is already installed.
+ if host.state.state != 'UNINITIALIZED' and force:
+ host.state.state = 'ERROR'
+ check_host_editable(
+ host, user=user,
+ check_in_installing=True
+ )
+ cluster_ids = []
+ for clusterhost in host.clusterhosts:
+ if clusterhost.state.state != 'UNINITIALIZED' and force:
+ clusterhost.state.state = 'ERROR'
+ # TODO(grace): here we check that every cluster using this host is
+ # editable, because the backend has no function to delete a host
+ # without referencing its cluster. Once the backend supports deleting
+ # a standalone host, we should change the code here to
+ # is_cluster_editable.
+ # For now deleting a host may fail even if the force flag is set.
+ cluster_api.check_cluster_editable(
+ clusterhost.cluster, user=user,
+ check_in_installing=True
+ )
+ cluster_ids.append(clusterhost.cluster_id)
+
+ # Delete host record directly if there is no need to delete it
+ # in backend or from_database_only is set.
+ if host.state.state == 'UNINITIALIZED' or from_database_only:
+ return utils.del_db_object(session, host)
+ else:
+ logging.info(
+ 'send del host %s task to celery', host_id
+ )
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.delete_host',
+ (
+ user_email, host.id, cluster_ids
+ ),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'delete action sent',
+ 'host': host,
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def get_host_config(host_id, user=None, session=None, **kwargs):
+ """Get host config."""
+ return _get_host(host_id, session=session)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_DEPLOYED_CONFIG_FIELDS)
+def get_host_deployed_config(host_id, user=None, session=None, **kwargs):
+ """Get host deployed config."""
+ return _get_host(host_id, session=session)
+
+
+# replace os_config with deployed_os_config in kwargs.
+@utils.replace_filters(
+ os_config='deployed_os_config'
+)
+@utils.supported_filters(
+ UPDATED_DEPLOYED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def update_host_deployed_config(host_id, user=None, session=None, **kwargs):
+ """Update host deployed config."""
+ host = _get_host(host_id, session=session)
+ check_host_editable(host, user=user)
+ check_host_validated(host)
+ return utils.update_db_object(session, host, **kwargs)
+
+
+def _host_os_config_validates(
+ config, host, session=None, user=None, **kwargs
+):
+ """Check host os config's validation."""
+ metadata_api.validate_os_config(
+ config, host.os_id
+ )
+
+
+@utils.input_validates_with_args(
+ put_os_config=_host_os_config_validates
+)
+@utils.output_validates_with_args(
+ os_config=_host_os_config_validates
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def _update_host_config(host, session=None, user=None, **kwargs):
+ """Update host config."""
+ check_host_editable(host, user=user)
+ return utils.update_db_object(session, host, **kwargs)
+
+
+# replace os_config with put_os_config in kwargs.
+# It tells the db that os_config will be updated, not patched.
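+# e.g. a caller's os_config={'general': {...}} keyword arrives in this
+# function as put_os_config={'general': {...}} (illustrative example).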
+@utils.replace_filters(
+ os_config='put_os_config'
+)
+@utils.supported_filters(
+ UPDATED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_CONFIG
+)
+def update_host_config(host_id, user=None, session=None, **kwargs):
+ """Update host config."""
+ host = _get_host(host_id, session=session)
+ return _update_host_config(
+ host, session=session, user=user, **kwargs
+ )
+
+
+# replace os_config with patched_os_config in kwargs.
+# It tells the db that os_config will be patched, not updated.
+@utils.replace_filters(
+ os_config='patched_os_config'
+)
+@utils.supported_filters(
+ PATCHED_CONFIG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_CONFIG
+)
+def patch_host_config(host_id, user=None, session=None, **kwargs):
+ """Patch host config."""
+ host = _get_host(host_id, session=session)
+ return _update_host_config(
+ host, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_HOST_CONFIG
+)
+@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
+def del_host_config(host_id, user=None, session=None):
+ """delete a host config."""
+ host = _get_host(host_id, session=session)
+ check_host_editable(host, user=user)
+ return utils.update_db_object(
+ session, host, os_config={}, config_validated=False
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_NETWORK_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_NETWORKS
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def list_host_networks(host_id, user=None, session=None, **filters):
+ """Get host networks for a host."""
+ host = _get_host(host_id, session=session)
+ return utils.list_db_objects(
+ session, models.HostNetwork,
+ host_id=host.id, **filters
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_NETWORK_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_NETWORKS
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def list_hostnetworks(user=None, session=None, **filters):
+ """Get host networks."""
+ return utils.list_db_objects(
+ session, models.HostNetwork, **filters
+ )
+
+
+def _get_hostnetwork(host_network_id, session=None, **kwargs):
+ """Get hostnetwork by hostnetwork id."""
+ if isinstance(host_network_id, (int, long)):
+ return utils.get_db_object(
+ session, models.HostNetwork,
+ id=host_network_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'host network id %s type is not int compatible' % host_network_id
+ )
+
+
+def _get_host_network(host_id, host_network_id, session=None, **kwargs):
+ """Get hostnetwork by host id and hostnetwork id."""
+ host = _get_host(host_id, session=session)
+ host_network = _get_hostnetwork(host_network_id, session=session, **kwargs)
+ if host_network.host_id != host.id:
+ raise exception.RecordNotExists(
+ 'host %s does not own host network %s' % (
+ host.id, host_network.id
+ )
+ )
+ return host_network
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_NETWORKS
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def get_host_network(
+ host_id, host_network_id,
+ user=None, session=None, **kwargs
+):
+ """Get host network."""
+ return _get_host_network(
+ host_id, host_network_id, session=session
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_HOST_NETWORKS
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def get_hostnetwork(host_network_id, user=None, session=None, **kwargs):
+ """Get host network."""
+ return _get_hostnetwork(host_network_id, session=session)
+
+
+@utils.supported_filters(
+ ADDED_NETWORK_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_NETWORK_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ ip=utils.check_ip
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def _add_host_network(
+ host_id, exception_when_existing=True,
+ session=None, user=None, interface=None, ip=None, **kwargs
+):
+ """Add hostnetwork to a host."""
+ host = _get_host(host_id, session=session)
+ check_host_editable(host, user=user)
+ user_id = user.id
+ return utils.add_db_object(
+ session, models.HostNetwork,
+ exception_when_existing,
+ host.id, interface, user_id, ip=ip, **kwargs
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_NETWORK
+)
+def add_host_network(
+ host_id, exception_when_existing=True,
+ interface=None, user=None, session=None, **kwargs
+):
+ """Create a hostnetwork to a host."""
+ return _add_host_network(
+ host_id,
+ exception_when_existing,
+ interface=interface, session=session, user=user, **kwargs
+ )
+
+
+def _get_hostnetwork_by_ip(
+ ip, session=None, **kwargs
+):
+ ip_int = long(netaddr.IPAddress(ip))
+ return utils.get_db_object(
+ session, models.HostNetwork,
+ ip_int=ip_int, **kwargs
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_NETWORK
+)
+def add_host_networks(
+ exception_when_existing=False,
+ data=[], user=None, session=None
+):
+ """Create host networks."""
+ hosts = []
+ failed_hosts = []
+ for host_data in data:
+ host_id = host_data['host_id']
+ host = _get_host(host_id, session=session)
+ networks = host_data['networks']
+ host_networks = []
+ failed_host_networks = []
+ for network in networks:
+ host_network = _get_hostnetwork_by_ip(
+ network['ip'], session=session,
+ exception_when_missing=False
+ )
+ if (
+ host_network and not (
+ host_network.host_id == host.id and
+ host_network.interface == network['interface']
+ )
+ ):
+ logging.error('ip %s exists in host network %s' % (
+ network['ip'], host_network.id
+ ))
+ failed_host_networks.append(network)
+ else:
+ host_networks.append(_add_host_network(
+ host.id, exception_when_existing,
+ session=session, user=user, **network
+ ))
+ if host_networks:
+ hosts.append({'host_id': host.id, 'networks': host_networks})
+ if failed_host_networks:
+ failed_hosts.append({
+ 'host_id': host.id, 'networks': failed_host_networks
+ })
+ return {
+ 'hosts': hosts,
+ 'failed_hosts': failed_hosts
+ }
+
+
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def _update_host_network(
+ host_network, session=None, user=None, **kwargs
+):
+ """Update host network."""
+ check_host_editable(host_network.host, user=user)
+ return utils.update_db_object(session, host_network, **kwargs)
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_NETWORK_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ ip=utils.check_ip
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_NETWORK
+)
+def update_host_network(
+ host_id, host_network_id, user=None, session=None, **kwargs
+):
+ """Update a host network by host id and host network id."""
+ host = _get_host(
+ host_id, session=session
+ )
+ if host.state.state == "SUCCESSFUL" and not host.reinstall_os:
+ logging.info("ignoring updating request for successful hosts")
+ return {}
+
+ host_network = _get_host_network(
+ host_id, host_network_id, session=session
+ )
+ return _update_host_network(
+ host_network, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_NETWORK_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ ip=utils.check_ip
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_HOST_NETWORK
+)
+def update_hostnetwork(host_network_id, user=None, session=None, **kwargs):
+ """Update a host network by host network id."""
+ host_network = _get_hostnetwork(
+ host_network_id, session=session
+ )
+ return _update_host_network(
+ host_network, session=session, user=user, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_HOST_NETWORK
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def del_host_network(
+ host_id, host_network_id, user=None,
+ session=None, **kwargs
+):
+ """Delete a host network by host id and host network id."""
+ host_network = _get_host_network(
+ host_id, host_network_id, session=session
+ )
+ check_host_editable(host_network.host, user=user)
+ return utils.del_db_object(session, host_network)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_HOST_NETWORK
+)
+@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
+def del_hostnetwork(host_network_id, user=None, session=None, **kwargs):
+ """Delete a host network by host network id."""
+ host_network = _get_hostnetwork(
+ host_network_id, session=session
+ )
+ check_host_editable(host_network.host, user=user)
+ return utils.del_db_object(session, host_network)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_GET_HOST_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def get_host_state(host_id, user=None, session=None, **kwargs):
+ """Get host state info."""
+ return _get_host(host_id, session=session).state
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_STATE_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_HOST_STATE
+)
+@utils.wrap_to_dict(RESP_STATE_FIELDS)
+def update_host_state(host_id, user=None, session=None, **kwargs):
+ """Update a host state."""
+ host = _get_host(host_id, session=session)
+ utils.update_db_object(session, host.state, **kwargs)
+ return host.state
+
+
+@util.deprecated
+@utils.supported_filters(
+ optional_support_keys=UPDATED_STATE_INTERNAL_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_HOST_STATE
+)
+@utils.wrap_to_dict(['status', 'host'])
+def update_host_state_internal(
+ host_id, from_database_only=False,
+ user=None, session=None, **kwargs
+):
+ """Update a host state.
+
+ This function is called when the host os is installed.
+ If from_database_only, the state is only updated in the database.
+ Otherwise a celery task is sent to the os installer and package
+ installer to do follow-up actions.
+ """
+ # TODO(xicheng): should be merged into update_host_state
+ host = _get_host(host_id, session=session)
+ logging.info("======host state: %s", host.state)
+ if 'ready' in kwargs and kwargs['ready'] and not host.state.ready:
+ ready_triggered = True
+ else:
+ ready_triggered = False
+ clusterhosts_ready = {}
+ clusters_os_ready = {}
+ if ready_triggered:
+ for clusterhost in host.clusterhosts:
+ cluster = clusterhost.cluster
+ if cluster.flavor_name:
+ clusterhosts_ready[cluster.id] = False
+ else:
+ clusterhosts_ready[cluster.id] = True
+ all_os_ready = True
+ for clusterhost_in_cluster in cluster.clusterhosts:
+ host_in_cluster = clusterhost_in_cluster.host
+ if host_in_cluster.id == host.id:
+ continue
+ if not host_in_cluster.state.ready:
+ all_os_ready = False
+ clusters_os_ready[cluster.id] = all_os_ready
+ logging.debug('host %s ready: %s', host_id, ready_triggered)
+ logging.debug("clusterhosts_ready is: %s", clusterhosts_ready)
+ logging.debug("clusters_os_ready is %s", clusters_os_ready)
+
+ if not ready_triggered or from_database_only:
+ logging.debug('%s state is set to %s', host.name, kwargs)
+ utils.update_db_object(session, host.state, **kwargs)
+ if not host.state.ready:
+ for clusterhost in host.clusterhosts:
+ utils.update_db_object(
+ session, clusterhost.state, ready=False
+ )
+ utils.update_db_object(
+ session, clusterhost.cluster.state, ready=False
+ )
+ status = '%s state is updated' % host.name
+ else:
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ from compass.tasks import client as celery_client
+ celery_client.celery.send_task(
+ 'compass.tasks.os_installed',
+ (
+ host.id, clusterhosts_ready,
+ clusters_os_ready
+ ),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ status = '%s: clusterhosts ready %s clusters os ready %s' % (
+ host.name, clusterhosts_ready, clusters_os_ready
+ )
+ logging.info('action status: %s', status)
+ return {
+ 'status': status,
+ 'host': host.state
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_LOG_FIELDS)
+def get_host_log_histories(host_id, user=None, session=None, **kwargs):
+ """Get host log history."""
+ host = _get_host(host_id, session=session)
+ return utils.list_db_objects(
+ session, models.HostLogHistory, id=host.id, **kwargs
+ )
+
+
+def _get_host_log_history(host_id, filename, session=None, **kwargs):
+ """Get a host log history by host id and filename."""
+ host = _get_host(host_id, session=session)
+ return utils.get_db_object(
+ session, models.HostLogHistory, id=host.id,
+ filename=filename, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_LOG_FIELDS)
+def get_host_log_history(host_id, filename, user=None, session=None, **kwargs):
+ """Get host log history."""
+ return _get_host_log_history(
+ host_id, filename, session=session
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_LOG_FIELDS)
+def update_host_log_history(
+ host_id, filename, user=None,
+ session=None, **kwargs
+):
+ """Update a host log history."""
+ host_log_history = _get_host_log_history(
+ host_id, filename, session=session
+ )
+ return utils.update_db_object(session, host_log_history, **kwargs)
+
+
+@utils.supported_filters(
+ ADDED_LOG_FIELDS,
+ optional_support_keys=UPDATED_LOG_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_LOG_FIELDS)
+def add_host_log_history(
+ host_id, exception_when_existing=False,
+ filename=None, user=None, session=None, **kwargs
+):
+ """add a host log history."""
+ host = _get_host(host_id, session=session)
+ return utils.add_db_object(
+ session, models.HostLogHistory, exception_when_existing,
+ host.id, filename, **kwargs
+ )
+
+
+@utils.supported_filters(optional_support_keys=['poweron'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ host=RESP_CONFIG_FIELDS
+)
+def poweron_host(
+ host_id, poweron={}, user=None, session=None, **kwargs
+):
+ """power on host."""
+ from compass.tasks import client as celery_client
+ host = _get_host(host_id, session=session)
+ check_host_validated(host)
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.poweron_host',
+ (host.id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'poweron %s action sent' % host.name,
+ 'host': host
+ }
+
+
+@utils.supported_filters(optional_support_keys=['poweroff'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ host=RESP_CONFIG_FIELDS
+)
+def poweroff_host(
+ host_id, poweroff={}, user=None, session=None, **kwargs
+):
+ """power off host."""
+ from compass.tasks import client as celery_client
+ host = _get_host(host_id, session=session)
+ check_host_validated(host)
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.poweroff_host',
+ (host.id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'poweroff %s action sent' % host.name,
+ 'host': host
+ }
+
+
+@utils.supported_filters(optional_support_keys=['reset'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ host=RESP_CONFIG_FIELDS
+)
+def reset_host(
+ host_id, reset={}, user=None, session=None, **kwargs
+):
+ """reset host."""
+ from compass.tasks import client as celery_client
+ host = _get_host(host_id, session=session)
+ check_host_validated(host)
+ if not user:
+ user_id = host.creator_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.reset_host',
+ (host.id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'reset %s action sent' % host.name,
+ 'host': host
+ }
diff --git a/compass-tasks/db/api/machine.py b/compass-tasks/db/api/machine.py
new file mode 100644
index 0000000..b7b16b2
--- /dev/null
+++ b/compass-tasks/db/api/machine.py
@@ -0,0 +1,317 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Switch database operations."""
+import logging
+import re
+
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+MACHINE_PRIMARY_FIELDS = ['mac', 'owner_id']
+SUPPORTED_FIELDS = [
+ 'mac', 'tag', 'location',
+ 'machine_attributes', 'owner_id']
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+UPDATED_FIELDS = [
+ 'ipmi_credentials', 'machine_attributes',
+ 'tag', 'location']
+PATCHED_FIELDS = [
+ 'patched_ipmi_credentials', 'patched_tag',
+ 'patched_location'
+]
+RESP_FIELDS = [
+ 'id', 'mac', 'ipmi_credentials', 'switches', 'switch_ip',
+ 'port', 'vlans', 'machine_attributes', 'owner_id',
+ 'tag', 'location', 'created_at', 'updated_at'
+]
+RESP_DEPLOY_FIELDS = [
+ 'status', 'machine'
+]
+
+
+def _get_machine(machine_id, session=None, **kwargs):
+ """Get machine by id."""
+ if isinstance(machine_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Machine,
+ id=machine_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'machine id %s type is not int compatible' % machine_id
+ )
+
+
+@utils.supported_filters(
+ MACHINE_PRIMARY_FIELDS,
+ optional_support_keys=SUPPORTED_FIELDS
+)
+@utils.input_validates(mac=utils.check_mac)
+def _add_machine(mac, owner_id=None, session=None, **kwargs):
+ """Add a machine."""
+ if isinstance(owner_id, (int, long)):
+ return utils.add_db_object(
+ session, models.Machine,
+ True,
+ mac,
+ owner_id=owner_id,
+ **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'owner id %s type is not int compatible' % owner_id
+ )
+
+
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_machine(
+ mac, owner_id=None, session=None, user=None, **kwargs
+):
+ """Add a machine."""
+ return _add_machine(
+ mac,
+ owner_id=owner_id,
+ session=session, **kwargs
+ )
+
+
+def get_machine_internal(machine_id, session=None, **kwargs):
+ """Helper function to other files under db/api."""
+ return _get_machine(machine_id, session=session, **kwargs)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_MACHINES
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_machine(
+ machine_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a machine."""
+ return _get_machine(
+ machine_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_MACHINES
+)
+@utils.output_filters(
+ tag=utils.general_filter_callback,
+ location=utils.general_filter_callback
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_machines(user=None, session=None, **filters):
+ """List machines."""
+ machines = utils.list_db_objects(
+ session, models.Machine, **filters
+ )
+ if not user.is_admin and machines:
+ machines = [m for m in machines if m.owner_id == user.id]
+ return machines
+
+
+@utils.wrap_to_dict(RESP_FIELDS)
+def _update_machine(machine_id, session=None, **kwargs):
+ """Update a machine."""
+ machine = _get_machine(machine_id, session=session)
+ return utils.update_db_object(session, machine, **kwargs)
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(ipmi_credentials=utils.check_ipmi_credentials)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_MACHINE
+)
+def update_machine(machine_id, user=None, session=None, **kwargs):
+ """Update a machine."""
+ return _update_machine(
+ machine_id, session=session, **kwargs
+ )
+
+
+# replace [ipmi_credentials, tag, location] with
+# [patched_ipmi_credentials, patched_tag, patched_location]
+# in kwargs. It tells the db that these fields will be patched.
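+# e.g. patch_machine(1, tag={'rack': 'r1'}) reaches _update_machine as
+# patched_tag={'rack': 'r1'} (illustrative values).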
+@utils.replace_filters(
+ ipmi_credentials='patched_ipmi_credentials',
+ tag='patched_tag',
+ location='patched_location'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@utils.output_validates(ipmi_credentials=utils.check_ipmi_credentials)
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_MACHINE
+)
+def patch_machine(machine_id, user=None, session=None, **kwargs):
+ """Patch a machine."""
+ return _update_machine(
+ machine_id, session=session, **kwargs
+ )
+
+
+def _check_machine_deletable(machine):
+ """Check a machine deletable."""
+ if machine.host:
+ host = machine.host
+ raise exception.NotAcceptable(
+ 'machine %s has host %s on it' % (
+ machine.mac, host.name
+ )
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_MACHINE
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_machine(machine_id, user=None, session=None, **kwargs):
+ """Delete a machine."""
+ machine = _get_machine(machine_id, session=session)
+ _check_machine_deletable(machine)
+ return utils.del_db_object(session, machine)
+
+
+@utils.supported_filters(optional_support_keys=['poweron'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ machine=RESP_FIELDS
+)
+def poweron_machine(
+ machine_id, poweron={}, user=None, session=None, **kwargs
+):
+ """power on machine."""
+ from compass.tasks import client as celery_client
+ machine = _get_machine(
+ machine_id, session=session
+ )
+ if not user:
+ user_id = machine.owner_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.poweron_machine',
+ (machine_id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'poweron %s action sent' % machine.mac,
+ 'machine': machine
+ }
+
+
+@utils.supported_filters(optional_support_keys=['poweroff'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ machine=RESP_FIELDS
+)
+def poweroff_machine(
+ machine_id, poweroff={}, user=None, session=None, **kwargs
+):
+ """power off machine."""
+ from compass.tasks import client as celery_client
+ machine = _get_machine(
+ machine_id, session=session
+ )
+ if not user:
+ user_id = machine.owner_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.poweroff_machine',
+ (machine_id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'poweroff %s action sent' % machine.mac,
+ 'machine': machine
+ }
+
+
+@utils.supported_filters(optional_support_keys=['reset'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEPLOY_HOST
+)
+@utils.wrap_to_dict(
+ RESP_DEPLOY_FIELDS,
+ machine=RESP_FIELDS
+)
+def reset_machine(
+ machine_id, reset={}, user=None, session=None, **kwargs
+):
+ """reset machine."""
+ from compass.tasks import client as celery_client
+ machine = _get_machine(
+ machine_id, session=session
+ )
+ if not user:
+ user_id = machine.owner_id
+ user_dict = user_api.get_user(user_id, session=session)
+ user_email = user_dict['email']
+ else:
+ user_email = user.email
+ celery_client.celery.send_task(
+ 'compass.tasks.reset_machine',
+ (machine_id,),
+ queue=user_email,
+ exchange=user_email,
+ routing_key=user_email
+ )
+ return {
+ 'status': 'reset %s action sent' % machine.mac,
+ 'machine': machine
+ }
diff --git a/compass-tasks/db/api/metadata.py b/compass-tasks/db/api/metadata.py
new file mode 100644
index 0000000..16310c8
--- /dev/null
+++ b/compass-tasks/db/api/metadata.py
@@ -0,0 +1,517 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metadata related database operations."""
+import copy
+import logging
+import string
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import callback as metadata_callback
+from compass.db import exception
+from compass.db import models
+from compass.db import validator as metadata_validator
+
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+OS_FIELDS = None
+PACKAGE_FIELDS = None
+FLAVOR_FIELDS = None
+OSES_METADATA = None
+PACKAGES_METADATA = None
+FLAVORS_METADATA = None
+OSES_METADATA_UI_CONVERTERS = None
+FLAVORS_METADATA_UI_CONVERTERS = None
+
+
+def _get_field_from_configuration(configs):
+ """Get fields from configurations."""
+ fields = {}
+ for config in configs:
+ if not isinstance(config, dict):
+ raise exception.InvalidParameter(
+ 'config %s is not dict' % config
+ )
+ field_name = config['NAME']
+ fields[field_name] = {
+ 'name': field_name,
+ 'id': field_name,
+ 'field_type': config.get('FIELD_TYPE', basestring),
+ 'display_type': config.get('DISPLAY_TYPE', 'text'),
+ 'validator': config.get('VALIDATOR', None),
+ 'js_validator': config.get('JS_VALIDATOR', None),
+ 'description': config.get('DESCRIPTION', field_name)
+ }
+ return fields
+
+
+def _get_os_fields_from_configuration():
+ """Get os fields from os field config dir."""
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.OS_FIELD_DIR,
+ env_locals=env_locals
+ )
+ return _get_field_from_configuration(
+ configs
+ )
+
+
+def _get_package_fields_from_configuration():
+ """Get package fields from package field config dir."""
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.PACKAGE_FIELD_DIR,
+ env_locals=env_locals
+ )
+ return _get_field_from_configuration(
+ configs
+ )
+
+
+def _get_flavor_fields_from_configuration():
+ """Get flavor fields from flavor field config dir."""
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.FLAVOR_FIELD_DIR,
+ env_locals=env_locals
+ )
+ return _get_field_from_configuration(
+ configs
+ )
+
+
+def _get_metadata_from_configuration(
+ path, name, config,
+ fields, **kwargs
+):
+ """Recursively get metadata from configuration.
+
+ Args:
+ path: used to indicate the path to the root element.
+ mainly for trouble shooting.
+ name: the key of the metadata section.
+ config: the value of the metadata section.
+ fields: all fields defined in os fields or package fields dir.
+ """
+ if not isinstance(config, dict):
+ raise exception.InvalidParameter(
+ '%s config %s is not dict' % (path, config)
+ )
+ metadata_self = config.get('_self', {})
+ if 'field' in metadata_self:
+ field_name = metadata_self['field']
+ field = fields[field_name]
+ else:
+ field = {}
+ # mapping_to may contain $ placeholders like $partition. Here we
+ # replace $partition with the key of the corresponding config. The
+ # backend can then use this feature to support multiple partitions
+ # while the partition metadata is declared in only one place.
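+ # e.g. string.Template('$partition/max_size').safe_substitute(
+ # partition='/var') yields '/var/max_size' (illustrative values).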
+ mapping_to_template = metadata_self.get('mapping_to', None)
+ if mapping_to_template:
+ mapping_to = string.Template(
+ mapping_to_template
+ ).safe_substitute(
+ **kwargs
+ )
+ else:
+ mapping_to = None
+ self_metadata = {
+ 'name': name,
+ 'display_name': metadata_self.get('display_name', name),
+ 'field_type': field.get('field_type', dict),
+ 'display_type': field.get('display_type', None),
+ 'description': metadata_self.get(
+ 'description', field.get('description', None)
+ ),
+ 'is_required': metadata_self.get('is_required', False),
+ 'required_in_whole_config': metadata_self.get(
+ 'required_in_whole_config', False),
+ 'mapping_to': mapping_to,
+ 'validator': metadata_self.get(
+ 'validator', field.get('validator', None)
+ ),
+ 'js_validator': metadata_self.get(
+ 'js_validator', field.get('js_validator', None)
+ ),
+ 'default_value': metadata_self.get('default_value', None),
+ 'default_callback': metadata_self.get('default_callback', None),
+ 'default_callback_params': metadata_self.get(
+ 'default_callback_params', {}),
+ 'options': metadata_self.get('options', None),
+ 'options_callback': metadata_self.get('options_callback', None),
+ 'options_callback_params': metadata_self.get(
+ 'options_callback_params', {}),
+ 'autofill_callback': metadata_self.get(
+ 'autofill_callback', None),
+ 'autofill_callback_params': metadata_self.get(
+ 'autofill_callback_params', {}),
+ 'required_in_options': metadata_self.get(
+ 'required_in_options', False)
+ }
+ self_metadata.update(kwargs)
+ metadata = {'_self': self_metadata}
+ # Key extensions do two things:
+ # one is to return the extended metadata in which $<something>
+ # is replaced by its possible extensions.
+ # The other is to record the $<something> to extended-value mapping
+ # to be used in later mapping_to substitution.
+ # TODO(grace): pick a better name than key_extensions if
+ # you think of one.
+ # Suppose key_extensions is {'$partition': ['/var', '/']}: the metadata
+ # for $partition will be mapped to {
+ # '/var': ..., '/': ...} and kwargs={'partition': '/var'} and
+ # kwargs={'partition': '/'} will be passed to the recursive metadata
+ # parsing for the sub metadata under '/var' and '/'. In the parsing of
+ # that sub metadata, the kwargs are used to substitute mapping_to.
+ key_extensions = metadata_self.get('key_extensions', {})
+ general_keys = []
+ for key, value in config.items():
+ if key.startswith('_'):
+ continue
+ if key in key_extensions:
+ if not key.startswith('$'):
+ raise exception.InvalidParameter(
+ '%s subkey %s should start with $' % (
+ path, key
+ )
+ )
+ extended_keys = key_extensions[key]
+ for extended_key in extended_keys:
+ if extended_key.startswith('$'):
+ raise exception.InvalidParameter(
+ '%s extended key %s should not start with $' % (
+ path, extended_key
+ )
+ )
+ sub_kwargs = dict(kwargs)
+ sub_kwargs[key[1:]] = extended_key
+ metadata[extended_key] = _get_metadata_from_configuration(
+ '%s/%s' % (path, extended_key), extended_key, value,
+ fields, **sub_kwargs
+ )
+ else:
+ if key.startswith('$'):
+ general_keys.append(key)
+ metadata[key] = _get_metadata_from_configuration(
+ '%s/%s' % (path, key), key, value,
+ fields, **kwargs
+ )
+ if len(general_keys) > 1:
+ raise exception.InvalidParameter(
+ 'found multiple general keys in %s: %s' % (
+ path, general_keys
+ )
+ )
+ return metadata
+
+
+def _get_oses_metadata_from_configuration():
+ """Get os metadata from os metadata config dir."""
+ oses_metadata = {}
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.OS_METADATA_DIR,
+ env_locals=env_locals
+ )
+ for config in configs:
+ os_name = config['OS']
+ os_metadata = oses_metadata.setdefault(os_name, {})
+ for key, value in config['METADATA'].items():
+ os_metadata[key] = _get_metadata_from_configuration(
+ key, key, value, OS_FIELDS
+ )
+
+ oses = adapter_api.OSES
+ parents = {}
+ for os_name, os in oses.items():
+ parent = os.get('parent', None)
+ parents[os_name] = parent
+ for os_name, os in oses.items():
+ oses_metadata[os_name] = util.recursive_merge_dict(
+ os_name, oses_metadata, parents
+ )
+ return oses_metadata
+
+
+def _get_packages_metadata_from_configuration():
+ """Get package metadata from package metadata config dir."""
+ packages_metadata = {}
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.PACKAGE_METADATA_DIR,
+ env_locals=env_locals
+ )
+ for config in configs:
+ adapter_name = config['ADAPTER']
+ package_metadata = packages_metadata.setdefault(adapter_name, {})
+ for key, value in config['METADATA'].items():
+ package_metadata[key] = _get_metadata_from_configuration(
+ key, key, value, PACKAGE_FIELDS
+ )
+ adapters = adapter_api.ADAPTERS
+ parents = {}
+ for adapter_name, adapter in adapters.items():
+ parent = adapter.get('parent', None)
+ parents[adapter_name] = parent
+ for adapter_name, adapter in adapters.items():
+ packages_metadata[adapter_name] = util.recursive_merge_dict(
+ adapter_name, packages_metadata, parents
+ )
+ return packages_metadata
+
+
+def _get_flavors_metadata_from_configuration():
+ """Get flavor metadata from flavor metadata config dir."""
+ flavors_metadata = {}
+ env_locals = {}
+ env_locals.update(metadata_validator.VALIDATOR_LOCALS)
+ env_locals.update(metadata_callback.CALLBACK_LOCALS)
+ configs = util.load_configs(
+ setting.FLAVOR_METADATA_DIR,
+ env_locals=env_locals
+ )
+ for config in configs:
+ adapter_name = config['ADAPTER']
+ flavor_name = config['FLAVOR']
+ flavor_metadata = flavors_metadata.setdefault(
+ adapter_name, {}
+ ).setdefault(flavor_name, {})
+ for key, value in config['METADATA'].items():
+ flavor_metadata[key] = _get_metadata_from_configuration(
+ key, key, value, FLAVOR_FIELDS
+ )
+
+ packages_metadata = PACKAGES_METADATA
+ adapters_flavors = adapter_api.ADAPTERS_FLAVORS
+ for adapter_name, adapter_flavors in adapters_flavors.items():
+ package_metadata = packages_metadata.get(adapter_name, {})
+ for flavor_name, flavor in adapter_flavors.items():
+ flavor_metadata = flavors_metadata.setdefault(
+ adapter_name, {}
+ ).setdefault(flavor_name, {})
+ util.merge_dict(flavor_metadata, package_metadata, override=False)
+ return flavors_metadata
+
+
+def _filter_metadata(metadata, **kwargs):
+ """Recursively fill default_value and options via their callbacks."""
+ if not isinstance(metadata, dict):
+ return metadata
+ filtered_metadata = {}
+ for key, value in metadata.items():
+ if key == '_self':
+ default_value = value.get('default_value', None)
+ if default_value is None:
+ default_callback_params = value.get(
+ 'default_callback_params', {}
+ )
+ callback_params = dict(kwargs)
+ if default_callback_params:
+ callback_params.update(default_callback_params)
+ default_callback = value.get('default_callback', None)
+ if default_callback:
+ default_value = default_callback(key, **callback_params)
+ options = value.get('options', None)
+ if options is None:
+ options_callback_params = value.get(
+ 'options_callback_params', {}
+ )
+ callback_params = dict(kwargs)
+ if options_callback_params:
+ callback_params.update(options_callback_params)
+
+ options_callback = value.get('options_callback', None)
+ if options_callback:
+ options = options_callback(key, **callback_params)
+ filtered_metadata[key] = value
+ if default_value is not None:
+ filtered_metadata[key]['default_value'] = default_value
+ if options is not None:
+ filtered_metadata[key]['options'] = options
+ else:
+ filtered_metadata[key] = _filter_metadata(value, **kwargs)
+ return filtered_metadata
+
+
+def _load_metadata(force_reload=False):
+ """Load metadata information into memory.
+
+ If force_reload, the metadata information will be reloaded
+ even if the metadata is already loaded.
+ """
+ adapter_api.load_adapters_internal(force_reload=force_reload)
+ global OS_FIELDS
+ if force_reload or OS_FIELDS is None:
+ OS_FIELDS = _get_os_fields_from_configuration()
+ global PACKAGE_FIELDS
+ if force_reload or PACKAGE_FIELDS is None:
+ PACKAGE_FIELDS = _get_package_fields_from_configuration()
+ global FLAVOR_FIELDS
+ if force_reload or FLAVOR_FIELDS is None:
+ FLAVOR_FIELDS = _get_flavor_fields_from_configuration()
+ global OSES_METADATA
+ if force_reload or OSES_METADATA is None:
+ OSES_METADATA = _get_oses_metadata_from_configuration()
+ global PACKAGES_METADATA
+ if force_reload or PACKAGES_METADATA is None:
+ PACKAGES_METADATA = _get_packages_metadata_from_configuration()
+ global FLAVORS_METADATA
+ if force_reload or FLAVORS_METADATA is None:
+ FLAVORS_METADATA = _get_flavors_metadata_from_configuration()
+ global OSES_METADATA_UI_CONVERTERS
+ if force_reload or OSES_METADATA_UI_CONVERTERS is None:
+ OSES_METADATA_UI_CONVERTERS = (
+ _get_oses_metadata_ui_converters_from_configuration()
+ )
+ global FLAVORS_METADATA_UI_CONVERTERS
+ if force_reload or FLAVORS_METADATA_UI_CONVERTERS is None:
+ FLAVORS_METADATA_UI_CONVERTERS = (
+ _get_flavors_metadata_ui_converters_from_configuration()
+ )
+
+
+def _get_oses_metadata_ui_converters_from_configuration():
+ """Get os metadata ui converters from os metadata mapping config dir.
+
+ An os metadata ui converter is used to convert os metadata to
+ a format the UI can understand and display.
+ """
+ oses_metadata_ui_converters = {}
+ configs = util.load_configs(setting.OS_MAPPING_DIR)
+ for config in configs:
+ os_name = config['OS']
+ oses_metadata_ui_converters[os_name] = config.get('CONFIG_MAPPING', {})
+
+ oses = adapter_api.OSES
+ parents = {}
+ for os_name, os in oses.items():
+ parent = os.get('parent', None)
+ parents[os_name] = parent
+ for os_name, os in oses.items():
+ oses_metadata_ui_converters[os_name] = util.recursive_merge_dict(
+ os_name, oses_metadata_ui_converters, parents
+ )
+ return oses_metadata_ui_converters
+
+
+def _get_flavors_metadata_ui_converters_from_configuration():
+ """Get flavor metadata ui converters from flavor mapping config dir."""
+ flavors_metadata_ui_converters = {}
+ configs = util.load_configs(setting.FLAVOR_MAPPING_DIR)
+ for config in configs:
+ adapter_name = config['ADAPTER']
+ flavor_name = config['FLAVOR']
+ flavors_metadata_ui_converters.setdefault(
+ adapter_name, {}
+ )[flavor_name] = config.get('CONFIG_MAPPING', {})
+ adapters = adapter_api.ADAPTERS
+ parents = {}
+ for adapter_name, adapter in adapters.items():
+ parent = adapter.get('parent', None)
+ parents[adapter_name] = parent
+ for adapter_name, adapter in adapters.items():
+ flavors_metadata_ui_converters[adapter_name] = (
+ util.recursive_merge_dict(
+ adapter_name, flavors_metadata_ui_converters, parents
+ )
+ )
+ return flavors_metadata_ui_converters
+
+
+def get_packages_metadata_internal(force_reload=False):
+ """Get deployable package metadata."""
+ _load_metadata(force_reload=force_reload)
+ metadata_mapping = {}
+ adapters = adapter_api.ADAPTERS
+ for adapter_name, adapter in adapters.items():
+ if adapter.get('deployable'):
+ metadata_mapping[adapter_name] = _filter_metadata(
+ PACKAGES_METADATA.get(adapter_name, {})
+ )
+ else:
+ logging.info(
+ 'ignore metadata since its adapter %s is not deployable',
+ adapter_name
+ )
+ return metadata_mapping
+
+
+def get_flavors_metadata_internal(force_reload=False):
+ """Get deployable flavor metadata."""
+ _load_metadata(force_reload=force_reload)
+ metadata_mapping = {}
+ adapters_flavors = adapter_api.ADAPTERS_FLAVORS
+ for adapter_name, adapter_flavors in adapters_flavors.items():
+ adapter = adapter_api.ADAPTERS[adapter_name]
+ if not adapter.get('deployable'):
+ logging.info(
+ 'ignore metadata since its adapter %s is not deployable',
+ adapter_name
+ )
+ continue
+ for flavor_name, flavor in adapter_flavors.items():
+ flavor_metadata = FLAVORS_METADATA.get(
+ adapter_name, {}
+ ).get(flavor_name, {})
+ metadata = _filter_metadata(flavor_metadata)
+ metadata_mapping.setdefault(
+ adapter_name, {}
+ )[flavor_name] = metadata
+ return metadata_mapping
+
+
+def get_flavors_metadata_ui_converters_internal(force_reload=False):
+ """Get usable flavor metadata ui converters."""
+ _load_metadata(force_reload=force_reload)
+ return FLAVORS_METADATA_UI_CONVERTERS
+
+
+def get_oses_metadata_internal(force_reload=False):
+ """Get deployable os metadata."""
+ _load_metadata(force_reload=force_reload)
+ metadata_mapping = {}
+ oses = adapter_api.OSES
+ for os_name, os in oses.items():
+ if os.get('deployable'):
+ metadata_mapping[os_name] = _filter_metadata(
+ OSES_METADATA.get(os_name, {})
+ )
+ else:
+ logging.info(
+ 'ignore metadata since its os %s is not deployable',
+ os_name
+ )
+ return metadata_mapping
+
+
+def get_oses_metadata_ui_converters_internal(force_reload=False):
+ """Get usable os metadata ui converters."""
+ _load_metadata(force_reload=force_reload)
+ return OSES_METADATA_UI_CONVERTERS
diff --git a/compass-tasks/db/api/metadata_holder.py b/compass-tasks/db/api/metadata_holder.py
new file mode 100644
index 0000000..24afc67
--- /dev/null
+++ b/compass-tasks/db/api/metadata_holder.py
@@ -0,0 +1,731 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metadata related object holder."""
+import logging
+
+from compass.db.api import adapter as adapter_api
+from compass.db.api import adapter_holder as adapter_holder_api
+from compass.db.api import database
+from compass.db.api import metadata as metadata_api
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+RESP_METADATA_FIELDS = [
+ 'os_config', 'package_config'
+]
+RESP_UI_METADATA_FIELDS = [
+ 'os_global_config', 'flavor_config'
+]
+
+
+def load_metadatas(force_reload=False):
+ """Load metadatas."""
+    # TODO(xicheng): today metadata.py loads metadata into memory in its
+    # original file format. Here we take that in-memory metadata, do some
+    # translation, and store the translated metadata into memory as well.
+    # The api layer may only access the global in-memory data in
+    # metadata_holder.py.
+ _load_os_metadatas(force_reload=force_reload)
+ _load_package_metadatas(force_reload=force_reload)
+ _load_flavor_metadatas(force_reload=force_reload)
+ _load_os_metadata_ui_converters(force_reload=force_reload)
+ _load_flavor_metadata_ui_converters(force_reload=force_reload)
+
+
+def _load_os_metadata_ui_converters(force_reload=False):
+ global OS_METADATA_UI_CONVERTERS
+ if force_reload or OS_METADATA_UI_CONVERTERS is None:
+ logging.info('load os metadatas ui converters into memory')
+ OS_METADATA_UI_CONVERTERS = (
+ metadata_api.get_oses_metadata_ui_converters_internal(
+ force_reload=force_reload
+ )
+ )
+
+
+def _load_os_metadatas(force_reload=False):
+ """Load os metadata from inmemory db and map it by os_id."""
+ global OS_METADATA_MAPPING
+ if force_reload or OS_METADATA_MAPPING is None:
+ logging.info('load os metadatas into memory')
+ OS_METADATA_MAPPING = metadata_api.get_oses_metadata_internal(
+ force_reload=force_reload
+ )
+
+
+def _load_flavor_metadata_ui_converters(force_reload=False):
+ """Load flavor metadata ui converters from inmemory db.
+
+    The loaded converters are keyed by '<adapter_name>:<flavor_name>'.
+ """
+ global FLAVOR_METADATA_UI_CONVERTERS
+ if force_reload or FLAVOR_METADATA_UI_CONVERTERS is None:
+ logging.info('load flavor metadata ui converters into memory')
+ FLAVOR_METADATA_UI_CONVERTERS = {}
+ adapters_flavors_metadata_ui_converters = (
+ metadata_api.get_flavors_metadata_ui_converters_internal(
+ force_reload=force_reload
+ )
+ )
+ for adapter_name, adapter_flavors_metadata_ui_converters in (
+ adapters_flavors_metadata_ui_converters.items()
+ ):
+ for flavor_name, flavor_metadata_ui_converter in (
+ adapter_flavors_metadata_ui_converters.items()
+ ):
+ FLAVOR_METADATA_UI_CONVERTERS[
+ '%s:%s' % (adapter_name, flavor_name)
+ ] = flavor_metadata_ui_converter
+
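The nested loop above flattens a {adapter: {flavor: converter}} mapping into a
flat dict keyed by 'adapter:flavor'. A minimal sketch with invented names:

    nested = {'openstack': {'HA': {'mapped_name': 'flavor_config'}}}
    flat = {}
    for adapter_name, flavors in nested.items():
        for flavor_name, converter in flavors.items():
            flat['%s:%s' % (adapter_name, flavor_name)] = converter
    assert list(flat) == ['openstack:HA']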
+
+@util.deprecated
+def _load_package_metadatas(force_reload=False):
+ """Load deployable package metadata from inmemory db."""
+ global PACKAGE_METADATA_MAPPING
+ if force_reload or PACKAGE_METADATA_MAPPING is None:
+ logging.info('load package metadatas into memory')
+ PACKAGE_METADATA_MAPPING = (
+ metadata_api.get_packages_metadata_internal(
+ force_reload=force_reload
+ )
+ )
+
+
+def _load_flavor_metadatas(force_reload=False):
+ """Load flavor metadata from inmemory db.
+
+    The loaded metadata is keyed by '<adapter_name>:<flavor_name>'.
+ """
+ global FLAVOR_METADATA_MAPPING
+ if force_reload or FLAVOR_METADATA_MAPPING is None:
+ logging.info('load flavor metadatas into memory')
+ FLAVOR_METADATA_MAPPING = {}
+ adapters_flavors_metadata = (
+ metadata_api.get_flavors_metadata_internal(
+ force_reload=force_reload
+ )
+ )
+ for adapter_name, adapter_flavors_metadata in (
+ adapters_flavors_metadata.items()
+ ):
+ for flavor_name, flavor_metadata in (
+ adapter_flavors_metadata.items()
+ ):
+ FLAVOR_METADATA_MAPPING[
+ '%s:%s' % (adapter_name, flavor_name)
+ ] = flavor_metadata
+
+
+OS_METADATA_MAPPING = None
+PACKAGE_METADATA_MAPPING = None
+FLAVOR_METADATA_MAPPING = None
+OS_METADATA_UI_CONVERTERS = None
+FLAVOR_METADATA_UI_CONVERTERS = None
+
+
+def validate_os_config(
+ config, os_id, whole_check=False, **kwargs
+):
+ """Validate os config."""
+ load_metadatas()
+ if os_id not in OS_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'os %s is not found in os metadata mapping' % os_id
+ )
+ _validate_config(
+ '', config, OS_METADATA_MAPPING[os_id],
+ whole_check, **kwargs
+ )
+
+
+@util.deprecated
+def validate_package_config(
+ config, adapter_id, whole_check=False, **kwargs
+):
+ """Validate package config."""
+ load_metadatas()
+ if adapter_id not in PACKAGE_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+            'adapter %s is not found in package metadata mapping' % adapter_id
+ )
+ _validate_config(
+ '', config, PACKAGE_METADATA_MAPPING[adapter_id],
+ whole_check, **kwargs
+ )
+
+
+def validate_flavor_config(
+ config, flavor_id, whole_check=False, **kwargs
+):
+ """Validate flavor config."""
+ load_metadatas()
+ if not flavor_id:
+ logging.info('There is no flavor, skipping flavor validation...')
+ elif flavor_id not in FLAVOR_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+            'flavor %s is not found in flavor metadata mapping' % flavor_id
+ )
+ else:
+ _validate_config(
+ '', config, FLAVOR_METADATA_MAPPING[flavor_id],
+ whole_check, **kwargs
+ )
+
+
+def _filter_metadata(metadata, **kwargs):
+ """Filter metadata before return it to api.
+
+ Some metadata fields are not json compatible or
+ only used in db/api internally.
+ We should strip these fields out before return to api.
+ """
+ if not isinstance(metadata, dict):
+ return metadata
+ filtered_metadata = {}
+ for key, value in metadata.items():
+ if key == '_self':
+ filtered_metadata[key] = {
+ 'name': value['name'],
+ 'description': value.get('description', None),
+ 'default_value': value.get('default_value', None),
+ 'is_required': value.get('is_required', False),
+ 'required_in_whole_config': value.get(
+ 'required_in_whole_config', False),
+ 'js_validator': value.get('js_validator', None),
+ 'options': value.get('options', None),
+ 'required_in_options': value.get(
+ 'required_in_options', False),
+ 'field_type': value.get(
+ 'field_type_data', 'str'),
+ 'display_type': value.get('display_type', None),
+ 'mapping_to': value.get('mapping_to', None)
+ }
+ else:
+ filtered_metadata[key] = _filter_metadata(value, **kwargs)
+ return filtered_metadata
+
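To illustrate the filtering above (metadata content invented): a '_self'
descriptor keeps only its json-friendly fields, 'field_type' is taken from
'field_type_data', and non-serializable entries such as validators are
dropped:

    metadata = {
        'server_credentials': {
            '_self': {
                'name': 'server_credentials',
                'field_type_data': 'dict',
                'validator': lambda key, value, **kwargs: True,
            }
        }
    }
    filtered = _filter_metadata(metadata)
    assert 'validator' not in filtered['server_credentials']['_self']
    assert filtered['server_credentials']['_self']['field_type'] == 'dict'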
+
+@util.deprecated
+def _get_package_metadata(adapter_id):
+ """get package metadata."""
+ load_metadatas()
+ if adapter_id not in PACKAGE_METADATA_MAPPING:
+ raise exception.RecordNotExists(
+            'adapter %s does not exist' % adapter_id
+ )
+ return _filter_metadata(
+ PACKAGE_METADATA_MAPPING[adapter_id]
+ )
+
+
+@util.deprecated
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_package_metadata(adapter_id, user=None, session=None, **kwargs):
+ """Get package metadata from adapter."""
+ return {
+ 'package_config': _get_package_metadata(adapter_id)
+ }
+
+
+def _get_flavor_metadata(flavor_id):
+ """get flavor metadata."""
+ load_metadatas()
+ if not flavor_id:
+        logging.info('There is no flavor id, skipping lookup...')
+ elif flavor_id not in FLAVOR_METADATA_MAPPING:
+ raise exception.RecordNotExists(
+ 'flavor %s does not exist' % flavor_id
+ )
+ else:
+ return _filter_metadata(FLAVOR_METADATA_MAPPING[flavor_id])
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_flavor_metadata(flavor_id, user=None, session=None, **kwargs):
+ """Get flavor metadata by flavor."""
+ return {
+ 'package_config': _get_flavor_metadata(flavor_id)
+ }
+
+
+def _get_os_metadata(os_id):
+ """get os metadata."""
+ load_metadatas()
+ if os_id not in OS_METADATA_MAPPING:
+ raise exception.RecordNotExists(
+ 'os %s does not exist' % os_id
+ )
+ return _filter_metadata(OS_METADATA_MAPPING[os_id])
+
+
+def _get_os_metadata_ui_converter(os_id):
+ """get os metadata ui converter."""
+ load_metadatas()
+ if os_id not in OS_METADATA_UI_CONVERTERS:
+ raise exception.RecordNotExists(
+ 'os %s does not exist' % os_id
+ )
+ return OS_METADATA_UI_CONVERTERS[os_id]
+
+
+def _get_flavor_metadata_ui_converter(flavor_id):
+ """get flavor metadata ui converter."""
+ load_metadatas()
+ if flavor_id not in FLAVOR_METADATA_UI_CONVERTERS:
+ raise exception.RecordNotExists(
+ 'flavor %s does not exist' % flavor_id
+ )
+ return FLAVOR_METADATA_UI_CONVERTERS[flavor_id]
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_os_metadata(os_id, user=None, session=None, **kwargs):
+ """get os metadatas."""
+ return {'os_config': _get_os_metadata(os_id)}
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
+def get_os_ui_metadata(os_id, user=None, session=None, **kwargs):
+ """Get os metadata ui converter by os."""
+ metadata = _get_os_metadata(os_id)
+ metadata_ui_converter = _get_os_metadata_ui_converter(os_id)
+ return _get_ui_metadata(metadata, metadata_ui_converter)
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
+def get_flavor_ui_metadata(flavor_id, user=None, session=None, **kwargs):
+ """Get flavor ui metadata by flavor."""
+ metadata = _get_flavor_metadata(flavor_id)
+ metadata_ui_converter = _get_flavor_metadata_ui_converter(flavor_id)
+ return _get_ui_metadata(metadata, metadata_ui_converter)
+
+
+def _get_ui_metadata(metadata, metadata_ui_converter):
+ """convert metadata to ui metadata.
+
+ Args:
+ metadata: metadata we defined in metadata files.
+ metadata_ui_converter: metadata ui converter defined in metadata
+                               mapping files. Used to convert original
+ metadata to ui understandable metadata.
+
+ Returns:
+ ui understandable metadata.
+ """
+ ui_metadata = {}
+ ui_metadata[metadata_ui_converter['mapped_name']] = []
+ for mapped_child in metadata_ui_converter['mapped_children']:
+ data_dict = {}
+ for ui_key, ui_value in mapped_child.items():
+ for key, value in ui_value.items():
+ if 'data' == key:
+ result_data = []
+ _get_ui_metadata_data(
+ metadata[ui_key], value, result_data
+ )
+ data_dict['data'] = result_data
+ else:
+ data_dict[key] = value
+ ui_metadata[metadata_ui_converter['mapped_name']].append(data_dict)
+ return ui_metadata
+
+
+def _get_ui_metadata_data(metadata, config, result_data):
+ """Get ui metadata data and fill to result."""
+ data_dict = {}
+ for key, config_value in config.items():
+ if isinstance(config_value, dict) and key != 'content_data':
+            if key in metadata:
+ _get_ui_metadata_data(metadata[key], config_value, result_data)
+ else:
+ _get_ui_metadata_data(metadata, config_value, result_data)
+ elif isinstance(config_value, list):
+ option_list = []
+ for item in config_value:
+ if isinstance(item, dict):
+ option_list.append(item)
+ data_dict[key] = option_list
+ else:
+ if isinstance(metadata['_self'][item], bool):
+ data_dict[item] = str(metadata['_self'][item]).lower()
+ else:
+ data_dict[item] = metadata['_self'][item]
+ else:
+ data_dict[key] = config_value
+ if data_dict:
+ result_data.append(data_dict)
+ return result_data
+
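A minimal sketch of the conversion done by the two functions above, using an
invented converter and metadata tree: keys named 'data' are expanded through
_get_ui_metadata_data, while all other converter keys are copied through
as-is:

    metadata = {'general': {'_self': {'name': 'general'}}}
    converter = {
        'mapped_name': 'os_global_config',
        'mapped_children': [
            {'general': {'data': {'title': 'General'}}}
        ]
    }
    assert _get_ui_metadata(metadata, converter) == {
        'os_global_config': [{'data': [{'title': 'General'}]}]
    }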
+
+@util.deprecated
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_package_os_metadata(
+ adapter_id, os_id,
+ user=None, session=None, **kwargs
+):
+ """Get metadata by adapter and os."""
+ adapter = adapter_holder_api.get_adapter(
+ adapter_id, user=user, session=session
+ )
+ os_ids = [os['id'] for os in adapter['supported_oses']]
+ if os_id not in os_ids:
+ raise exception.InvalidParameter(
+ 'os %s is not in the supported os list of adapter %s' % (
+ os_id, adapter_id
+ )
+ )
+ metadatas = {}
+ metadatas['os_config'] = _get_os_metadata(
+ os_id
+ )
+ metadatas['package_config'] = _get_package_metadata(
+ adapter_id
+ )
+ return metadatas
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_METADATA_FIELDS)
+def get_flavor_os_metadata(
+ flavor_id, os_id,
+ user=None, session=None, **kwargs
+):
+ """Get metadata by flavor and os."""
+ flavor = adapter_holder_api.get_flavor(
+ flavor_id, user=user, session=session
+ )
+ adapter_id = flavor['adapter_id']
+ adapter = adapter_holder_api.get_adapter(
+ adapter_id, user=user, session=session
+ )
+ os_ids = [os['id'] for os in adapter['supported_oses']]
+ if os_id not in os_ids:
+ raise exception.InvalidParameter(
+ 'os %s is not in the supported os list of adapter %s' % (
+ os_id, adapter_id
+ )
+ )
+ metadatas = {}
+    metadatas['os_config'] = _get_os_metadata(
+        os_id
+    )
+    metadatas['package_config'] = _get_flavor_metadata(
+        flavor_id
+    )
+ return metadatas
+
+
+def _validate_self(
+ config_path, config_key, config,
+ metadata, whole_check,
+ **kwargs
+):
+ """validate config by metadata self section."""
+ logging.debug('validate config self %s', config_path)
+ if '_self' not in metadata:
+ if isinstance(config, dict):
+ _validate_config(
+ config_path, config, metadata, whole_check, **kwargs
+ )
+ return
+ field_type = metadata['_self'].get('field_type', basestring)
+ if not isinstance(config, field_type):
+ raise exception.InvalidParameter(
+ '%s config type is not %s: %s' % (config_path, field_type, config)
+ )
+ is_required = metadata['_self'].get(
+ 'is_required', False
+ )
+ required_in_whole_config = metadata['_self'].get(
+ 'required_in_whole_config', False
+ )
+ if isinstance(config, basestring):
+ if config == '' and not is_required and not required_in_whole_config:
+ # ignore empty config when it is optional
+ return
+ required_in_options = metadata['_self'].get(
+ 'required_in_options', False
+ )
+ options = metadata['_self'].get('options', None)
+ if required_in_options:
+ if field_type in [int, basestring, float, bool]:
+ if options and config not in options:
+ raise exception.InvalidParameter(
+ '%s config is not in %s: %s' % (
+ config_path, options, config
+ )
+ )
+ elif field_type in [list, tuple]:
+ if options and not set(config).issubset(set(options)):
+ raise exception.InvalidParameter(
+ '%s config is not in %s: %s' % (
+ config_path, options, config
+ )
+ )
+ elif field_type == dict:
+ if options and not set(config.keys()).issubset(set(options)):
+ raise exception.InvalidParameter(
+ '%s config is not in %s: %s' % (
+ config_path, options, config
+ )
+ )
+ validator = metadata['_self'].get('validator', None)
+ logging.debug('validate by validator %s', validator)
+ if validator:
+ if not validator(config_key, config, **kwargs):
+ raise exception.InvalidParameter(
+ '%s config is invalid' % config_path
+ )
+ if isinstance(config, dict):
+ _validate_config(
+ config_path, config, metadata, whole_check, **kwargs
+ )
+
+
+def _validate_config(
+ config_path, config, metadata, whole_check,
+ **kwargs
+):
+ """validate config by metadata."""
+ logging.debug('validate config %s', config_path)
+ generals = {}
+ specified = {}
+ for key, value in metadata.items():
+ if key.startswith('$'):
+ generals[key] = value
+ elif key.startswith('_'):
+ pass
+ else:
+ specified[key] = value
+ config_keys = set(config.keys())
+ specified_keys = set(specified.keys())
+ intersect_keys = config_keys & specified_keys
+ not_found_keys = config_keys - specified_keys
+ redundant_keys = specified_keys - config_keys
+ for key in redundant_keys:
+ if '_self' not in specified[key]:
+ continue
+ if specified[key]['_self'].get('is_required', False):
+ raise exception.InvalidParameter(
+                '%s/%s is not found but it is required' % (
+ config_path, key
+ )
+ )
+ if (
+ whole_check and
+ specified[key]['_self'].get(
+ 'required_in_whole_config', False
+ )
+ ):
+ raise exception.InvalidParameter(
+                '%s/%s is not found but it is required in whole config' % (
+ config_path, key
+ )
+ )
+ for key in intersect_keys:
+ _validate_self(
+ '%s/%s' % (config_path, key),
+ key, config[key], specified[key], whole_check,
+ **kwargs
+ )
+ for key in not_found_keys:
+ if not generals:
+ raise exception.InvalidParameter(
+ 'key %s missing in metadata %s' % (
+ key, config_path
+ )
+ )
+ for general_key, general_value in generals.items():
+ _validate_self(
+ '%s/%s' % (config_path, key),
+ key, config[key], general_value, whole_check,
+ **kwargs
+ )
+
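To make the traversal concrete, a small invented metadata tree: plain keys
must appear in the config as named, '$'-prefixed keys act as wildcards for
unmatched config keys, and '_self' drives the per-field checks (this is
Python 2 code, hence basestring):

    metadata = {
        'dns_servers': {'_self': {'field_type': list, 'is_required': True}},
        '$hostname': {'_self': {'field_type': basestring}},
    }
    # passes: dns_servers is a list, 'host1' matches the $hostname wildcard
    _validate_config(
        '', {'dns_servers': ['8.8.8.8'], 'host1': 'a.domain'}, metadata, True
    )
    try:
        _validate_config('', {'dns_servers': '8.8.8.8'}, metadata, True)
    except exception.InvalidParameter:
        pass  # rejected: a string where a list is required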
+
+def _autofill_self_config(
+ config_path, config_key, config,
+ metadata,
+ **kwargs
+):
+ """Autofill config by metadata self section."""
+ if '_self' not in metadata:
+ if isinstance(config, dict):
+ _autofill_config(
+ config_path, config, metadata, **kwargs
+ )
+ return config
+ logging.debug(
+ 'autofill %s by metadata %s', config_path, metadata['_self']
+ )
+ autofill_callback = metadata['_self'].get(
+ 'autofill_callback', None
+ )
+ autofill_callback_params = metadata['_self'].get(
+ 'autofill_callback_params', {}
+ )
+ callback_params = dict(kwargs)
+ if autofill_callback_params:
+ callback_params.update(autofill_callback_params)
+ default_value = metadata['_self'].get(
+ 'default_value', None
+ )
+ if default_value is not None:
+ callback_params['default_value'] = default_value
+ options = metadata['_self'].get(
+ 'options', None
+ )
+ if options is not None:
+ callback_params['options'] = options
+ if autofill_callback:
+ config = autofill_callback(
+ config_key, config, **callback_params
+ )
+ if config is None:
+ new_config = {}
+ else:
+ new_config = config
+ if isinstance(new_config, dict):
+ _autofill_config(
+ config_path, new_config, metadata, **kwargs
+ )
+ if new_config:
+ config = new_config
+ return config
+
+
+def _autofill_config(
+ config_path, config, metadata, **kwargs
+):
+ """autofill config by metadata."""
+ generals = {}
+ specified = {}
+ for key, value in metadata.items():
+ if key.startswith('$'):
+ generals[key] = value
+ elif key.startswith('_'):
+ pass
+ else:
+ specified[key] = value
+ config_keys = set(config.keys())
+ specified_keys = set(specified.keys())
+ intersect_keys = config_keys & specified_keys
+ not_found_keys = config_keys - specified_keys
+ redundant_keys = specified_keys - config_keys
+ for key in redundant_keys:
+ self_config = _autofill_self_config(
+ '%s/%s' % (config_path, key),
+ key, None, specified[key], **kwargs
+ )
+ if self_config is not None:
+ config[key] = self_config
+ for key in intersect_keys:
+ config[key] = _autofill_self_config(
+ '%s/%s' % (config_path, key),
+ key, config[key], specified[key],
+ **kwargs
+ )
+ for key in not_found_keys:
+ for general_key, general_value in generals.items():
+ config[key] = _autofill_self_config(
+ '%s/%s' % (config_path, key),
+ key, config[key], general_value,
+ **kwargs
+ )
+ return config
+
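Note that autofill only writes values through an autofill_callback;
default_value and options are merely forwarded to the callback as parameters.
A sketch with an invented callback that falls back to the default:

    def fill_default(key, value, default_value=None, **kwargs):
        # hypothetical callback: keep the given value, else use the default
        return value if value is not None else default_value

    metadata = {
        'ntp_server': {
            '_self': {
                'autofill_callback': fill_default,
                'default_value': 'ntp.example.com',
            }
        }
    }
    assert _autofill_config('', {}, metadata) == {
        'ntp_server': 'ntp.example.com'
    }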
+
+def autofill_os_config(
+ config, os_id, **kwargs
+):
+ load_metadatas()
+ if os_id not in OS_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'os %s is not found in os metadata mapping' % os_id
+ )
+
+ return _autofill_config(
+ '', config, OS_METADATA_MAPPING[os_id], **kwargs
+ )
+
+
+def autofill_package_config(
+ config, adapter_id, **kwargs
+):
+ load_metadatas()
+ if adapter_id not in PACKAGE_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'adapter %s is not found in package metadata mapping' % adapter_id
+ )
+
+ return _autofill_config(
+ '', config, PACKAGE_METADATA_MAPPING[adapter_id], **kwargs
+ )
+
+
+def autofill_flavor_config(
+ config, flavor_id, **kwargs
+):
+ load_metadatas()
+ if not flavor_id:
+        logging.info('There is no flavor, skipping config autofill...')
+ elif flavor_id not in FLAVOR_METADATA_MAPPING:
+ raise exception.InvalidParameter(
+ 'flavor %s is not found in flavor metadata mapping' % flavor_id
+ )
+ else:
+ return _autofill_config(
+ '', config, FLAVOR_METADATA_MAPPING[flavor_id], **kwargs
+ )
diff --git a/compass-tasks/db/api/network.py b/compass-tasks/db/api/network.py
new file mode 100644
index 0000000..e2bf7d3
--- /dev/null
+++ b/compass-tasks/db/api/network.py
@@ -0,0 +1,160 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Network related database operations."""
+import logging
+import netaddr
+import re
+
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+
+SUPPORTED_FIELDS = ['subnet', 'name']
+RESP_FIELDS = [
+ 'id', 'name', 'subnet', 'created_at', 'updated_at'
+]
+ADDED_FIELDS = ['subnet']
+OPTIONAL_ADDED_FIELDS = ['name']
+IGNORE_FIELDS = [
+ 'id', 'created_at', 'updated_at'
+]
+UPDATED_FIELDS = ['subnet', 'name']
+
+
+def _check_subnet(subnet):
+ """Check subnet format is correct."""
+ try:
+ netaddr.IPNetwork(subnet)
+ except Exception as error:
+ logging.exception(error)
+ raise exception.InvalidParameter(
+ 'subnet %s format unrecognized' % subnet)
+
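_check_subnet accepts anything netaddr can parse as a CIDR and converts parse
failures into InvalidParameter; for example (addresses invented):

    _check_subnet('10.145.88.0/23')      # valid CIDR, passes silently
    try:
        _check_subnet('10.145.88.0/40')  # impossible IPv4 prefix length
    except exception.InvalidParameter:
        pass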
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SUBNETS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_subnets(user=None, session=None, **filters):
+ """List subnets."""
+ return utils.list_db_objects(
+ session, models.Subnet, **filters
+ )
+
+
+def _get_subnet(subnet_id, session=None, **kwargs):
+ """Get subnet by subnet id."""
+ if isinstance(subnet_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Subnet,
+ id=subnet_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'subnet id %s type is not int compatible' % subnet_id
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SUBNETS
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_subnet(
+ subnet_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get subnet info."""
+ return _get_subnet(
+ subnet_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(
+ ADDED_FIELDS, optional_support_keys=OPTIONAL_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(subnet=_check_subnet)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SUBNET
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_subnet(
+ exception_when_existing=True, subnet=None,
+ user=None, session=None, **kwargs
+):
+ """Create a subnet."""
+ return utils.add_db_object(
+ session, models.Subnet,
+ exception_when_existing, subnet, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(subnet=_check_subnet)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SUBNET
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_subnet(subnet_id, user=None, session=None, **kwargs):
+ """Update a subnet."""
+ subnet = _get_subnet(
+ subnet_id, session=session
+ )
+ return utils.update_db_object(session, subnet, **kwargs)
+
+
+def _check_subnet_deletable(subnet):
+ """Check a subnet deletable."""
+ if subnet.host_networks:
+ host_networks = [
+ '%s:%s=%s' % (
+ host_network.host.name, host_network.interface,
+ host_network.ip
+ )
+ for host_network in subnet.host_networks
+ ]
+ raise exception.NotAcceptable(
+ 'subnet %s contains host networks %s' % (
+ subnet.subnet, host_networks
+ )
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_SUBNET
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_subnet(subnet_id, user=None, session=None, **kwargs):
+ """Delete a subnet."""
+ subnet = _get_subnet(
+ subnet_id, session=session
+ )
+ _check_subnet_deletable(subnet)
+ return utils.del_db_object(session, subnet)
diff --git a/compass-tasks/db/api/permission.py b/compass-tasks/db/api/permission.py
new file mode 100644
index 0000000..f4d777a
--- /dev/null
+++ b/compass-tasks/db/api/permission.py
@@ -0,0 +1,357 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Permission database operations."""
+import re
+
+from compass.db.api import database
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = ['id', 'name', 'alias', 'description']
+RESP_FIELDS = ['id', 'name', 'alias', 'description']
+
+
+class PermissionWrapper(object):
+ def __init__(self, name, alias, description):
+ self.name = name
+ self.alias = alias
+ self.description = description
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'alias': self.alias,
+ 'description': self.description
+ }
+
+
+PERMISSION_LIST_PERMISSIONS = PermissionWrapper(
+ 'list_permissions', 'list permissions', 'list all permissions'
+)
+PERMISSION_LIST_SWITCHES = PermissionWrapper(
+ 'list_switches', 'list switches', 'list all switches'
+)
+PERMISSION_LIST_SWITCH_FILTERS = PermissionWrapper(
+ 'list_switch_filters',
+ 'list switch filters',
+ 'list switch filters'
+)
+PERMISSION_ADD_SWITCH = PermissionWrapper(
+ 'add_switch', 'add switch', 'add switch'
+)
+PERMISSION_UPDATE_SWITCH_FILTERS = PermissionWrapper(
+ 'update_switch_filters',
+ 'update switch filters',
+ 'update switch filters'
+)
+PERMISSION_DEL_SWITCH = PermissionWrapper(
+ 'delete_switch', 'delete switch', 'delete switch'
+)
+PERMISSION_LIST_SWITCH_MACHINES = PermissionWrapper(
+ 'list_switch_machines', 'list switch machines', 'list switch machines'
+)
+PERMISSION_ADD_SWITCH_MACHINE = PermissionWrapper(
+ 'add_switch_machine', 'add switch machine', 'add switch machine'
+)
+PERMISSION_DEL_SWITCH_MACHINE = PermissionWrapper(
+ 'del_switch_machine', 'delete switch machine', 'del switch machine'
+)
+PERMISSION_UPDATE_SWITCH_MACHINES = PermissionWrapper(
+ 'update_switch_machines',
+ 'update switch machines',
+ 'update switch machines'
+)
+PERMISSION_LIST_MACHINES = PermissionWrapper(
+ 'list_machines', 'list machines', 'list machines'
+)
+PERMISSION_ADD_MACHINE = PermissionWrapper(
+ 'add_machine', 'add machine', 'add machine'
+)
+PERMISSION_DEL_MACHINE = PermissionWrapper(
+ 'delete_machine', 'delete machine', 'delete machine'
+)
+PERMISSION_LIST_ADAPTERS = PermissionWrapper(
+ 'list_adapters', 'list adapters', 'list adapters'
+)
+PERMISSION_LIST_METADATAS = PermissionWrapper(
+ 'list_metadatas', 'list metadatas', 'list metadatas'
+)
+PERMISSION_LIST_SUBNETS = PermissionWrapper(
+ 'list_subnets', 'list subnets', 'list subnets'
+)
+PERMISSION_ADD_SUBNET = PermissionWrapper(
+ 'add_subnet', 'add subnet', 'add subnet'
+)
+PERMISSION_DEL_SUBNET = PermissionWrapper(
+ 'del_subnet', 'del subnet', 'del subnet'
+)
+PERMISSION_LIST_CLUSTERS = PermissionWrapper(
+ 'list_clusters', 'list clusters', 'list clusters'
+)
+PERMISSION_ADD_CLUSTER = PermissionWrapper(
+ 'add_cluster', 'add cluster', 'add cluster'
+)
+PERMISSION_DEL_CLUSTER = PermissionWrapper(
+ 'del_cluster', 'del cluster', 'del cluster'
+)
+PERMISSION_LIST_CLUSTER_CONFIG = PermissionWrapper(
+ 'list_cluster_config', 'list cluster config', 'list cluster config'
+)
+PERMISSION_ADD_CLUSTER_CONFIG = PermissionWrapper(
+ 'add_cluster_config', 'add cluster config', 'add cluster config'
+)
+PERMISSION_DEL_CLUSTER_CONFIG = PermissionWrapper(
+ 'del_cluster_config', 'del cluster config', 'del cluster config'
+)
+PERMISSION_UPDATE_CLUSTER_HOSTS = PermissionWrapper(
+ 'update_cluster_hosts',
+ 'update cluster hosts',
+ 'update cluster hosts'
+)
+PERMISSION_DEL_CLUSTER_HOST = PermissionWrapper(
+ 'del_clusterhost', 'delete clusterhost', 'delete clusterhost'
+)
+PERMISSION_REVIEW_CLUSTER = PermissionWrapper(
+ 'review_cluster', 'review cluster', 'review cluster'
+)
+PERMISSION_DEPLOY_CLUSTER = PermissionWrapper(
+ 'deploy_cluster', 'deploy cluster', 'deploy cluster'
+)
+PERMISSION_DEPLOY_HOST = PermissionWrapper(
+ 'deploy_host', 'deploy host', 'deploy host'
+)
+PERMISSION_GET_CLUSTER_STATE = PermissionWrapper(
+ 'get_cluster_state', 'get cluster state', 'get cluster state'
+)
+PERMISSION_UPDATE_CLUSTER_STATE = PermissionWrapper(
+ 'update_cluster_state', 'update cluster state',
+ 'update cluster state'
+)
+PERMISSION_LIST_HOSTS = PermissionWrapper(
+ 'list_hosts', 'list hosts', 'list hosts'
+)
+PERMISSION_LIST_HOST_CLUSTERS = PermissionWrapper(
+ 'list_host_clusters',
+ 'list host clusters',
+ 'list host clusters'
+)
+PERMISSION_UPDATE_HOST = PermissionWrapper(
+ 'update_host', 'update host', 'update host'
+)
+PERMISSION_DEL_HOST = PermissionWrapper(
+ 'del_host', 'del host', 'del host'
+)
+PERMISSION_LIST_HOST_CONFIG = PermissionWrapper(
+ 'list_host_config', 'list host config', 'list host config'
+)
+PERMISSION_ADD_HOST_CONFIG = PermissionWrapper(
+ 'add_host_config', 'add host config', 'add host config'
+)
+PERMISSION_DEL_HOST_CONFIG = PermissionWrapper(
+ 'del_host_config', 'del host config', 'del host config'
+)
+PERMISSION_LIST_HOST_NETWORKS = PermissionWrapper(
+ 'list_host_networks',
+ 'list host networks',
+ 'list host networks'
+)
+PERMISSION_ADD_HOST_NETWORK = PermissionWrapper(
+ 'add_host_network', 'add host network', 'add host network'
+)
+PERMISSION_DEL_HOST_NETWORK = PermissionWrapper(
+ 'del_host_network', 'del host network', 'del host network'
+)
+PERMISSION_GET_HOST_STATE = PermissionWrapper(
+ 'get_host_state', 'get host state', 'get host state'
+)
+PERMISSION_UPDATE_HOST_STATE = PermissionWrapper(
+    'update_host_state', 'update host state', 'update host state'
+)
+PERMISSION_LIST_CLUSTERHOSTS = PermissionWrapper(
+ 'list_clusterhosts', 'list cluster hosts', 'list cluster hosts'
+)
+PERMISSION_LIST_CLUSTERHOST_CONFIG = PermissionWrapper(
+ 'list_clusterhost_config',
+ 'list clusterhost config',
+ 'list clusterhost config'
+)
+PERMISSION_ADD_CLUSTERHOST_CONFIG = PermissionWrapper(
+ 'add_clusterhost_config',
+ 'add clusterhost config',
+ 'add clusterhost config'
+)
+PERMISSION_DEL_CLUSTERHOST_CONFIG = PermissionWrapper(
+ 'del_clusterhost_config',
+ 'del clusterhost config',
+ 'del clusterhost config'
+)
+PERMISSION_GET_CLUSTERHOST_STATE = PermissionWrapper(
+ 'get_clusterhost_state',
+ 'get clusterhost state',
+ 'get clusterhost state'
+)
+PERMISSION_UPDATE_CLUSTERHOST_STATE = PermissionWrapper(
+ 'update_clusterhost_state',
+ 'update clusterhost state',
+ 'update clusterhost state'
+)
+PERMISSION_LIST_HEALTH_REPORT = PermissionWrapper(
+ 'list_health_reports',
+ 'list health check report',
+ 'list health check report'
+)
+PERMISSION_GET_HEALTH_REPORT = PermissionWrapper(
+ 'get_health_report',
+ 'get health report',
+ 'get health report'
+)
+PERMISSION_CHECK_CLUSTER_HEALTH = PermissionWrapper(
+ 'start_check_cluster_health',
+ 'start check cluster health',
+ 'start check cluster health'
+)
+PERMISSION_SET_HEALTH_CHECK_ERROR = PermissionWrapper(
+ 'set_error_state',
+ 'set health check into error state',
+ 'set health check into error state'
+)
+PERMISSION_DELETE_REPORT = PermissionWrapper(
+ 'delete_reports',
+ 'delete health reports',
+ 'delete health reports'
+)
+PERMISSIONS = [
+ PERMISSION_LIST_PERMISSIONS,
+ PERMISSION_LIST_SWITCHES,
+ PERMISSION_ADD_SWITCH,
+ PERMISSION_DEL_SWITCH,
+ PERMISSION_LIST_SWITCH_FILTERS,
+ PERMISSION_UPDATE_SWITCH_FILTERS,
+ PERMISSION_LIST_SWITCH_MACHINES,
+ PERMISSION_ADD_SWITCH_MACHINE,
+ PERMISSION_DEL_SWITCH_MACHINE,
+ PERMISSION_UPDATE_SWITCH_MACHINES,
+ PERMISSION_LIST_MACHINES,
+ PERMISSION_ADD_MACHINE,
+ PERMISSION_DEL_MACHINE,
+ PERMISSION_LIST_ADAPTERS,
+ PERMISSION_LIST_METADATAS,
+ PERMISSION_LIST_SUBNETS,
+ PERMISSION_ADD_SUBNET,
+ PERMISSION_DEL_SUBNET,
+ PERMISSION_LIST_CLUSTERS,
+ PERMISSION_ADD_CLUSTER,
+ PERMISSION_DEL_CLUSTER,
+ PERMISSION_LIST_CLUSTER_CONFIG,
+ PERMISSION_ADD_CLUSTER_CONFIG,
+ PERMISSION_DEL_CLUSTER_CONFIG,
+ PERMISSION_UPDATE_CLUSTER_HOSTS,
+ PERMISSION_DEL_CLUSTER_HOST,
+ PERMISSION_REVIEW_CLUSTER,
+ PERMISSION_DEPLOY_CLUSTER,
+ PERMISSION_GET_CLUSTER_STATE,
+ PERMISSION_UPDATE_CLUSTER_STATE,
+ PERMISSION_LIST_HOSTS,
+ PERMISSION_LIST_HOST_CLUSTERS,
+ PERMISSION_UPDATE_HOST,
+ PERMISSION_DEL_HOST,
+ PERMISSION_LIST_HOST_CONFIG,
+ PERMISSION_ADD_HOST_CONFIG,
+ PERMISSION_DEL_HOST_CONFIG,
+ PERMISSION_LIST_HOST_NETWORKS,
+ PERMISSION_ADD_HOST_NETWORK,
+ PERMISSION_DEL_HOST_NETWORK,
+ PERMISSION_GET_HOST_STATE,
+ PERMISSION_UPDATE_HOST_STATE,
+ PERMISSION_DEPLOY_HOST,
+ PERMISSION_LIST_CLUSTERHOSTS,
+ PERMISSION_LIST_CLUSTERHOST_CONFIG,
+ PERMISSION_ADD_CLUSTERHOST_CONFIG,
+ PERMISSION_DEL_CLUSTERHOST_CONFIG,
+ PERMISSION_GET_CLUSTERHOST_STATE,
+ PERMISSION_UPDATE_CLUSTERHOST_STATE,
+ PERMISSION_LIST_HEALTH_REPORT,
+ PERMISSION_GET_HEALTH_REPORT,
+ PERMISSION_CHECK_CLUSTER_HEALTH,
+ PERMISSION_SET_HEALTH_CHECK_ERROR,
+ PERMISSION_DELETE_REPORT
+]
+
+
+@util.deprecated
+def list_permissions_internal(session, **filters):
+ """internal functions used only by other db.api modules."""
+ return utils.list_db_objects(session, models.Permission, **filters)
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_permissions(user=None, session=None, **filters):
+ """list permissions."""
+ return utils.list_db_objects(
+ session, models.Permission, **filters
+ )
+
+
+def _get_permission(permission_id, session=None, **kwargs):
+ """Get permission object by the unique key of Permission table."""
+ if isinstance(permission_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Permission, id=permission_id, **kwargs)
+ raise exception.InvalidParameter(
+ 'permission id %s type is not int compatible' % permission_id
+ )
+
+
+def get_permission_internal(permission_id, session=None, **kwargs):
+ return _get_permission(permission_id, session=session, **kwargs)
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_permission(
+ permission_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get permissions."""
+ return _get_permission(
+ permission_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+def add_permissions_internal(session=None):
+ """internal functions used by other db.api modules only."""
+ permissions = []
+ for permission in PERMISSIONS:
+ permissions.append(
+ utils.add_db_object(
+ session, models.Permission,
+ True,
+ permission.name,
+ alias=permission.alias,
+ description=permission.description
+ )
+ )
+
+ return permissions
diff --git a/compass-tasks/db/api/switch.py b/compass-tasks/db/api/switch.py
new file mode 100644
index 0000000..647eec0
--- /dev/null
+++ b/compass-tasks/db/api/switch.py
@@ -0,0 +1,1213 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Switch database operations."""
+import logging
+import netaddr
+import re
+
+from compass.db.api import database
+from compass.db.api import permission
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = ['ip_int', 'vendor', 'state']
+SUPPORTED_FILTER_FIELDS = ['ip_int', 'vendor', 'state']
+SUPPORTED_SWITCH_MACHINES_FIELDS = [
+ 'switch_ip_int', 'port', 'vlans', 'mac', 'tag', 'location',
+ 'owner_id'
+]
+SUPPORTED_MACHINES_FIELDS = [
+ 'port', 'vlans', 'mac', 'tag', 'location', 'owner_id'
+]
+SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS = [
+ 'switch_ip_int', 'port', 'vlans', 'mac',
+ 'tag', 'location', 'os_name'
+]
+SUPPORTED_MACHINES_HOSTS_FIELDS = [
+ 'port', 'vlans', 'mac', 'tag', 'location',
+ 'os_name'
+]
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+ADDED_FIELDS = ['ip']
+OPTIONAL_ADDED_FIELDS = [
+ 'credentials', 'vendor', 'state', 'err_msg', 'machine_filters'
+]
+UPDATED_FIELDS = [
+ 'ip', 'credentials', 'vendor', 'state',
+ 'err_msg', 'put_machine_filters'
+]
+PATCHED_FIELDS = ['patched_credentials', 'patched_machine_filters']
+UPDATED_FILTERS_FIELDS = ['put_machine_filters']
+PATCHED_FILTERS_FIELDS = ['patched_machine_filters']
+ADDED_MACHINES_FIELDS = ['mac']
+OPTIONAL_ADDED_MACHINES_FIELDS = [
+ 'ipmi_credentials', 'tag', 'location', 'owner_id'
+]
+ADDED_SWITCH_MACHINES_FIELDS = ['port']
+OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS = ['vlans']
+UPDATED_MACHINES_FIELDS = [
+ 'ipmi_credentials',
+ 'tag', 'location'
+]
+UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans', 'owner_id']
+PATCHED_MACHINES_FIELDS = [
+ 'patched_ipmi_credentials',
+ 'patched_tag', 'patched_location'
+]
+PATCHED_SWITCH_MACHINES_FIELDS = ['patched_vlans']
+RESP_FIELDS = [
+ 'id', 'ip', 'credentials', 'vendor', 'state', 'err_msg',
+ 'filters', 'created_at', 'updated_at'
+]
+RESP_FILTERS_FIELDS = [
+ 'id', 'ip', 'filters', 'created_at', 'updated_at'
+]
+RESP_ACTION_FIELDS = [
+ 'status', 'details'
+]
+RESP_MACHINES_FIELDS = [
+ 'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
+ 'port', 'vlans', 'mac', 'owner_id',
+ 'ipmi_credentials', 'tag', 'location',
+ 'created_at', 'updated_at'
+]
+RESP_MACHINES_HOSTS_FIELDS = [
+ 'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
+ 'port', 'vlans', 'mac',
+ 'ipmi_credentials', 'tag', 'location', 'ip',
+ 'name', 'hostname', 'os_name', 'owner',
+ 'os_installer', 'reinstall_os', 'os_installed',
+ 'clusters', 'created_at', 'updated_at'
+]
+RESP_CLUSTER_FIELDS = [
+ 'name', 'id'
+]
+
+
+def _check_machine_filters(machine_filters):
+ """Check if machine filters format is acceptable."""
+ logging.debug('check machine filters: %s', machine_filters)
+ models.Switch.parse_filters(machine_filters)
+
+
+def _check_vlans(vlans):
+ """Check vlans format is acceptable."""
+ for vlan in vlans:
+ if not isinstance(vlan, int):
+ raise exception.InvalidParameter(
+ 'vlan %s is not int' % vlan
+ )
+
+
+@utils.supported_filters(
+ ADDED_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ ip=utils.check_ip,
+ credentials=utils.check_switch_credentials,
+ machine_filters=_check_machine_filters
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def _add_switch(
+ ip, exception_when_existing=True,
+ machine_filters=setting.SWITCHES_DEFAULT_FILTERS,
+ session=None, **kwargs
+):
+ """Add switch by switch ip."""
+ ip_int = long(netaddr.IPAddress(ip))
+ return utils.add_db_object(
+ session, models.Switch, exception_when_existing, ip_int,
+ machine_filters=machine_filters, **kwargs
+ )
+
+
+def get_switch_internal(
+ switch_id, session=None, **kwargs
+):
+ """Get switch by switch id.
+
+ Should only be used by other files under db/api
+ """
+ return _get_switch(switch_id, session=session, **kwargs)
+
+
+def _get_switch(switch_id, session=None, **kwargs):
+ """Get Switch object switch id."""
+ if isinstance(switch_id, (int, long)):
+ return utils.get_db_object(
+ session, models.Switch,
+ id=switch_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'switch id %s type is not int compatible' % switch_id)
+
+
+def _get_switch_by_ip(switch_ip, session=None, **kwargs):
+ """Get switch by switch ip."""
+ switch_ip_int = long(netaddr.IPAddress(switch_ip))
+ return utils.get_db_object(
+ session, models.Switch,
+ ip_int=switch_ip_int, **kwargs
+ )
+
+
+def _get_switch_machine(switch_id, machine_id, session=None, **kwargs):
+ """Get switch machine by switch id and machine id."""
+ switch = _get_switch(switch_id, session=session)
+ from compass.db.api import machine as machine_api
+ machine = machine_api.get_machine_internal(machine_id, session=session)
+ return utils.get_db_object(
+ session, models.SwitchMachine,
+ switch_id=switch.id, machine_id=machine.id, **kwargs
+ )
+
+
+def _get_switchmachine(switch_machine_id, session=None, **kwargs):
+ """Get switch machine by switch_machine_id."""
+ if not isinstance(switch_machine_id, (int, long)):
+ raise exception.InvalidParameter(
+ 'switch machine id %s type is not int compatible' % (
+ switch_machine_id
+ )
+ )
+ return utils.get_db_object(
+ session, models.SwitchMachine,
+ switch_machine_id=switch_machine_id, **kwargs
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCHES
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_switch(
+ switch_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a switch by switch id."""
+ return _get_switch(
+ switch_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCHES
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_switches(user=None, session=None, **filters):
+ """List switches."""
+ # TODO(xicheng): should discuss with weidong.
+ # If we can deprecate the use of DEFAULT_SWITCH_IP,
+ # The code will be simpler.
+ # The UI should use /machines-hosts instead of
+ # /switches-machines-hosts and can show multi switch ip/port
+ # under one row of machine info.
+ switches = utils.list_db_objects(
+ session, models.Switch, **filters
+ )
+ if 'ip_int' in filters:
+ return switches
+ else:
+ return [
+ switch for switch in switches
+ if switch.ip != setting.DEFAULT_SWITCH_IP
+ ]
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_SWITCH
+)
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_switch(switch_id, user=None, session=None, **kwargs):
+ """Delete a switch.
+
+    If the switch is not the default switch, any machine under it that is
+    connected only to this switch is moved to the default switch before
+    deletion; otherwise we simply delete the switch machine. The purpose
+    is to make sure no machine is left unconnected to any switch.
+ """
+ # TODO(xicheng): Simplify the logic if the default switch feature
+ # can be deprecated.
+ switch = _get_switch(switch_id, session=session)
+ default_switch = _get_switch_by_ip(
+ setting.DEFAULT_SWITCH_IP, session=session
+ )
+ if switch.id != default_switch.id:
+ for switch_machine in switch.switch_machines:
+ machine = switch_machine.machine
+ if len(machine.switch_machines) <= 1:
+ utils.add_db_object(
+ session, models.SwitchMachine,
+ False,
+ default_switch.id, machine.id,
+ port=switch_machine.port
+ )
+ return utils.del_db_object(session, switch)
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH
+)
+def add_switch(
+ exception_when_existing=True, ip=None,
+ user=None, session=None, **kwargs
+):
+ """Create a switch."""
+ return _add_switch(
+ ip,
+ exception_when_existing=exception_when_existing,
+ session=session, **kwargs
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH
+)
+def add_switches(
+ exception_when_existing=False,
+ data=[], user=None, session=None
+):
+ """Create switches."""
+ # TODO(xicheng): simplify the batch api.
+ switches = []
+ fail_switches = []
+ for switch_data in data:
+ switch_object = _get_switch_by_ip(
+ switch_data['ip'], session=session,
+ exception_when_missing=False
+ )
+ if switch_object:
+ logging.error('ip %s exists in switch %s' % (
+ switch_data['ip'], switch_object.id
+ ))
+ fail_switches.append(switch_data)
+ else:
+ switches.append(
+ _add_switch(
+ exception_when_existing=exception_when_existing,
+ session=session,
+ **switch_data
+ )
+ )
+ return {
+ 'switches': switches,
+ 'fail_switches': fail_switches
+ }
+
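The batch call above expects a list of per-switch dicts; an entry whose ip is
already registered is reported in fail_switches instead of raising. A sketch
of the payload shape (values and the admin_user object are invented):

    data = [
        {'ip': '10.145.88.1', 'vendor': 'huawei'},
        {'ip': '10.145.88.2'},
    ]
    # result = add_switches(data=data, user=admin_user)
    # -> {'switches': [added switches], 'fail_switches': [existing ips]}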
+
+@utils.wrap_to_dict(RESP_FIELDS)
+def _update_switch(switch_id, session=None, **kwargs):
+ """Update a switch."""
+ switch = _get_switch(switch_id, session=session)
+ return utils.update_db_object(session, switch, **kwargs)
+
+
+# replace machine_filters in kwargs to put_machine_filters,
+# which is used to tell db this is a put action for the field.
+@utils.replace_filters(
+ machine_filters='put_machine_filters'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ credentials=utils.check_switch_credentials,
+ put_machine_filters=_check_machine_filters
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH
+)
+def update_switch(switch_id, user=None, session=None, **kwargs):
+ """Update fields of a switch."""
+ return _update_switch(switch_id, session=session, **kwargs)
+
+
+# replace credentials to patched_credentials,
+# machine_filters to patched_machine_filters in kwargs.
+# This is to tell db they are patch action to the above fields.
+@utils.replace_filters(
+ credentials='patched_credentials',
+ machine_filters='patched_machine_filters'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(
+ patched_machine_filters=_check_machine_filters
+)
+@database.run_in_session()
+@utils.output_validates(
+ credentials=utils.check_switch_credentials
+)
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH
+)
+def patch_switch(switch_id, user=None, session=None, **kwargs):
+ """Patch fields of a switch."""
+ return _update_switch(switch_id, session=session, **kwargs)
+
+
+@util.deprecated
+@utils.supported_filters(optional_support_keys=SUPPORTED_FILTER_FIELDS)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_FILTERS
+)
+@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
+def list_switch_filters(user=None, session=None, **filters):
+ """List all switches' filters."""
+ return utils.list_db_objects(
+ session, models.Switch, **filters
+ )
+
+
+@util.deprecated
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_FILTERS
+)
+@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
+def get_switch_filters(
+ switch_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get filters of a switch."""
+ return _get_switch(
+ switch_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@util.deprecated
+@utils.replace_filters(
+ machine_filters='put_machine_filters'
+)
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FILTERS_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(put_machine_filters=_check_machine_filters)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_SWITCH_FILTERS
+)
+@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
+def update_switch_filters(switch_id, user=None, session=None, **kwargs):
+ """Update filters of a switch."""
+ switch = _get_switch(switch_id, session=session)
+ return utils.update_db_object(session, switch, **kwargs)
+
+
+@util.deprecated
+@utils.replace_filters(
+ machine_filters='patched_machine_filters'
+)
+@utils.supported_filters(
+ optional_support_keys=PATCHED_FILTERS_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(patched_machine_filters=_check_machine_filters)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_SWITCH_FILTERS
+)
+@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
+def patch_switch_filter(switch_id, user=None, session=None, **kwargs):
+ """Patch filters to a switch."""
+ switch = _get_switch(switch_id, session=session)
+ return utils.update_db_object(session, switch, **kwargs)
+
+
+@util.deprecated
+def get_switch_machines_internal(session, **filters):
+ return utils.list_db_objects(
+ session, models.SwitchMachine, **filters
+ )
+
+
+def _filter_port(port_filter, obj):
+ """filter switch machines by port.
+
+ supported port_filter keys: [
+ 'startswith', 'endswith', 'resp_lt',
+ 'resp_le', 'resp_gt', 'resp_ge', 'resp_range'
+ ]
+
+ port_filter examples:
+ {
+            'startswith': 'ae', 'endswith': '',
+ 'resp_ge': 20, 'resp_le': 30,
+ }
+ """
+ port_prefix = port_filter.get('startswith', '')
+ port_suffix = port_filter.get('endswith', '')
+ pattern = re.compile(r'%s(\d+)%s' % (port_prefix, port_suffix))
+ match = pattern.match(obj)
+ if not match:
+ return False
+ port_number = int(match.group(1))
+ if (
+ 'resp_lt' in port_filter and
+ port_number >= port_filter['resp_lt']
+ ):
+ return False
+ if (
+ 'resp_le' in port_filter and
+ port_number > port_filter['resp_le']
+ ):
+ return False
+ if (
+ 'resp_gt' in port_filter and
+ port_number <= port_filter['resp_gt']
+ ):
+ return False
+ if (
+ 'resp_ge' in port_filter and
+ port_number < port_filter['resp_ge']
+ ):
+ return False
+ if 'resp_range' in port_filter:
+ resp_range = port_filter['resp_range']
+ if not isinstance(resp_range, list):
+ resp_range = [resp_range]
+ in_range = False
+ for port_start, port_end in resp_range:
+ if port_start <= port_number <= port_end:
+ in_range = True
+ break
+ if not in_range:
+ return False
+ return True
+
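A few concrete cases for the port filter above: 'ae21' with prefix 'ae'
yields port number 21, which must then satisfy the resp_* bounds:

    assert _filter_port({'startswith': 'ae', 'resp_ge': 20, 'resp_le': 30},
                        'ae21')
    assert not _filter_port({'startswith': 'ae', 'resp_lt': 21}, 'ae21')
    assert not _filter_port({'startswith': 'ge'}, 'ae21')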
+
+def _filter_vlans(vlan_filter, obj):
+ """Filter switch machines by vlan.
+
+ supported keys in vlan_filter:
+ ['resp_in']
+ """
+ vlans = set(obj)
+ if 'resp_in' in vlan_filter:
+ resp_vlans = set(vlan_filter['resp_in'])
+ if not (vlans & resp_vlans):
+ return False
+ return True
+
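And the vlan filter passes as soon as the machine's vlans intersect the
requested set:

    assert _filter_vlans({'resp_in': [88, 100]}, [88, 1])
    assert not _filter_vlans({'resp_in': [200]}, [88, 1])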
+
+@utils.output_filters(
+ port=_filter_port, vlans=_filter_vlans,
+ tag=utils.general_filter_callback,
+ location=utils.general_filter_callback
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def _filter_switch_machines(switch_machines):
+ """Get filtered switch machines.
+
+ The filters are defined in each switch.
+ """
+ return [
+ switch_machine for switch_machine in switch_machines
+ if not switch_machine.filtered
+ ]
+
+
+@utils.output_filters(
+ missing_ok=True,
+ port=_filter_port, vlans=_filter_vlans,
+ tag=utils.general_filter_callback,
+ location=utils.general_filter_callback,
+ os_name=utils.general_filter_callback,
+)
+@utils.wrap_to_dict(
+ RESP_MACHINES_HOSTS_FIELDS,
+ clusters=RESP_CLUSTER_FIELDS
+)
+def _filter_switch_machines_hosts(switch_machines):
+ """Similar as _filter_switch_machines, but also return host info."""
+ filtered_switch_machines = [
+ switch_machine for switch_machine in switch_machines
+ if not switch_machine.filtered
+ ]
+ switch_machines_hosts = []
+ for switch_machine in filtered_switch_machines:
+ machine = switch_machine.machine
+ host = machine.host
+ if host:
+ switch_machine_host_dict = host.to_dict()
+ else:
+ switch_machine_host_dict = machine.to_dict()
+ switch_machine_host_dict.update(
+ switch_machine.to_dict()
+ )
+ switch_machines_hosts.append(switch_machine_host_dict)
+ return switch_machines_hosts
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_MACHINES_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+def list_switch_machines(
+ switch_id, user=None, session=None, **filters
+):
+ """Get switch machines of a switch."""
+ switch = _get_switch(switch_id, session=session)
+ switch_machines = utils.list_db_objects(
+ session, models.SwitchMachine, switch_id=switch.id, **filters
+ )
+    if not user.is_admin and switch_machines:
+        switch_machines = [
+            switch_machine for switch_machine in switch_machines
+            if switch_machine.machine.owner_id == user.id
+        ]
+ return _filter_switch_machines(switch_machines)
+
+
+# replace ip_int to switch_ip_int in kwargs
+@utils.replace_filters(
+ ip_int='switch_ip_int'
+)
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_SWITCH_MACHINES_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+def list_switchmachines(user=None, session=None, **filters):
+ """List switch machines."""
+ switch_machines = utils.list_db_objects(
+ session, models.SwitchMachine, **filters
+ )
+ return _filter_switch_machines(
+ switch_machines
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_MACHINES_HOSTS_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+def list_switch_machines_hosts(
+ switch_id, user=None, session=None, **filters
+):
+ """Get switch machines and possible hosts of a switch."""
+ switch = _get_switch(switch_id, session=session)
+ switch_machines = utils.list_db_objects(
+ session, models.SwitchMachine, switch_id=switch.id, **filters
+ )
+ return _filter_switch_machines_hosts(
+ switch_machines
+ )
+
+
+# replace ip_int to switch_ip_int in kwargs
+@utils.replace_filters(
+ ip_int='switch_ip_int'
+)
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+def list_switchmachines_hosts(user=None, session=None, **filters):
+ """List switch machines hnd possible hosts."""
+ switch_machines = utils.list_db_objects(
+ session, models.SwitchMachine, **filters
+ )
+    if not user.is_admin and switch_machines:
+        switch_machines = [
+            switch_machine for switch_machine in switch_machines
+            if switch_machine.machine.owner_id == user.id
+        ]
+ return _filter_switch_machines_hosts(
+ switch_machines
+ )
+
+
+@utils.supported_filters(
+ ADDED_MACHINES_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS,
+ ignore_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS
+)
+@utils.input_validates(mac=utils.check_mac)
+def _add_machine_if_not_exist(mac=None, session=None, **kwargs):
+ """Add machine if the mac does not exist in any machine."""
+ return utils.add_db_object(
+ session, models.Machine, False,
+ mac, **kwargs)
+
+
+@utils.supported_filters(
+ ADDED_SWITCH_MACHINES_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS,
+ ignore_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS
+)
+@utils.input_validates(vlans=_check_vlans)
+def _add_switch_machine_only(
+ switch, machine, exception_when_existing=True,
+ session=None, owner_id=None, port=None, **kwargs
+):
+ """add a switch machine."""
+ return utils.add_db_object(
+ session, models.SwitchMachine,
+ exception_when_existing,
+ switch.id, machine.id, port=port,
+ owner_id=owner_id,
+ **kwargs
+ )
+
+
+@utils.supported_filters(
+ ADDED_MACHINES_FIELDS + ADDED_SWITCH_MACHINES_FIELDS,
+ optional_support_keys=(
+ OPTIONAL_ADDED_MACHINES_FIELDS +
+ OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def _add_switch_machine(
+ switch_id, exception_when_existing=True,
+ mac=None, port=None, session=None, owner_id=None, **kwargs
+):
+ """Add switch machine.
+
+    If the underlying machine does not exist, also create the
+    underlying machine.
+ """
+ switch = _get_switch(switch_id, session=session)
+ machine = _add_machine_if_not_exist(
+ mac=mac, session=session, owner_id=owner_id, **kwargs
+ )
+ return _add_switch_machine_only(
+ switch, machine,
+ exception_when_existing,
+ port=port, session=session, **kwargs
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+def add_switch_machine(
+ switch_id, exception_when_existing=True,
+ mac=None, user=None, session=None,
+ owner_id=None, **kwargs
+):
+ """Add switch machine to a switch."""
+ return _add_switch_machine(
+ switch_id,
+ exception_when_existing=exception_when_existing,
+ mac=mac, session=session, owner_id=owner_id, **kwargs
+ )
+
+
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(
+ [
+ 'switches_machines',
+ 'duplicate_switches_machines',
+ 'fail_switches_machines'
+ ],
+ switches_machines=RESP_MACHINES_FIELDS,
+ duplicate_switches_machines=RESP_MACHINES_FIELDS
+)
+def add_switch_machines(
+ exception_when_existing=False,
+ data=[], user=None, session=None, owner_id=None
+):
+ """Add switch machines."""
+ switch_machines = []
+ duplicate_switch_machines = []
+ failed_switch_machines = []
+ switches_mapping = {}
+ switch_machines_mapping = {}
+ switch_ips = []
+ for item_data in data:
+ switch_ip = item_data['switch_ip']
+ if switch_ip not in switches_mapping:
+ switch_object = _get_switch_by_ip(
+ switch_ip, session=session,
+ exception_when_missing=False
+ )
+ if switch_object:
+ switch_ips.append(switch_ip)
+ switches_mapping[switch_ip] = switch_object
+ else:
+ logging.error(
+ 'switch %s does not exist' % switch_ip
+ )
+ item_data.pop('switch_ip')
+ failed_switch_machines.append(item_data)
+ else:
+ switch_object = switches_mapping[switch_ip]
+ if switch_object:
+ item_data.pop('switch_ip')
+ switch_machines_mapping.setdefault(
+ switch_object.id, []
+ ).append(item_data)
+
+ for switch_ip in switch_ips:
+ switch_object = switches_mapping[switch_ip]
+ switch_id = switch_object.id
+ machines = switch_machines_mapping[switch_id]
+ for machine in machines:
+ mac = machine['mac']
+ machine_object = _add_machine_if_not_exist(
+ mac=mac, session=session
+ )
+ switch_machine_object = _get_switch_machine(
+ switch_id, machine_object.id, session=session,
+ exception_when_missing=False
+ )
+ if switch_machine_object:
+ port = machine['port']
+ switch_machine_id = switch_machine_object.switch_machine_id
+ exist_port = switch_machine_object.port
+ if exist_port != port:
+ logging.error(
+                        'switch machine %s existing port %s is '
+ 'different from added port %s' % (
+ switch_machine_id,
+ exist_port, port
+ )
+ )
+ failed_switch_machines.append(machine)
+ else:
+ logging.error(
+                    'switch machine %s is duplicate, '
+                    'will not be overridden' % switch_machine_id
+ )
+ duplicate_switch_machines.append(machine)
+ else:
+ del machine['mac']
+ switch_machines.append(_add_switch_machine_only(
+ switch_object, machine_object,
+ exception_when_existing,
+ session=session, owner_id=owner_id, **machine
+ ))
+ return {
+ 'switches_machines': switch_machines,
+ 'duplicate_switches_machines': duplicate_switch_machines,
+ 'fail_switches_machines': failed_switch_machines
+ }
+
+
+@utils.supported_filters(optional_support_keys=['find_machines'])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_SWITCH_MACHINES
+)
+@utils.wrap_to_dict(RESP_ACTION_FIELDS)
+def poll_switch(switch_id, user=None, session=None, **kwargs):
+ """poll switch to get machines."""
+ from compass.tasks import client as celery_client
+ switch = _get_switch(switch_id, session=session)
+ celery_client.celery.send_task(
+ 'compass.tasks.pollswitch',
+ (user.email, switch.ip, switch.credentials),
+ queue=user.email,
+ exchange=user.email,
+ routing_key=user.email
+ )
+ return {
+ 'status': 'action %s sent' % kwargs,
+ 'details': {
+ }
+ }
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def get_switch_machine(
+ switch_id, machine_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a switch machine by switch id and machine id."""
+ return _get_switch_machine(
+ switch_id, machine_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_LIST_SWITCH_MACHINES
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def get_switchmachine(
+ switch_machine_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a switch machine by switch_machine_id."""
+ return _get_switchmachine(
+ switch_machine_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS
+ ),
+ ignore_support_keys=(
+ UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
+ )
+)
+def _update_machine_if_necessary(
+ machine, session=None, **kwargs
+):
+ """Update machine is there is something to update."""
+ utils.update_db_object(
+ session, machine, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_SWITCH_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=(
+ UPDATED_MACHINES_FIELDS + PATCHED_MACHINES_FIELDS
+ )
+)
+def _update_switch_machine_only(switch_machine, session=None, **kwargs):
+ """Update switch machine."""
+ return utils.update_db_object(
+ session, switch_machine, **kwargs
+ )
+
+
+def _update_switch_machine(
+ switch_machine, session=None, **kwargs
+):
+ """Update switch machine.
+
+    If some attributes of the underlying machine need to be updated,
+    update them on the underlying machine as well.
+ """
+ _update_machine_if_necessary(
+ switch_machine.machine, session=session, **kwargs
+ )
+ return _update_switch_machine_only(
+ switch_machine, session=session, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(vlans=_check_vlans)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def update_switch_machine(
+ switch_id, machine_id, user=None,
+ session=None, **kwargs
+):
+ """Update switch machine by switch id and machine id."""
+ switch_machine = _get_switch_machine(
+ switch_id, machine_id, session=session
+ )
+ return _update_switch_machine(
+ switch_machine,
+ session=session, **kwargs
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=(
+ UPDATED_MACHINES_FIELDS + UPDATED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(vlans=_check_vlans)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def update_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
+ """Update switch machine by switch_machine_id."""
+ switch_machine = _get_switchmachine(
+ switch_machine_id, session=session
+ )
+ return _update_switch_machine(
+ switch_machine,
+ session=session, **kwargs
+ )
+
+
+# replace [vlans, ipmi_credentials, tag, location] with
+# [patched_vlans, patched_ipmi_credentials, patched_tag,
+# patched_location] in kwargs. This tells the db these fields
+# will be patched.
+@utils.replace_filters(
+ vlans='patched_vlans',
+ ipmi_credentials='patched_ipmi_credentials',
+ tag='patched_tag',
+ location='patched_location'
+)
+@utils.supported_filters(
+ optional_support_keys=(
+ PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(patched_vlans=_check_vlans)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def patch_switch_machine(
+ switch_id, machine_id, user=None,
+ session=None, **kwargs
+):
+ """Patch switch machine by switch_id and machine_id."""
+ switch_machine = _get_switch_machine(
+ switch_id, machine_id, session=session
+ )
+ return _update_switch_machine(
+ switch_machine,
+ session=session, **kwargs
+ )
+
+
+# replace [vlans, ipmi_credentials, tag, location] with
+# [patched_vlans, patched_ipmi_credentials, patched_tag,
+# patched_location] in kwargs. This tells the db these fields
+# will be patched.
+@utils.replace_filters(
+ vlans='patched_vlans',
+ ipmi_credentials='patched_ipmi_credentials',
+ tag='patched_tag',
+ location='patched_location'
+)
+@utils.supported_filters(
+ optional_support_keys=(
+ PATCHED_MACHINES_FIELDS + PATCHED_SWITCH_MACHINES_FIELDS
+ ),
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(patched_vlans=_check_vlans)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_ADD_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def patch_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
+ """Patch switch machine by switch_machine_id."""
+ switch_machine = _get_switchmachine(
+ switch_machine_id, session=session
+ )
+ return _update_switch_machine(
+ switch_machine,
+ session=session, **kwargs
+ )
+
+
+def _del_switch_machine(
+ switch_machine, session=None
+):
+ """Delete switch machine.
+
+    If this is the last switch machine associated with the underlying
+    machine, add a switch machine record to the default switch to keep
+    the machine searchable.
+ """
+ default_switch = _get_switch_by_ip(
+ setting.DEFAULT_SWITCH_IP, session=session
+ )
+ machine = switch_machine.machine
+ if len(machine.switch_machines) <= 1:
+ utils.add_db_object(
+ session, models.SwitchMachine,
+ False,
+ default_switch.id, machine.id,
+ port=switch_machine.port
+ )
+ return utils.del_db_object(session, switch_machine)
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def del_switch_machine(
+ switch_id, machine_id, user=None,
+ session=None, **kwargs
+):
+ """Delete switch machine by switch id and machine id."""
+ switch_machine = _get_switch_machine(
+ switch_id, machine_id, session=session
+ )
+ return _del_switch_machine(switch_machine, session=session)
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_DEL_SWITCH_MACHINE
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def del_switchmachine(switch_machine_id, user=None, session=None, **kwargs):
+ """Delete switch machine by switch_machine_id."""
+ switch_machine = _get_switchmachine(
+ switch_machine_id, session=session
+ )
+ return _del_switch_machine(switch_machine, session=session)
+
+
+@utils.supported_filters(
+ ['machine_id'],
+ optional_support_keys=UPDATED_SWITCH_MACHINES_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+def _add_machine_to_switch(
+ switch_id, machine_id, session=None, **kwargs
+):
+ """Add machine to switch."""
+ switch = _get_switch(switch_id, session=session)
+ from compass.db.api import machine as machine_api
+ machine = machine_api.get_machine_internal(
+ machine_id, session=session
+ )
+ _add_switch_machine_only(
+ switch, machine, False,
+ owner_id=machine.owner_id, **kwargs
+ )
+
+
+def _add_machines(switch, machines, session=None):
+ """Add machines to switch.
+
+ Args:
+        machines: list of dicts, each containing the attributes
+            needed to add a machine to the switch.
+
+    machines example:
+        [{'machine_id': 1, 'port': 'ae20'}]
+ """
+ for machine in machines:
+ _add_machine_to_switch(
+ switch.id, session=session, **machine
+ )
+
+
+def _remove_machines(switch, machines, session=None):
+ """Remove machines from switch.
+
+ Args:
+        machines: list of machine ids.
+
+    machines example:
+        [1, 2]
+ """
+ utils.del_db_objects(
+ session, models.SwitchMachine,
+ switch_id=switch.id, machine_id=machines
+ )
+
+
+def _set_machines(switch, machines, session=None):
+ """Reset machines to a switch.
+
+ Args:
+        machines: list of dicts, each containing the attributes
+            needed to add a machine to the switch.
+
+    machines example:
+        [{'machine_id': 1, 'port': 'ae20'}]
+ """
+ utils.del_db_objects(
+ session, models.SwitchMachine,
+ switch_id=switch.id
+ )
+ for switch_machine in machines:
+ _add_machine_to_switch(
+ switch.id, session=session, **switch_machine
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=[
+ 'add_machines', 'remove_machines', 'set_machines'
+ ]
+)
+@database.run_in_session()
+@user_api.check_user_permission(
+ permission.PERMISSION_UPDATE_SWITCH_MACHINES
+)
+@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
+def update_switch_machines(
+ switch_id, add_machines=[], remove_machines=[],
+ set_machines=None, user=None, session=None, **kwargs
+):
+ """update switch's machines"""
+ switch = _get_switch(switch_id, session=session)
+ if remove_machines:
+ _remove_machines(
+ switch, remove_machines, session=session
+ )
+ if add_machines:
+ _add_machines(
+ switch, add_machines, session=session
+ )
+ if set_machines is not None:
+ _set_machines(
+ switch, set_machines, session=session
+ )
+ return switch.switch_machines
diff --git a/compass-tasks/db/api/user.py b/compass-tasks/db/api/user.py
new file mode 100644
index 0000000..db039eb
--- /dev/null
+++ b/compass-tasks/db/api/user.py
@@ -0,0 +1,553 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User database operations."""
+import datetime
+import functools
+import logging
+import re
+
+from flask.ext.login import UserMixin
+
+from compass.db.api import database
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+SUPPORTED_FIELDS = ['email', 'is_admin', 'active']
+PERMISSION_SUPPORTED_FIELDS = ['name']
+SELF_UPDATED_FIELDS = ['email', 'firstname', 'lastname', 'password']
+ADMIN_UPDATED_FIELDS = ['is_admin', 'active']
+IGNORE_FIELDS = ['id', 'created_at', 'updated_at']
+UPDATED_FIELDS = [
+ 'email', 'firstname', 'lastname', 'password', 'is_admin', 'active'
+]
+ADDED_FIELDS = ['email', 'password']
+OPTIONAL_ADDED_FIELDS = ['is_admin', 'active']
+PERMISSION_ADDED_FIELDS = ['permission_id']
+RESP_FIELDS = [
+ 'id', 'email', 'is_admin', 'active', 'firstname',
+ 'lastname', 'created_at', 'updated_at'
+]
+RESP_TOKEN_FIELDS = [
+ 'id', 'user_id', 'token', 'expire_timestamp'
+]
+PERMISSION_RESP_FIELDS = [
+ 'id', 'user_id', 'permission_id', 'name', 'alias', 'description',
+ 'created_at', 'updated_at'
+]
+
+
+def _check_email(email):
+ """Check email is email format."""
+ if '@' not in email:
+ raise exception.InvalidParameter(
+ 'there is no @ in email address %s.' % email
+ )
+
+
+def _check_user_permission(user, permission, session=None):
+ """Check user has permission."""
+ if not user:
+        logging.info('empty user means the call is internal')
+ return
+ if user.is_admin:
+ return
+
+ user_permission = utils.get_db_object(
+ session, models.UserPermission,
+ False, user_id=user.id, name=permission.name
+ )
+ if not user_permission:
+ raise exception.Forbidden(
+ 'user %s does not have permission %s' % (
+ user.email, permission.name
+ )
+ )
+
+
+def check_user_permission(permission):
+ """Decorator to check user having permission."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ user = kwargs.get('user')
+ if user is not None:
+ session = kwargs.get('session')
+ if session is None:
+ raise exception.DatabaseException(
+ 'wrapper check_user_permission does not run in session'
+ )
+ _check_user_permission(user, permission, session=session)
+ return func(*args, **kwargs)
+ else:
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
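+
+
+# Illustrative usage, mirroring the call sites in this package (the
+# decorator order shown is an assumption):
+#
+#     @database.run_in_session()
+#     @check_user_permission(permission.PERMISSION_LIST_SWITCH_MACHINES)
+#     def list_something(user=None, session=None, **filters):
+#         ...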
+
+
+def check_user_admin():
+ """Decorator to check user is admin."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ user = kwargs.get('user')
+ if user is not None:
+ if not user.is_admin:
+ raise exception.Forbidden(
+ 'User %s is not admin.' % (
+ user.email
+ )
+ )
+ return func(*args, **kwargs)
+ else:
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
+
+
+def check_user_admin_or_owner():
+ """Decorator to check user is admin or the owner of the resource."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(user_id, *args, **kwargs):
+ user = kwargs.get('user')
+ if user is not None:
+ session = kwargs.get('session')
+ if session is None:
+ raise exception.DatabaseException(
+ 'wrapper check_user_admin_or_owner is '
+ 'not called in session'
+ )
+ check_user = _get_user(user_id, session=session)
+ if not user.is_admin and user.id != check_user.id:
+ raise exception.Forbidden(
+ 'User %s is not admin or the owner of user %s.' % (
+ user.email, check_user.email
+ )
+ )
+
+ return func(
+ user_id, *args, **kwargs
+ )
+ else:
+ return func(
+ user_id, *args, **kwargs
+ )
+ return wrapper
+ return decorator
+
+
+def _add_user_permissions(user, session=None, **permission_filters):
+ """add permissions to a user."""
+ from compass.db.api import permission as permission_api
+ for api_permission in permission_api.list_permissions(
+ session=session, **permission_filters
+ ):
+ utils.add_db_object(
+ session, models.UserPermission, False,
+ user.id, api_permission['id']
+ )
+
+
+def _remove_user_permissions(user, session=None, **permission_filters):
+ """remove permissions from a user."""
+ from compass.db.api import permission as permission_api
+ permission_ids = [
+ api_permission['id']
+ for api_permission in permission_api.list_permissions(
+ session=session, **permission_filters
+ )
+ ]
+ utils.del_db_objects(
+ session, models.UserPermission,
+ user_id=user.id, permission_id=permission_ids
+ )
+
+
+def _set_user_permissions(user, session=None, **permission_filters):
+ """set permissions to a user."""
+ utils.del_db_objects(
+ session, models.UserPermission,
+ user_id=user.id
+ )
+    _add_user_permissions(user, session=session, **permission_filters)
+
+
+class UserWrapper(UserMixin):
+ """Wrapper class provided to flask."""
+
+ def __init__(
+ self, id, email, crypted_password,
+ active=True, is_admin=False,
+ expire_timestamp=None, token='', **kwargs
+ ):
+ self.id = id
+ self.email = email
+ self.password = crypted_password
+ self.active = active
+ self.is_admin = is_admin
+ self.expire_timestamp = expire_timestamp
+ if not token:
+ self.token = self.get_auth_token()
+ else:
+ self.token = token
+ super(UserWrapper, self).__init__()
+
+ def authenticate(self, password):
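+        # Assumption: util.encrypt behaves like crypt(), using the
+        # stored hash as the salt, so encrypting a correct password
+        # reproduces the stored hash.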
+        if util.encrypt(password, self.password) != self.password:
+ raise exception.Unauthorized('%s password mismatch' % self.email)
+
+ def get_auth_token(self):
+ return util.encrypt(self.email)
+
+ def is_active(self):
+ return self.active
+
+ def get_id(self):
+ return self.token
+
+ def is_authenticated(self):
+ current_time = datetime.datetime.now()
+ return (
+ not self.expire_timestamp or
+ current_time < self.expire_timestamp
+ )
+
+ def __str__(self):
+ return '%s[email:%s,password:%s]' % (
+ self.__class__.__name__, self.email, self.password)
+
+
+@database.run_in_session()
+def get_user_object(email, session=None, **kwargs):
+ """get user and convert to UserWrapper object."""
+ user = utils.get_db_object(
+ session, models.User, False, email=email
+ )
+ if not user:
+ raise exception.Unauthorized(
+ '%s unauthorized' % email
+ )
+ user_dict = user.to_dict()
+ user_dict.update(kwargs)
+ return UserWrapper(**user_dict)
+
+
+@database.run_in_session(exception_when_in_session=False)
+def get_user_object_from_token(token, session=None):
+ """Get user from token and convert to UserWrapper object.
+
+    .. note::
+ get_user_object_from_token may be called in session.
+ """
+ expire_timestamp = {
+ 'ge': datetime.datetime.now()
+ }
+ user_token = utils.get_db_object(
+ session, models.UserToken, False,
+ token=token, expire_timestamp=expire_timestamp
+ )
+ if not user_token:
+ raise exception.Unauthorized(
+ 'invalid user token: %s' % token
+ )
+ user_dict = _get_user(
+ user_token.user_id, session=session
+ ).to_dict()
+ user_dict['token'] = token
+ expire_timestamp = user_token.expire_timestamp
+ user_dict['expire_timestamp'] = expire_timestamp
+ return UserWrapper(**user_dict)
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
+def record_user_token(
+ token, expire_timestamp, user=None, session=None
+):
+ """record user token in database."""
+ user_token = utils.get_db_object(
+ session, models.UserToken, False,
+ user_id=user.id, token=token
+ )
+ if not user_token:
+ return utils.add_db_object(
+ session, models.UserToken, True,
+ token, user_id=user.id,
+ expire_timestamp=expire_timestamp
+ )
+ elif expire_timestamp > user_token.expire_timestamp:
+ return utils.update_db_object(
+ session, user_token, expire_timestamp=expire_timestamp
+ )
+ return user_token
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
+def clean_user_token(token, user=None, session=None):
+ """clean user token in database."""
+ return utils.del_db_objects(
+ session, models.UserToken,
+ token=token, user_id=user.id
+ )
+
+
+def _get_user(user_id, session=None, **kwargs):
+ """Get user object by user id."""
+ if isinstance(user_id, (int, long)):
+ return utils.get_db_object(
+ session, models.User, id=user_id, **kwargs
+ )
+ raise exception.InvalidParameter(
+ 'user id %s type is not int compatible' % user_id
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@check_user_admin_or_owner()
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_user(
+ user_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """get a user."""
+ return _get_user(
+ user_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def get_current_user(
+ exception_when_missing=True, user=None,
+ session=None, **kwargs
+):
+ """get current user."""
+ return _get_user(
+ user.id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+
+
+@utils.supported_filters(
+ optional_support_keys=SUPPORTED_FIELDS
+)
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_users(user=None, session=None, **filters):
+ """List all users."""
+ return utils.list_db_objects(
+ session, models.User, **filters
+ )
+
+
+@utils.input_validates(email=_check_email)
+@utils.supported_filters(
+ ADDED_FIELDS,
+ optional_support_keys=OPTIONAL_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(RESP_FIELDS)
+def add_user(
+ exception_when_existing=True, user=None,
+ session=None, email=None, **kwargs
+):
+ """Create a user and return created user object."""
+ add_user = utils.add_db_object(
+ session, models.User,
+ exception_when_existing, email,
+ **kwargs)
+ _add_user_permissions(
+ add_user,
+ session=session,
+ name=setting.COMPASS_DEFAULT_PERMISSIONS
+ )
+ return add_user
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_user(user_id, user=None, session=None, **kwargs):
+ """delete a user and return the deleted user object."""
+ del_user = _get_user(user_id, session=session)
+ return utils.del_db_object(session, del_user)
+
+
+@utils.supported_filters(
+ optional_support_keys=UPDATED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@utils.input_validates(email=_check_email)
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def update_user(user_id, user=None, session=None, **kwargs):
+ """Update a user and return the updated user object."""
+ update_user = _get_user(
+ user_id, session=session,
+ )
+ allowed_fields = set()
+ if user.is_admin:
+ allowed_fields |= set(ADMIN_UPDATED_FIELDS)
+ if user.id == update_user.id:
+ allowed_fields |= set(SELF_UPDATED_FIELDS)
+ unsupported_fields = set(kwargs) - allowed_fields
+ if unsupported_fields:
+        # The user is not allowed to update these fields.
+        raise exception.Forbidden(
+            'User %s has no permission to update user %s fields %s.' % (
+                user.email, update_user.email, unsupported_fields
+            )
+ )
+ )
+ return utils.update_db_object(session, update_user, **kwargs)
+
+
+@utils.supported_filters(optional_support_keys=PERMISSION_SUPPORTED_FIELDS)
+@database.run_in_session()
+@check_user_admin_or_owner()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def get_permissions(
+ user_id, user=None, exception_when_missing=True,
+ session=None, **kwargs
+):
+ """List permissions of a user."""
+ get_user = _get_user(
+ user_id, session=session,
+ exception_when_missing=exception_when_missing
+ )
+ return utils.list_db_objects(
+ session, models.UserPermission, user_id=get_user.id, **kwargs
+ )
+
+
+def _get_permission(user_id, permission_id, session=None, **kwargs):
+ """Get user permission by user id and permission id."""
+ user = _get_user(user_id, session=session)
+ from compass.db.api import permission as permission_api
+ permission = permission_api.get_permission_internal(
+ permission_id, session=session
+ )
+ return utils.get_db_object(
+ session, models.UserPermission,
+ user_id=user.id, permission_id=permission.id,
+ **kwargs
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@check_user_admin_or_owner()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def get_permission(
+ user_id, permission_id, exception_when_missing=True,
+ user=None, session=None, **kwargs
+):
+ """Get a permission of a user."""
+ return _get_permission(
+ user_id, permission_id,
+ exception_when_missing=exception_when_missing,
+ session=session,
+ **kwargs
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@check_user_admin_or_owner()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def del_permission(user_id, permission_id, user=None, session=None, **kwargs):
+ """Delete a permission from a user."""
+ user_permission = _get_permission(
+ user_id, permission_id,
+ session=session, **kwargs
+ )
+ return utils.del_db_object(session, user_permission)
+
+
+@utils.supported_filters(
+ PERMISSION_ADDED_FIELDS,
+ ignore_support_keys=IGNORE_FIELDS
+)
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def add_permission(
+ user_id, permission_id=None, exception_when_existing=True,
+ user=None, session=None
+):
+ """Add a permission to a user."""
+ get_user = _get_user(user_id, session=session)
+ from compass.db.api import permission as permission_api
+ get_permission = permission_api.get_permission_internal(
+ permission_id, session=session
+ )
+ return utils.add_db_object(
+ session, models.UserPermission, exception_when_existing,
+ get_user.id, get_permission.id
+ )
+
+
+def _get_permission_filters(permission_ids):
+ """Helper function to filter permissions."""
+ if permission_ids == 'all':
+ return {}
+ else:
+ return {'id': permission_ids}
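+
+
+# Examples of the mapping above: _get_permission_filters('all') returns
+# {} (no filtering), while _get_permission_filters([1, 2]) returns
+# {'id': [1, 2]}.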
+
+
+@utils.supported_filters(
+ optional_support_keys=[
+ 'add_permissions', 'remove_permissions', 'set_permissions'
+ ]
+)
+@database.run_in_session()
+@check_user_admin()
+@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
+def update_permissions(
+ user_id, add_permissions=[], remove_permissions=[],
+ set_permissions=None, user=None, session=None, **kwargs
+):
+ """update user permissions."""
+ update_user = _get_user(user_id, session=session)
+ if remove_permissions:
+ _remove_user_permissions(
+ update_user, session=session,
+ **_get_permission_filters(remove_permissions)
+ )
+ if add_permissions:
+ _add_user_permissions(
+ update_user, session=session,
+ **_get_permission_filters(add_permissions)
+ )
+ if set_permissions is not None:
+ _set_user_permissions(
+ update_user, session=session,
+ **_get_permission_filters(set_permissions)
+ )
+ return update_user.user_permissions
diff --git a/compass-tasks/db/api/user_log.py b/compass-tasks/db/api/user_log.py
new file mode 100644
index 0000000..70de9db
--- /dev/null
+++ b/compass-tasks/db/api/user_log.py
@@ -0,0 +1,82 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""UserLog database operations."""
+import logging
+
+from compass.db.api import database
+from compass.db.api import user as user_api
+from compass.db.api import utils
+from compass.db import exception
+from compass.db import models
+
+
+SUPPORTED_FIELDS = ['user_email', 'timestamp']
+USER_SUPPORTED_FIELDS = ['timestamp']
+RESP_FIELDS = ['user_id', 'action', 'timestamp']
+
+
+@database.run_in_session()
+def log_user_action(user_id, action, session=None):
+ """Log user action."""
+ utils.add_db_object(
+ session, models.UserLog, True, user_id=user_id, action=action
+ )
+
+
+@utils.supported_filters(optional_support_keys=USER_SUPPORTED_FIELDS)
+@database.run_in_session()
+@user_api.check_user_admin_or_owner()
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_user_actions(user_id, user=None, session=None, **filters):
+ """list user actions of a user."""
+ list_user = user_api.get_user(user_id, user=user, session=session)
+ return utils.list_db_objects(
+ session, models.UserLog, order_by=['timestamp'],
+ user_id=list_user['id'], **filters
+ )
+
+
+@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
+@user_api.check_user_admin()
+@database.run_in_session()
+@utils.wrap_to_dict(RESP_FIELDS)
+def list_actions(user=None, session=None, **filters):
+ """list actions of all users."""
+ return utils.list_db_objects(
+ session, models.UserLog, order_by=['timestamp'], **filters
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_admin_or_owner()
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_user_actions(user_id, user=None, session=None, **filters):
+ """delete actions of a user."""
+ del_user = user_api.get_user(user_id, user=user, session=session)
+ return utils.del_db_objects(
+ session, models.UserLog, user_id=del_user['id'], **filters
+ )
+
+
+@utils.supported_filters()
+@database.run_in_session()
+@user_api.check_user_admin()
+@utils.wrap_to_dict(RESP_FIELDS)
+def del_actions(user=None, session=None, **filters):
+ """delete actions of all users."""
+ return utils.del_db_objects(
+ session, models.UserLog, **filters
+ )
diff --git a/compass-tasks/db/api/utils.py b/compass-tasks/db/api/utils.py
new file mode 100644
index 0000000..a44f26e
--- /dev/null
+++ b/compass-tasks/db/api/utils.py
@@ -0,0 +1,1286 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utils for database usage."""
+
+import functools
+import inspect
+import logging
+import netaddr
+import re
+
+from inspect import isfunction
+from sqlalchemy import and_
+from sqlalchemy import or_
+
+from compass.db import exception
+from compass.db import models
+from compass.utils import util
+
+
+def model_query(session, model):
+ """model query.
+
+ Return sqlalchemy query object.
+ """
+ if not issubclass(model, models.BASE):
+ raise exception.DatabaseException("model should be sublass of BASE!")
+
+ return session.query(model)
+
+
+def _default_list_condition_func(col_attr, value, condition_func):
+ """The default condition func for a list of data.
+
+    Given the condition func for a single data item, this function
+    wraps the condition_func and returns another condition func that
+    uses or_ to merge the conditions of each single item, so that a
+    list of data items can be handled.
+
+    Args:
+        col_attr: the column attribute
+        value: the column value(s) to be compared.
+        condition_func: the sqlalchemy condition builder like ==
+
+ Examples:
+ col_attr is name, value is ['a', 'b', 'c'] and
+ condition_func is ==, the returned condition is
+ name == 'a' or name == 'b' or name == 'c'
+ """
+ conditions = []
+ for sub_value in value:
+ condition = condition_func(col_attr, sub_value)
+ if condition is not None:
+ conditions.append(condition)
+ if conditions:
+ return or_(*conditions)
+ else:
+ return None
+
+
+def _one_item_list_condition_func(col_attr, value, condition_func):
+ """The wrapper condition func to deal with one item data list.
+
+ For simplification, it is used to reduce generating too complex
+ sql conditions.
+ """
+ if value:
+ return condition_func(col_attr, value[0])
+ else:
+ return None
+
+
+def _model_condition_func(
+ col_attr, value,
+ item_condition_func,
+ list_condition_func=_default_list_condition_func
+):
+ """Return sql condition based on value type."""
+ if isinstance(value, list):
+ if not value:
+ return None
+ if len(value) == 1:
+ return item_condition_func(col_attr, value)
+ return list_condition_func(
+ col_attr, value, item_condition_func
+ )
+ else:
+ return item_condition_func(col_attr, value)
+
+
+def _between_condition(col_attr, value):
+ """Return sql range condition."""
+ if value[0] is not None and value[1] is not None:
+ return col_attr.between(value[0], value[1])
+ if value[0] is not None:
+ return col_attr >= value[0]
+ if value[1] is not None:
+ return col_attr <= value[1]
+ return None
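+
+
+# Illustrative behavior (the column is an example): given
+# _between_condition(models.Switch.ip_int, (10, 20)), the condition is
+# "ip_int BETWEEN 10 AND 20"; an open end such as (10, None) degrades
+# to "ip_int >= 10", and (None, None) yields None.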
+
+
+def model_order_by(query, model, order_by):
+ """append order by into sql query model."""
+ if not order_by:
+ return query
+ order_by_cols = []
+ for key in order_by:
+ if isinstance(key, tuple):
+ key, is_desc = key
+ else:
+ is_desc = False
+ if isinstance(key, basestring):
+ if hasattr(model, key):
+ col_attr = getattr(model, key)
+ else:
+ continue
+ else:
+ col_attr = key
+ if is_desc:
+ order_by_cols.append(col_attr.desc())
+ else:
+ order_by_cols.append(col_attr)
+ return query.order_by(*order_by_cols)
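+
+
+# Illustrative call (the column names are examples): order by mac
+# ascending, then created_at descending; keys that are not columns of
+# the model are silently skipped:
+#
+#     model_order_by(query, models.Machine, ['mac', ('created_at', True)])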
+
+
+def _model_condition(col_attr, value):
+ """Generate condition for one column.
+
+ Example for col_attr is name:
+ value is 'a': name == 'a'
+ value is ['a']: name == 'a'
+ value is ['a', 'b']: name == 'a' or name == 'b'
+ value is {'eq': 'a'}: name == 'a'
+ value is {'lt': 'a'}: name < 'a'
+ value is {'le': 'a'}: name <= 'a'
+ value is {'gt': 'a'}: name > 'a'
+ value is {'ge': 'a'}: name >= 'a'
+ value is {'ne': 'a'}: name != 'a'
+ value is {'in': ['a', 'b']}: name in ['a', 'b']
+ value is {'notin': ['a', 'b']}: name not in ['a', 'b']
+ value is {'startswith': 'abc'}: name like 'abc%'
+ value is {'endswith': 'abc'}: name like '%abc'
+ value is {'like': 'abc'}: name like '%abc%'
+ value is {'between': ('a', 'c')}: name >= 'a' and name <= 'c'
+ value is [{'lt': 'a'}]: name < 'a'
+        value is [{'lt': 'a'}, {'gt': 'c'}]: name < 'a' or name > 'c'
+ value is {'lt': 'c', 'gt': 'a'}: name > 'a' and name < 'c'
+
+ If value is a list, the condition is the or relationship among
+ conditions of each item.
+ If value is dict and there are multi keys in the dict, the relationship
+ is and conditions of each key.
+ Otherwise the condition is to compare the column with the value.
+ """
+ if isinstance(value, list):
+ basetype_values = []
+ composite_values = []
+ for item in value:
+ if isinstance(item, (list, dict)):
+ composite_values.append(item)
+ else:
+ basetype_values.append(item)
+ conditions = []
+ if basetype_values:
+ if len(basetype_values) == 1:
+ condition = (col_attr == basetype_values[0])
+ else:
+ condition = col_attr.in_(basetype_values)
+ conditions.append(condition)
+ for composite_value in composite_values:
+ condition = _model_condition(col_attr, composite_value)
+ if condition is not None:
+ conditions.append(condition)
+ if not conditions:
+ return None
+ if len(conditions) == 1:
+ return conditions[0]
+ return or_(*conditions)
+ elif isinstance(value, dict):
+ conditions = []
+ if 'eq' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['eq'],
+ lambda attr, data: attr == data,
+ lambda attr, data, item_condition_func: attr.in_(data)
+ ))
+ if 'lt' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['lt'],
+ lambda attr, data: attr < data,
+ _one_item_list_condition_func
+ ))
+ if 'gt' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['gt'],
+ lambda attr, data: attr > data,
+ _one_item_list_condition_func
+ ))
+ if 'le' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['le'],
+ lambda attr, data: attr <= data,
+ _one_item_list_condition_func
+ ))
+ if 'ge' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['ge'],
+ lambda attr, data: attr >= data,
+ _one_item_list_condition_func
+ ))
+ if 'ne' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['ne'],
+ lambda attr, data: attr != data,
+ lambda attr, data, item_condition_func: attr.notin_(data)
+ ))
+ if 'in' in value:
+ conditions.append(col_attr.in_(value['in']))
+ if 'notin' in value:
+ conditions.append(col_attr.notin_(value['notin']))
+ if 'startswith' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['startswith'],
+ lambda attr, data: attr.like('%s%%' % data)
+ ))
+ if 'endswith' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['endswith'],
+ lambda attr, data: attr.like('%%%s' % data)
+ ))
+ if 'like' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['like'],
+ lambda attr, data: attr.like('%%%s%%' % data)
+ ))
+ if 'between' in value:
+ conditions.append(_model_condition_func(
+ col_attr, value['between'],
+ _between_condition
+ ))
+ conditions = [
+ condition
+ for condition in conditions
+ if condition is not None
+ ]
+ if not conditions:
+ return None
+ if len(conditions) == 1:
+ return conditions[0]
+        return and_(*conditions)
+ else:
+ condition = (col_attr == value)
+ return condition
+
+
+def model_filter(query, model, **filters):
+ """Append conditons to query for each possible column."""
+ for key, value in filters.items():
+ if isinstance(key, basestring):
+ if hasattr(model, key):
+ col_attr = getattr(model, key)
+ else:
+ continue
+ else:
+ col_attr = key
+ condition = _model_condition(col_attr, value)
+ if condition is not None:
+ query = query.filter(condition)
+ return query
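+
+
+# Illustrative call (the filter values are examples); see
+# _model_condition for the full condition grammar:
+#
+#     query = model_filter(
+#         model_query(session, models.Switch), models.Switch,
+#         id=[1, 2, 3], ip_int={'between': (1, 100)}
+#     )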
+
+
+def replace_output(**output_mapping):
+ """Decorator to recursively relace output by output mapping.
+
+ The replacement detail is described in _replace_output.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ return _replace_output(
+ func(*args, **kwargs), **output_mapping
+ )
+ return wrapper
+ return decorator
+
+
+def _replace_output(data, **output_mapping):
+ """Helper to replace output data.
+
+ Example:
+ data = {'a': 'hello'}
+ output_mapping = {'a': 'b'}
+ returns: {'b': 'hello'}
+
+ data = {'a': {'b': 'hello'}}
+ output_mapping = {'a': 'b'}
+ returns: {'b': {'b': 'hello'}}
+
+ data = {'a': {'b': 'hello'}}
+ output_mapping = {'a': {'b': 'c'}}
+ returns: {'a': {'c': 'hello'}}
+
+ data = [{'a': 'hello'}, {'a': 'hi'}]
+ output_mapping = {'a': 'b'}
+ returns: [{'b': 'hello'}, {'b': 'hi'}]
+ """
+ if isinstance(data, list):
+ return [
+ _replace_output(item, **output_mapping)
+ for item in data
+ ]
+ if not isinstance(data, dict):
+ raise exception.InvalidResponse(
+ '%s type is not dict' % data
+ )
+ info = {}
+ for key, value in data.items():
+ if key in output_mapping:
+ output_key = output_mapping[key]
+ if isinstance(output_key, basestring):
+ info[output_key] = value
+ else:
+ info[key] = (
+ _replace_output(value, **output_key)
+ )
+ else:
+ info[key] = value
+ return info
+
+
+def get_wrapped_func(func):
+ """Get wrapped function instance.
+
+ Example:
+ @dec1
+ @dec2
+ myfunc(*args, **kwargs)
+
+    get_wrapped_func(myfunc) returns the innermost wrapped function
+    object, with attributes such as:
+        __name__: 'myfunc'
+    Without unwrapping, myfunc would be the outermost wrapper with
+    attributes like:
+        __name__: partial object ...
+ """
+ if func.func_closure:
+ for closure in func.func_closure:
+ if isfunction(closure.cell_contents):
+ return get_wrapped_func(closure.cell_contents)
+ return func
+ else:
+ return func
+
+
+def wrap_to_dict(support_keys=[], **filters):
+ """Decrator to convert returned object to dict.
+
+ The details is decribed in _wrapper_dict.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ return _wrapper_dict(
+ func(*args, **kwargs), support_keys, **filters
+ )
+ return wrapper
+ return decorator
+
+
+def _wrapper_dict(data, support_keys, **filters):
+ """Helper for warpping db object into dictionary.
+
+ If data is list, convert it to a list of dict
+ If data is Base model, convert it to dict
+ for the data as a dict, filter it with the supported keys.
+ For each filter_key, filter_value in filters, also filter
+ data[filter_key] by filter_value recursively if it exists.
+
+ Example:
+ data is models.Switch, it will be converted to
+ {
+ 'id': 1, 'ip': '10.0.0.1', 'ip_int': 123456,
+ 'credentials': {'version': 2, 'password': 'abc'}
+ }
+ Then if support_keys are ['id', 'ip', 'credentials'],
+ it will be filtered to {
+ 'id': 1, 'ip': '10.0.0.1',
+ 'credentials': {'version': 2, 'password': 'abc'}
+ }
+ Then if filters is {'credentials': ['version']},
+ it will be filtered to {
+ 'id': 1, 'ip': '10.0.0.1',
+ 'credentials': {'version': 2}
+ }
+ """
+ logging.debug(
+ 'wrap dict %s by support_keys=%s filters=%s',
+ data, support_keys, filters
+ )
+ if isinstance(data, list):
+ return [
+ _wrapper_dict(item, support_keys, **filters)
+ for item in data
+ ]
+ if isinstance(data, models.HelperMixin):
+ data = data.to_dict()
+ if not isinstance(data, dict):
+ raise exception.InvalidResponse(
+ 'response %s type is not dict' % data
+ )
+ info = {}
+ try:
+ for key in support_keys:
+ if key in data and data[key] is not None:
+ if key in filters:
+ filter_keys = filters[key]
+ if isinstance(filter_keys, dict):
+ info[key] = _wrapper_dict(
+ data[key], filter_keys.keys(),
+ **filter_keys
+ )
+ else:
+ info[key] = _wrapper_dict(
+ data[key], filter_keys
+ )
+ else:
+ info[key] = data[key]
+ return info
+ except Exception as error:
+ logging.exception(error)
+ raise error
+
+
+def replace_filters(**kwarg_mapping):
+ """Decorator to replace kwargs.
+
+ Examples:
+ kwargs: {'a': 'b'}, kwarg_mapping: {'a': 'c'}
+ replaced kwargs to decorated func:
+ {'c': 'b'}
+
+ replace_filters is used to replace caller's input
+ to make it understandable by models.py.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ replaced_kwargs = {}
+ for key, value in kwargs.items():
+ if key in kwarg_mapping:
+ replaced_kwargs[kwarg_mapping[key]] = value
+ else:
+ replaced_kwargs[key] = value
+ return func(*args, **replaced_kwargs)
+ return wrapper
+ return decorator
+
+
+def supported_filters(
+ support_keys=[],
+ optional_support_keys=[],
+ ignore_support_keys=[],
+):
+ """Decorator to check kwargs keys.
+
+ keys in kwargs and in ignore_support_keys will be removed.
+ If any unsupported keys found, a InvalidParameter
+ exception raises.
+
+ Args:
+ support_keys: keys that must exist.
+ optional_support_keys: keys that may exist.
+ ignore_support_keys: keys should be ignored.
+
+    Assumption: args without a default value are supposed to exist.
+    Whether or not you add them to support_keys, we make sure they
+    appear when the decorated function is called.
+    We match against both args and kwargs to determine whether a
+    key appears.
+
+ Examples:
+ decorated func: func(a, b, c=3, d=4, **kwargs)
+
+ support_keys=['e'] and call func(e=5):
+ raises: InvalidParameter: missing declared arg
+ support_keys=['e'] and call func(1,2,3,4,5,e=6):
+ raises: InvalidParameter: caller sending more args
+ support_keys=['e'] and call func(1,2):
+ raises: InvalidParameter: supported keys ['e'] missing
+ support_keys=['d', 'e'] and call func(1,2,e=3):
+ raises: InvalidParameter: supported keys ['d'] missing
+ support_keys=['d', 'e'] and call func(1,2,d=4, e=3):
+ passed
+ support_keys=['d'], optional_support_keys=['e']
+ and call func(1,2, d=3):
+ passed
+ support_keys=['d'], optional_support_keys=['e']
+ and call func(1,2, d=3, e=4, f=5):
+ raises: InvalidParameter: unsupported keys ['f']
+ support_keys=['d'], optional_support_keys=['e'],
+ ignore_support_keys=['f']
+ and call func(1,2, d=3, e=4, f=5):
+            passed to decorated func: func(1,2, d=3, e=4)
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **filters):
+ wrapped_func = get_wrapped_func(func)
+ argspec = inspect.getargspec(wrapped_func)
+ wrapped_args = argspec.args
+ args_defaults = argspec.defaults
+ # wrapped_must_args are positional args caller must pass in.
+ if args_defaults:
+ wrapped_must_args = wrapped_args[:-len(args_defaults)]
+ else:
+ wrapped_must_args = wrapped_args[:]
+ # make sure any positional args without default value in
+ # decorated function should appear in args or filters.
+ if len(args) < len(wrapped_must_args):
+ remain_args = wrapped_must_args[len(args):]
+ for remain_arg in remain_args:
+ if remain_arg not in filters:
+ raise exception.InvalidParameter(
+ 'function missing declared arg %s '
+ 'while caller sends args %s' % (
+ remain_arg, args
+ )
+ )
+ # make sure args should be no more than positional args
+ # declared in decorated function.
+ if len(args) > len(wrapped_args):
+ raise exception.InvalidParameter(
+ 'function definition args %s while the caller '
+ 'sends args %s' % (
+ wrapped_args, args
+ )
+ )
+ # exist_args are positional args caller has given.
+ exist_args = dict(zip(wrapped_args, args)).keys()
+ must_support_keys = set(support_keys)
+ all_support_keys = must_support_keys | set(optional_support_keys)
+ wrapped_supported_keys = set(filters) | set(exist_args)
+ unsupported_keys = (
+ set(filters) - set(wrapped_args) -
+ all_support_keys - set(ignore_support_keys)
+ )
+ # unsupported_keys are the keys that are not in support_keys,
+ # optional_support_keys, ignore_support_keys and are not passed in
+ # by positional args. It means the decorated function may
+ # not understand these parameters.
+ if unsupported_keys:
+ raise exception.InvalidParameter(
+ 'filter keys %s are not supported for %s' % (
+ list(unsupported_keys), wrapped_func
+ )
+ )
+ # missing_keys are the keys that must exist but missing in
+ # both positional args or kwargs.
+ missing_keys = must_support_keys - wrapped_supported_keys
+ if missing_keys:
+ raise exception.InvalidParameter(
+ 'filter keys %s not found for %s' % (
+ list(missing_keys), wrapped_func
+ )
+ )
+ # We filter kwargs to eliminate ignore_support_keys in kwargs
+ # passed to decorated function.
+ filtered_filters = dict([
+ (key, value)
+ for key, value in filters.items()
+ if key not in ignore_support_keys
+ ])
+ return func(*args, **filtered_filters)
+ return wrapper
+ return decorator
+
+
+def input_filters(
+ **filters
+):
+ """Decorator to filter kwargs.
+
+    For each key in kwargs, if the key exists in filters
+    and calling filters[key] returns False, the key
+    will be removed from kwargs.
+
+ The function definition of filters[key] is
+ func(value, *args, **kwargs) compared with decorated
+ function func(*args, **kwargs)
+
+    The function is used to filter kwargs in case some
+    kwargs should be removed conditionally, depending on the
+    related filters.
+
+ Examples:
+ filters={'a': func(value, *args, **kwargs)}
+ @input_filters(**filters)
+ decorated_func(*args, **kwargs)
+ func returns False.
+        Then calling decorated_func(a=1, b=2)
+        actually calls the decorated func with only
+        b=2; a=1 is removed since it does not pass filtering.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ filtered_kwargs = {}
+ for key, value in kwargs.items():
+ if key in filters:
+ if filters[key](value, *args, **kwargs):
+ filtered_kwargs[key] = value
+ else:
+ logging.debug(
+ 'ignore filtered input key %s' % key
+ )
+ else:
+ filtered_kwargs[key] = value
+ return func(*args, **filtered_kwargs)
+ return wrapper
+ return decorator
+
+
+def _obj_equal_or_subset(check, obj):
+ """Used by output filter to check if obj is in check."""
+ if check == obj:
+ return True
+ if not issubclass(obj.__class__, check.__class__):
+ return False
+ if isinstance(obj, dict):
+ return _dict_equal_or_subset(check, obj)
+ elif isinstance(obj, list):
+ return _list_equal_or_subset(check, obj)
+ else:
+ return False
+
+
+def _list_equal_or_subset(check_list, obj_list):
+ """Used by output filter to check if obj_list is in check_list"""
+ if not isinstance(check_list, list):
+ return False
+ return set(check_list).issubset(set(obj_list))
+
+
+def _dict_equal_or_subset(check_dict, obj_dict):
+ """Used by output filter to check if obj_dict in check_dict."""
+ if not isinstance(check_dict, dict):
+ return False
+ for key, value in check_dict.items():
+ if (
+ key not in obj_dict or
+ not _obj_equal_or_subset(check_dict[key], obj_dict[key])
+ ):
+ return False
+ return True
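+
+
+# Example of the subset semantics above:
+# _dict_equal_or_subset({'a': 1}, {'a': 1, 'b': 2}) is True, since
+# every key and value of the check dict also appears in the obj dict.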
+
+
+def general_filter_callback(general_filter, obj):
+ """General filter function to filter output.
+
+    Since some fields stored in the database are json encoded and
+    in some cases we want to do a deep match on those fields when
+    filtering, output_filters and general_filter_callback are
+    introduced to deal with this kind of case.
+
+    The key 'resp_eq' gets special treatment: it checks whether
+    general_filter['resp_eq'] recursively equals or is a subset of obj.
+
+
+ Example:
+ obj: 'b'
+ general_filter: {}
+ returns: True
+
+ obj: 'b'
+ general_filter: {'resp_in': ['a', 'b']}
+ returns: True
+
+ obj: 'b'
+ general_filter: {'resp_in': ['a']}
+ returns: False
+
+ obj: 'b'
+ general_filter: {'resp_eq': 'b'}
+ returns: True
+
+ obj: 'b'
+ general_filter: {'resp_eq': 'a'}
+ returns: False
+
+ obj: 'b'
+ general_filter: {'resp_range': ('a', 'c')}
+ returns: True
+
+ obj: 'd'
+ general_filter: {'resp_range': ('a', 'c')}
+ returns: False
+
+ If there are multi keys in dict, the output is filtered
+ by and relationship.
+
+ If the general_filter is a list, the output is filtered
+ by or relationship.
+
+ Supported general filters: [
+ 'resp_eq', 'resp_in', 'resp_lt',
+ 'resp_le', 'resp_gt', 'resp_ge',
+ 'resp_match', 'resp_range'
+ ]
+ """
+ if isinstance(general_filter, list):
+ if not general_filter:
+ return True
+ return any([
+ general_filter_callback(item, obj)
+ for item in general_filter
+ ])
+ elif isinstance(general_filter, dict):
+ if 'resp_eq' in general_filter:
+ if not _obj_equal_or_subset(
+ general_filter['resp_eq'], obj
+ ):
+ return False
+ if 'resp_in' in general_filter:
+ in_filters = general_filter['resp_in']
+ if not any([
+                _obj_equal_or_subset(in_filter, obj)
+                for in_filter in in_filters
+ ]):
+ return False
+ if 'resp_lt' in general_filter:
+ if obj >= general_filter['resp_lt']:
+ return False
+ if 'resp_le' in general_filter:
+ if obj > general_filter['resp_le']:
+ return False
+ if 'resp_gt' in general_filter:
+ if obj <= general_filter['resp_gt']:
+ return False
+ if 'resp_ge' in general_filter:
+            if obj < general_filter['resp_ge']:
+ return False
+ if 'resp_match' in general_filter:
+ if not re.match(general_filter['resp_match'], obj):
+ return False
+ if 'resp_range' in general_filter:
+ resp_range = general_filter['resp_range']
+ if not isinstance(resp_range, list):
+ resp_range = [resp_range]
+ in_range = False
+ for range_start, range_end in resp_range:
+ if range_start <= obj <= range_end:
+ in_range = True
+ if not in_range:
+ return False
+ return True
+ else:
+ return True
+
+
+def filter_output(filter_callbacks, kwargs, obj, missing_ok=False):
+ """Filter ouput.
+
+ For each key in filter_callbacks, if it exists in kwargs,
+ kwargs[key] tells what we need to filter. If the call of
+ filter_callbacks[key] returns False, it tells the obj should be
+ filtered out of output.
+ """
+ for callback_key, callback_value in filter_callbacks.items():
+ if callback_key not in kwargs:
+ continue
+ if callback_key not in obj:
+ if missing_ok:
+ continue
+ else:
+ raise exception.InvalidResponse(
+ '%s is not in %s' % (callback_key, obj)
+ )
+ if not callback_value(
+ kwargs[callback_key], obj[callback_key]
+ ):
+ return False
+ return True
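+
+
+# Illustrative walk-through (the field name is an example): with
+# filter_callbacks={'os_name': general_filter_callback},
+# kwargs={'os_name': {'resp_in': ['CentOS']}} and
+# obj={'os_name': 'CentOS'}, the obj passes the filter and is kept.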
+
+
+def output_filters(missing_ok=False, **filter_callbacks):
+ """Decorator to filter output list.
+
+ Each filter_callback should have the definition like:
+ func({'resp_eq': 'a'}, 'a')
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ filtered_obj_list = []
+ obj_list = func(*args, **kwargs)
+ for obj in obj_list:
+ if filter_output(
+ filter_callbacks, kwargs, obj, missing_ok
+ ):
+ filtered_obj_list.append(obj)
+ return filtered_obj_list
+ return wrapper
+ return decorator
+
+
+def _input_validates(args_validators, kwargs_validators, *args, **kwargs):
+ """Used by input_validators to validate inputs."""
+ for i, value in enumerate(args):
+ if i < len(args_validators) and args_validators[i]:
+ args_validators[i](value)
+ for key, value in kwargs.items():
+ if kwargs_validators.get(key):
+ kwargs_validators[key](value)
+
+
+def input_validates(*args_validators, **kwargs_validators):
+ """Decorator to validate input.
+
+ Each validator should have definition like:
+ func('00:01:02:03:04:05')
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ _input_validates(
+ args_validators, kwargs_validators,
+ *args, **kwargs
+ )
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
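+
+
+# Illustrative usage, mirroring the call sites in this package
+# (check_mac is assumed to be the validator referenced elsewhere as
+# utils.check_mac):
+#
+#     @input_validates(mac=check_mac)
+#     def _add_machine_if_not_exist(mac=None, session=None, **kwargs):
+#         ...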
+
+
+def _input_validates_with_args(
+ args_validators, kwargs_validators, *args, **kwargs
+):
+ """Validate input with validators.
+
+ Each validator takes the arguments of the decorated function
+ as its arguments. The function definition is like:
+ func(value, *args, **kwargs) compared with the decorated
+ function func(*args, **kwargs).
+ """
+ for i, value in enumerate(args):
+ if i < len(args_validators) and args_validators[i]:
+ args_validators[i](value, *args, **kwargs)
+ for key, value in kwargs.items():
+ if kwargs_validators.get(key):
+ kwargs_validators[key](value, *args, **kwargs)
+
+
+def input_validates_with_args(
+ *args_validators, **kwargs_validators
+):
+ """Decorator to validate input."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ _input_validates_with_args(
+ args_validators, kwargs_validators,
+ *args, **kwargs
+ )
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
+
+
+def _output_validates_with_args(
+ kwargs_validators, obj, *args, **kwargs
+):
+ """Validate output with validators.
+
+    Each validator also receives the arguments of the decorated
+    function: for a decorated function func(*args, **kwargs), each
+    validator is called as validator(value, *args, **kwargs).
+ """
+ if isinstance(obj, list):
+ for item in obj:
+ _output_validates_with_args(
+ kwargs_validators, item, *args, **kwargs
+ )
+ return
+ if isinstance(obj, models.HelperMixin):
+ obj = obj.to_dict()
+ if not isinstance(obj, dict):
+ raise exception.InvalidResponse(
+ 'response %s type is not dict' % str(obj)
+ )
+ try:
+ for key, value in obj.items():
+ if key in kwargs_validators:
+ kwargs_validators[key](value, *args, **kwargs)
+ except Exception as error:
+ logging.exception(error)
+ raise error
+
+
+def output_validates_with_args(**kwargs_validators):
+ """Decorator to validate output.
+
+ The validator can take the arguments of the decorated
+ function as its arguments.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ obj = func(*args, **kwargs)
+ if isinstance(obj, list):
+ for obj_item in obj:
+ _output_validates_with_args(
+ kwargs_validators, obj_item,
+ *args, **kwargs
+ )
+ else:
+ _output_validates_with_args(
+ kwargs_validators, obj,
+ *args, **kwargs
+ )
+ return obj
+ return wrapper
+ return decorator
+
+
+def _output_validates(kwargs_validators, obj):
+ """Validate output.
+
+    Each validator has the following signature:
+ func(value)
+ """
+ if isinstance(obj, list):
+ for item in obj:
+ _output_validates(kwargs_validators, item)
+ return
+ if isinstance(obj, models.HelperMixin):
+ obj = obj.to_dict()
+ if not isinstance(obj, dict):
+ raise exception.InvalidResponse(
+ 'response %s type is not dict' % str(obj)
+ )
+ try:
+ for key, value in obj.items():
+ if key in kwargs_validators:
+ kwargs_validators[key](value)
+ except Exception as error:
+ logging.exception(error)
+ raise error
+
+
+def output_validates(**kwargs_validators):
+ """Decorator to validate output."""
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ obj = func(*args, **kwargs)
+ if isinstance(obj, list):
+ for obj_item in obj:
+ _output_validates(kwargs_validators, obj_item)
+ else:
+ _output_validates(kwargs_validators, obj)
+ return obj
+ return wrapper
+ return decorator
+
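+# Usage sketch (hypothetical function): each keyword validator is applied
+# to the matching key of the returned dict, or of every item for list
+# results; models.HelperMixin objects are converted via to_dict() first:
+#
+#     @output_validates(mac=check_mac)
+#     def get_machine(session, machine_id):
+#         return get_db_object(session, models.Machine, id=machine_id)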
+
+def get_db_object(session, table, exception_when_missing=True, **kwargs):
+ """Get db object.
+
+    If exception_when_missing is False and the db object cannot be
+    found, return None instead of raising an exception.
+ """
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s get db object %s from table %s',
+ id(session), kwargs, table.__name__)
+ db_object = model_filter(
+ model_query(session, table), table, **kwargs
+ ).first()
+ logging.debug(
+ 'session %s got db object %s', id(session), db_object
+ )
+ if db_object:
+ return db_object
+
+ if not exception_when_missing:
+ return None
+
+ raise exception.RecordNotExists(
+ 'Cannot find the record in table %s: %s' % (
+ table.__name__, kwargs
+ )
+ )
+
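+# Usage sketch (hypothetical ids): lookups are plain column filters, and
+# the missing-row behavior is chosen by the caller:
+#
+#     host = get_db_object(session, models.Host, id=host_id)
+#     host = get_db_object(
+#         session, models.Host, False, id=host_id
+#     )  # => None instead of RecordNotExists when no row matches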
+
+def add_db_object(session, table, exception_when_existing=True,
+ *args, **kwargs):
+ """Create db object.
+
+    If exception_when_existing is False and the db object already
+    exists, update the existing db object instead of raising an
+    exception.
+ """
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+            'session %s add object %s attributes %s to table %s',
+ id(session), args, kwargs, table.__name__)
+ argspec = inspect.getargspec(table.__init__)
+ arg_names = argspec.args[1:]
+ arg_defaults = argspec.defaults
+ if not arg_defaults:
+ arg_defaults = []
+ if not (
+ len(arg_names) - len(arg_defaults) <= len(args) <= len(arg_names)
+ ):
+ raise exception.InvalidParameter(
+                'arg names %s do not match arg values %s' % (
+ arg_names, args)
+ )
+ db_keys = dict(zip(arg_names, args))
+ if db_keys:
+ db_object = session.query(table).filter_by(**db_keys).first()
+ else:
+ db_object = None
+
+ new_object = False
+ if db_object:
+ logging.debug(
+ 'got db object %s: %s', db_keys, db_object
+ )
+ if exception_when_existing:
+ raise exception.DuplicatedRecord(
+ '%s exists in table %s' % (db_keys, table.__name__)
+ )
+ else:
+ db_object = table(**db_keys)
+ new_object = True
+
+ for key, value in kwargs.items():
+ setattr(db_object, key, value)
+
+ if new_object:
+ session.add(db_object)
+ session.flush()
+ db_object.initialize()
+ db_object.validate()
+ logging.debug(
+ 'session %s db object %s added', id(session), db_object
+ )
+ return db_object
+
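+# Usage sketch (hypothetical values): positional args map onto the model
+# __init__'s signature and double as the lookup key, while keyword args
+# are set as plain attributes on the resulting object:
+#
+#     user = add_db_object(
+#         session, models.User, False, 'admin@example.com',
+#         password='secret'
+#     )  # creates the row, or updates it when the email already exists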
+
+def list_db_objects(session, table, order_by=None, **filters):
+    """List db objects.
+
+    If order_by is given, the db objects are sorted by the given keys.
+    """
+    order_by = order_by or []
+    if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s list db objects by filters %s in table %s',
+ id(session), filters, table.__name__
+ )
+ db_objects = model_order_by(
+ model_filter(
+ model_query(session, table),
+ table,
+ **filters
+ ),
+ table,
+ order_by
+ ).all()
+ logging.debug(
+ 'session %s got listed db objects: %s',
+ id(session), db_objects
+ )
+ return db_objects
+
+
+def del_db_objects(session, table, **filters):
+ """delete db objects."""
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s delete db objects by filters %s in table %s',
+ id(session), filters, table.__name__
+ )
+ query = model_filter(
+ model_query(session, table), table, **filters
+ )
+ db_objects = query.all()
+ query.delete(synchronize_session=False)
+ logging.debug(
+ 'session %s db objects %s deleted', id(session), db_objects
+ )
+ return db_objects
+
+
+def update_db_objects(session, table, updates=None, **filters):
+    """Update db objects."""
+    updates = updates or {}
+    if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s update db objects by filters %s in table %s',
+ id(session), filters, table.__name__)
+ db_objects = model_filter(
+ model_query(session, table), table, **filters
+ ).all()
+ for db_object in db_objects:
+ logging.debug('update db object %s: %s', db_object, updates)
+ update_db_object(session, db_object, **updates)
+ logging.debug(
+ 'session %s db objects %s updated',
+ id(session), db_objects
+ )
+ return db_objects
+
+
+def update_db_object(session, db_object, **kwargs):
+ """Update db object."""
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s update db object %s by value %s',
+ id(session), db_object, kwargs
+ )
+ for key, value in kwargs.items():
+ setattr(db_object, key, value)
+ session.flush()
+ db_object.update()
+ db_object.validate()
+ logging.debug(
+ 'session %s db object %s updated',
+ id(session), db_object
+ )
+ return db_object
+
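+# Usage sketch: updates go through ordinary attribute assignment, so model
+# property setters apply; assigning password on a models.User instance,
+# for example, stores the encrypted value:
+#
+#     update_db_object(session, user, active=False)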
+
+def del_db_object(session, db_object):
+ """Delete db object."""
+ if not session:
+ raise exception.DatabaseException('session param is None')
+ with session.begin(subtransactions=True):
+ logging.debug(
+ 'session %s delete db object %s',
+ id(session), db_object
+ )
+ session.delete(db_object)
+ logging.debug(
+ 'session %s db object %s deleted',
+ id(session), db_object
+ )
+ return db_object
+
+
+def check_ip(ip):
+ """Check ip is ip address formatted."""
+ try:
+ netaddr.IPAddress(ip)
+ except Exception as error:
+ logging.exception(error)
+ raise exception.InvalidParameter(
+            'ip address %s format incorrect' % ip
+ )
+
+
+def check_mac(mac):
+ """Check mac is mac address formatted."""
+ try:
+ netaddr.EUI(mac)
+ except Exception as error:
+ logging.exception(error)
+ raise exception.InvalidParameter(
+ 'invalid mac address %s' % mac
+ )
+
+
+NAME_PATTERN = re.compile(r'[a-zA-Z0-9][a-zA-Z0-9_-]*')
+
+
+def check_name(name):
+ """Check name meeting name format requirement."""
+ if not NAME_PATTERN.match(name):
+ raise exception.InvalidParameter(
+ 'name %s does not match the pattern %s' % (
+ name, NAME_PATTERN.pattern
+ )
+ )
+
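+# Illustrative values: check_ip('10.1.1.1'), check_mac('00:01:02:03:04:05')
+# and check_name('host-1') all pass silently, while check_name('-host')
+# raises InvalidParameter because NAME_PATTERN requires an alphanumeric
+# first character.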
+
+def _check_ipmi_credentials_ip(ip):
+ check_ip(ip)
+
+
+def check_ipmi_credentials(ipmi_credentials):
+ """Check ipmi credentials format is correct."""
+ if not ipmi_credentials:
+ return
+ if not isinstance(ipmi_credentials, dict):
+ raise exception.InvalidParameter(
+            'invalid ipmi credentials %s' % ipmi_credentials
+        )
+ for key in ipmi_credentials:
+ if key not in ['ip', 'username', 'password']:
+ raise exception.InvalidParameter(
+ 'unrecognized field %s in ipmi credentials %s' % (
+ key, ipmi_credentials
+ )
+ )
+ for key in ['ip', 'username', 'password']:
+ if key not in ipmi_credentials:
+ raise exception.InvalidParameter(
+ 'no field %s in ipmi credentials %s' % (
+ key, ipmi_credentials
+ )
+ )
+ check_ipmi_credential_field = '_check_ipmi_credentials_%s' % key
+ this_module = globals()
+ if check_ipmi_credential_field in this_module:
+ this_module[check_ipmi_credential_field](
+ ipmi_credentials[key]
+ )
+ else:
+ logging.debug(
+ 'function %s is not defined', check_ipmi_credential_field
+ )
+
+
+def _check_switch_credentials_version(version):
+ if version not in ['1', '2c', '3']:
+ raise exception.InvalidParameter(
+ 'unknown snmp version %s' % version
+ )
+
+
+def check_switch_credentials(credentials):
+ """Check switch credentials format is correct."""
+ if not credentials:
+ return
+ if not isinstance(credentials, dict):
+ raise exception.InvalidParameter(
+ 'credentials %s is not dict' % credentials
+ )
+ for key in credentials:
+ if key not in ['version', 'community']:
+ raise exception.InvalidParameter(
+ 'unrecognized key %s in credentials %s' % (key, credentials)
+ )
+ for key in ['version', 'community']:
+ if key not in credentials:
+ raise exception.InvalidParameter(
+ 'there is no %s field in credentials %s' % (key, credentials)
+ )
+
+ key_check_func_name = '_check_switch_credentials_%s' % key
+ this_module = globals()
+ if key_check_func_name in this_module:
+ this_module[key_check_func_name](
+ credentials[key]
+ )
+ else:
+ logging.debug(
+ 'function %s is not defined',
+ key_check_func_name
+ )
diff --git a/compass-tasks/db/callback.py b/compass-tasks/db/callback.py
new file mode 100644
index 0000000..35798bc
--- /dev/null
+++ b/compass-tasks/db/callback.py
@@ -0,0 +1,204 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metadata Callback methods."""
+import logging
+import netaddr
+import random
+import re
+import socket
+
+from compass.db import exception
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+CALLBACK_GLOBALS = globals()
+CALLBACK_LOCALS = locals()
+CALLBACK_CONFIGS = util.load_configs(
+ setting.CALLBACK_DIR,
+ config_name_suffix='.py',
+ env_globals=CALLBACK_GLOBALS,
+ env_locals=CALLBACK_LOCALS
+)
+for callback_config in CALLBACK_CONFIGS:
+ CALLBACK_LOCALS.update(callback_config)
+
+
+def default_proxy(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_PROXY
+
+
+def proxy_options(name, **kwargs):
+ return [setting.COMPASS_SUPPORTED_PROXY]
+
+
+def default_noproxy(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
+
+
+def noproxy_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
+
+
+def default_ntp_server(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_NTP_SERVER
+
+
+def ntp_server_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_NTP_SERVER
+
+
+def default_dns_servers(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DNS_SERVERS
+
+
+def dns_servers_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DNS_SERVERS
+
+
+def default_domain(name, **kwargs):
+ if setting.COMPASS_SUPPORTED_DOMAINS:
+ return setting.COMPASS_SUPPORTED_DOMAINS[0]
+ else:
+ return None
+
+
+def domain_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DOMAINS
+
+
+def default_search_path(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DOMAINS
+
+
+def search_path_options(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DOMAINS
+
+
+def default_gateway(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY
+
+
+def default_gateway_options(name, **kwargs):
+ return [setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY]
+
+
+def default_localrepo(name, **kwargs):
+ return setting.COMPASS_SUPPORTED_LOCAL_REPO
+
+
+def default_localrepo_options(name, **kwargs):
+ return [setting.COMPASS_SUPPORTED_LOCAL_REPO]
+
+
+def autofill_callback_default(name, config, **kwargs):
+ if config is None:
+ if (
+ 'autofill_types' not in kwargs or
+ not (set(kwargs['autofill_types']) & set(kwargs))
+ ):
+ return None
+ if 'default_value' not in kwargs:
+ return None
+ return kwargs['default_value']
+ return config
+
+
+def autofill_callback_random_option(name, config, **kwargs):
+ if config is None:
+ if (
+ 'autofill_types' not in kwargs or
+ not (set(kwargs['autofill_types']) & set(kwargs))
+ ):
+ return None
+ if 'options' not in kwargs or not kwargs['options']:
+ return None
+ return random.choice(kwargs['options'])
+ return config
+
+
+def autofill_no_proxy(name, config, **kwargs):
+ logging.debug(
+ 'autofill %s config %s by params %s',
+ name, config, kwargs
+ )
+ if 'cluster' in kwargs:
+ if config is None:
+ config = []
+ if 'default_value' in kwargs:
+ for default_no_proxy in kwargs['default_value']:
+ if default_no_proxy and default_no_proxy not in config:
+ config.append(default_no_proxy)
+ cluster = kwargs['cluster']
+ for clusterhost in cluster.clusterhosts:
+ host = clusterhost.host
+ hostname = host.name
+ if hostname not in config:
+ config.append(hostname)
+ for host_network in host.host_networks:
+ if host_network.is_mgmt:
+ ip = host_network.ip
+ if ip not in config:
+ config.append(ip)
+ if not config:
+ return config
+ return [no_proxy for no_proxy in config if no_proxy]
+
+
+def autofill_network_mapping(name, config, **kwargs):
+ logging.debug(
+ 'autofill %s config %s by params %s',
+ name, config, kwargs
+ )
+ if not config:
+ return config
+ if isinstance(config, basestring):
+ config = {
+ 'interface': config,
+ 'subnet': None
+ }
+ if not isinstance(config, dict):
+ return config
+ if 'interface' not in config:
+ return config
+ subnet = None
+ interface = config['interface']
+ if 'cluster' in kwargs:
+ cluster = kwargs['cluster']
+ for clusterhost in cluster.clusterhosts:
+ host = clusterhost.host
+ for host_network in host.host_networks:
+ if host_network.interface == interface:
+ subnet = host_network.subnet.subnet
+ elif 'clusterhost' in kwargs:
+ clusterhost = kwargs['clusterhost']
+ host = clusterhost.host
+ for host_network in host.host_networks:
+ if host_network.interface == interface:
+ subnet = host_network.subnet.subnet
+ if not subnet:
+ raise exception.InvalidParameter(
+ 'interface %s not found in host(s)' % interface
+ )
+ if 'subnet' not in config or not config['subnet']:
+ config['subnet'] = subnet
+ else:
+ if config['subnet'] != subnet:
+ raise exception.InvalidParameter(
+ 'subnet %s in config is not equal to subnet %s in hosts' % (
+ config['subnet'], subnet
+ )
+ )
+ return config
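+
+# Usage sketch (hypothetical interface and subnet values): a bare
+# interface name is normalized into a dict and its subnet is resolved
+# from the host networks of the given cluster or clusterhost:
+#
+#     autofill_network_mapping('install', 'eth1', clusterhost=clusterhost)
+#     # => {'interface': 'eth1', 'subnet': '10.145.88.0/23'}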
diff --git a/compass-tasks/db/config_validation/__init__.py b/compass-tasks/db/config_validation/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-tasks/db/config_validation/__init__.py
diff --git a/compass-tasks/db/config_validation/default_validator.py b/compass-tasks/db/config_validation/default_validator.py
new file mode 100644
index 0000000..224447f
--- /dev/null
+++ b/compass-tasks/db/config_validation/default_validator.py
@@ -0,0 +1,131 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Default config validation function."""
+
+from sqlalchemy import or_
+
+from compass.db.models import OSConfigField
+from compass.db.models import OSConfigMetadata
+from compass.db import validator
+
+MAPPER = {
+ "os_id": {
+ "metaTable": OSConfigMetadata,
+ "metaFieldTable": OSConfigField
+ }
+ # "adapter_id": {
+ # "metaTable": AdapterConfigMetadata,
+ # "metaFieldTable": AdapterConfigField
+ # }
+}
+
+
+def validate_config(session, config, id_name, id_value, patch=True):
+ """Validates config.
+
+ Validates the given config value according to the config
+    metadata of the associated os_id or adapter_id. Returns
+ a tuple (status, message).
+ """
+ if id_name not in MAPPER.keys():
+ return (False, "Invalid id type %s" % id_name)
+
+ meta_table = MAPPER[id_name]['metaTable']
+ metafield_table = MAPPER[id_name]['metaFieldTable']
+ with session.begin(subtransactions=True):
+        name_col = getattr(meta_table, 'name')
+ id_col = getattr(meta_table, id_name)
+
+ return _validate_config_helper(session, config,
+ name_col, id_col, id_value,
+ meta_table, metafield_table,
+ patch)
+
+
+def _validate_config_helper(session, config,
+ name_col, id_col, id_value,
+ meta_table, metafield_table, patch=True):
+
+ with session.begin(subtransactions=True):
+ for elem in config:
+
+ obj = session.query(meta_table).filter(name_col == elem)\
+                .filter(or_(id_col.is_(None),
+ id_col == id_value)).first()
+
+ if not obj and "_type" not in config[elem]:
+ return (False, "Invalid metadata '%s'!" % elem)
+
+ if "_type" in config[elem]:
+ # Metadata is a variable
+ metadata_name = config[elem]['_type']
+ obj = session.query(meta_table).filter_by(name=metadata_name)\
+ .first()
+
+ if not obj:
+ err_msg = ("Invalid metatdata '%s' or missing '_type'"
+ "to indicate this is a variable metatdata."
+ % elem)
+ return (False, err_msg)
+
+ # TODO(Grace): validate metadata here
+ del config[elem]['_type']
+
+ fields = obj.fields
+
+ if not fields:
+ is_valid, message = _validate_config_helper(session,
+ config[elem],
+ name_col, id_col,
+ id_value,
+ meta_table,
+ metafield_table,
+ patch)
+ if not is_valid:
+ return (False, message)
+
+ else:
+ field_config = config[elem]
+ for key in field_config:
+ field = session.query(metafield_table)\
+ .filter_by(field=key).first()
+ if not field:
+ # The field is not in schema
+ return (False, "Invalid field '%s'!" % key)
+
+ value = field_config[key]
+ if field.is_required and value is None:
+ # The value of this field is required
+ # and cannot be none
+ err = "The value of field '%s' cannot be null!" % key
+ return (False, err)
+
+ if field.validator:
+ func = getattr(validator, field.validator)
+ if not func or not func(value):
+ err_msg = ("The value of the field '%s' is "
+ "invalid format or None!" % key)
+ return (False, err_msg)
+
+ # This is a PUT request. We need to check presence of all
+ # required fields.
+ if not patch:
+ for field in fields:
+ name = field.field
+ if field.is_required and name not in field_config:
+ return (False,
+ "Missing required field '%s'" % name)
+
+ return (True, None)
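+
+# Usage sketch (hypothetical os_id): validation reports problems through
+# the returned (status, message) tuple instead of raising:
+#
+#     is_valid, message = validate_config(
+#         session, {'unknown_key': {}}, 'os_id', os_id
+#     )
+#     # => (False, "Invalid metadata 'unknown_key'!")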
diff --git a/compass-tasks/db/config_validation/extension/__init__.py b/compass-tasks/db/config_validation/extension/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-tasks/db/config_validation/extension/__init__.py
diff --git a/compass-tasks/db/config_validation/extension/openstack.py b/compass-tasks/db/config_validation/extension/openstack.py
new file mode 100644
index 0000000..6b3af69
--- /dev/null
+++ b/compass-tasks/db/config_validation/extension/openstack.py
@@ -0,0 +1,18 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def validate_cluster_config():
+ # TODO(xiaodong): Add openstack specific validation here.
+ pass
diff --git a/compass-tasks/db/exception.py b/compass-tasks/db/exception.py
new file mode 100644
index 0000000..44556c9
--- /dev/null
+++ b/compass-tasks/db/exception.py
@@ -0,0 +1,116 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Custom exception"""
+import traceback
+
+
+class DatabaseException(Exception):
+ """Base class for all database exceptions."""
+ def __init__(self, message):
+ super(DatabaseException, self).__init__(message)
+ self.traceback = traceback.format_exc()
+ self.status_code = 400
+
+ def to_dict(self):
+ return {'message': str(self)}
+
+
+class RecordNotExists(DatabaseException):
+ """Define the exception for referring non-existing object in DB."""
+ def __init__(self, message):
+ super(RecordNotExists, self).__init__(message)
+ self.status_code = 404
+
+
+class DuplicatedRecord(DatabaseException):
+ """Define the exception for trying to insert an existing object in DB."""
+ def __init__(self, message):
+ super(DuplicatedRecord, self).__init__(message)
+ self.status_code = 409
+
+
+class Unauthorized(DatabaseException):
+ """Define the exception for invalid user login."""
+ def __init__(self, message):
+ super(Unauthorized, self).__init__(message)
+ self.status_code = 401
+
+
+class UserDisabled(DatabaseException):
+ """Define the exception that a disabled user tries to do some operations.
+
+ """
+ def __init__(self, message):
+ super(UserDisabled, self).__init__(message)
+ self.status_code = 403
+
+
+class Forbidden(DatabaseException):
+ """Define the exception that a user is trying to make some action
+
+ without the right permission.
+
+ """
+ def __init__(self, message):
+ super(Forbidden, self).__init__(message)
+ self.status_code = 403
+
+
+class NotAcceptable(DatabaseException):
+ """The data is not acceptable."""
+ def __init__(self, message):
+ super(NotAcceptable, self).__init__(message)
+ self.status_code = 406
+
+
+class InvalidParameter(DatabaseException):
+ """Define the exception that the request has invalid or missing parameters.
+
+ """
+ def __init__(self, message):
+ super(InvalidParameter, self).__init__(message)
+ self.status_code = 400
+
+
+class InvalidResponse(DatabaseException):
+ """Define the exception that the response is invalid.
+
+ """
+ def __init__(self, message):
+ super(InvalidResponse, self).__init__(message)
+ self.status_code = 400
+
+
+class MultiDatabaseException(DatabaseException):
+ """Define the exception composites with multi exceptions."""
+ def __init__(self, exceptions):
+ super(MultiDatabaseException, self).__init__('multi exceptions')
+ self.exceptions = exceptions
+ self.status_code = 400
+
+ @property
+ def traceback(self):
+ tracebacks = []
+ for exception in self.exceptions:
+ tracebacks.append(exception.trackback)
+
+ def to_dict(self):
+ dict_info = super(MultiDatabaseException, self).to_dict()
+ dict_info.update({
+ 'exceptions': [
+ exception.to_dict() for exception in self.exceptions
+ ]
+ })
+ return dict_info
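+
+# Usage sketch: API layers can map these exceptions onto HTTP-style
+# responses through status_code and to_dict():
+#
+#     try:
+#         raise RecordNotExists('no such host')
+#     except DatabaseException as error:
+#         body, code = error.to_dict(), error.status_code  # ({...}, 404)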
diff --git a/compass-tasks/db/models.py b/compass-tasks/db/models.py
new file mode 100644
index 0000000..d4b0324
--- /dev/null
+++ b/compass-tasks/db/models.py
@@ -0,0 +1,1924 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Database model"""
+import copy
+import datetime
+import logging
+import netaddr
+import re
+import simplejson as json
+
+from sqlalchemy import BigInteger
+from sqlalchemy import Boolean
+from sqlalchemy import Column
+from sqlalchemy import ColumnDefault
+from sqlalchemy import DateTime
+from sqlalchemy import Enum
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.ext.hybrid import hybrid_property
+from sqlalchemy import Float
+from sqlalchemy import ForeignKey
+from sqlalchemy import Integer
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import Text
+from sqlalchemy.types import TypeDecorator
+from sqlalchemy import UniqueConstraint
+
+from compass.db import exception
+from compass.utils import util
+
+
+BASE = declarative_base()
+
+
+class JSONEncoded(TypeDecorator):
+ """Represents an immutable structure as a json-encoded string."""
+
+ impl = Text
+
+ def process_bind_param(self, value, dialect):
+ if value is not None:
+ value = json.dumps(value)
+ return value
+
+ def process_result_value(self, value, dialect):
+ if value is not None:
+ value = json.loads(value)
+ return value
+
+
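+# Usage sketch: columns declared as Column(JSONEncoded, default={}) accept
+# dicts and lists transparently; values are dumped to JSON text on write
+# and loaded back on read, e.g. (hypothetical assignment):
+#
+#     host.os_config = {'general': {'language': 'EN'}}  # stored as JSON
+
+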
+class TimestampMixin(object):
+ """Provides table fields for each row created/updated timestamp."""
+ created_at = Column(DateTime, default=lambda: datetime.datetime.now())
+ updated_at = Column(DateTime, default=lambda: datetime.datetime.now(),
+ onupdate=lambda: datetime.datetime.now())
+
+
+class HelperMixin(object):
+ """Provides general fuctions for all compass table models."""
+
+ def initialize(self):
+ self.update()
+
+ def update(self):
+ pass
+
+ @staticmethod
+ def type_compatible(value, column_type):
+ """Check if value type is compatible with the column type."""
+ if value is None:
+ return True
+ if not hasattr(column_type, 'python_type'):
+ return True
+ column_python_type = column_type.python_type
+ if isinstance(value, column_python_type):
+ return True
+ if issubclass(column_python_type, basestring):
+ return isinstance(value, basestring)
+ if column_python_type in [int, long]:
+ return type(value) in [int, long]
+ if column_python_type in [float]:
+ return type(value) in [float]
+ if column_python_type in [bool]:
+ return type(value) in [bool]
+ return False
+
+ def validate(self):
+ """Generate validate function to make sure the record is legal."""
+ columns = self.__mapper__.columns
+ for key, column in columns.items():
+ value = getattr(self, key)
+ if not self.type_compatible(value, column.type):
+ raise exception.InvalidParameter(
+ 'column %s value %r type is unexpected: %s' % (
+ key, value, column.type
+ )
+ )
+
+ def to_dict(self):
+ """General function to convert record to dict.
+
+ Convert all columns not starting with '_' to
+ {<column_name>: <column_value>}
+ """
+ keys = self.__mapper__.columns.keys()
+ dict_info = {}
+ for key in keys:
+ if key.startswith('_'):
+ continue
+ value = getattr(self, key)
+ if value is not None:
+ if isinstance(value, datetime.datetime):
+ value = util.format_datetime(value)
+ dict_info[key] = value
+ return dict_info
+
+
+class StateMixin(TimestampMixin, HelperMixin):
+ """Provides general fields and functions for state related table."""
+
+ state = Column(
+ Enum(
+ 'UNINITIALIZED', 'INITIALIZED', 'UPDATE_PREPARING',
+ 'INSTALLING', 'SUCCESSFUL', 'ERROR'
+ ),
+ ColumnDefault('UNINITIALIZED')
+ )
+ percentage = Column(Float, default=0.0)
+ message = Column(Text, default='')
+ severity = Column(
+ Enum('INFO', 'WARNING', 'ERROR'),
+ ColumnDefault('INFO')
+ )
+ ready = Column(Boolean, default=False)
+
+ def update(self):
+        # In the state table, some field information is redundant.
+        # This update makes sure all related fields are set to a
+        # consistent state.
+ if self.ready:
+ self.state = 'SUCCESSFUL'
+ if self.state in ['UNINITIALIZED', 'INITIALIZED']:
+ self.percentage = 0.0
+ self.severity = 'INFO'
+ self.message = ''
+ if self.state == 'INSTALLING':
+ if self.severity == 'ERROR':
+ self.state = 'ERROR'
+ elif self.percentage >= 1.0:
+ self.state = 'SUCCESSFUL'
+ self.percentage = 1.0
+ if self.state == 'SUCCESSFUL':
+ self.percentage = 1.0
+ super(StateMixin, self).update()
+
+
+class LogHistoryMixin(TimestampMixin, HelperMixin):
+ """Provides general fields and functions for LogHistory related tables."""
+ position = Column(Integer, default=0)
+ partial_line = Column(Text, default='')
+ percentage = Column(Float, default=0.0)
+ message = Column(Text, default='')
+ severity = Column(
+ Enum('ERROR', 'WARNING', 'INFO'),
+ ColumnDefault('INFO')
+ )
+ line_matcher_name = Column(
+ String(80), default='start'
+ )
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ if not self.filename:
+ raise exception.InvalidParameter(
+ 'filename is not set in %s' % self.id
+ )
+
+
+class HostNetwork(BASE, TimestampMixin, HelperMixin):
+ """Host network table."""
+ __tablename__ = 'host_network'
+
+ id = Column(Integer, primary_key=True)
+ host_id = Column(
+ Integer,
+ ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ interface = Column(
+ String(80), nullable=False)
+ subnet_id = Column(
+ Integer,
+ ForeignKey('subnet.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ user_id = Column(Integer, ForeignKey('user.id'))
+ ip_int = Column(BigInteger, nullable=False)
+ is_mgmt = Column(Boolean, default=False)
+ is_promiscuous = Column(Boolean, default=False)
+
+ __table_args__ = (
+ UniqueConstraint('host_id', 'interface', name='interface_constraint'),
+ UniqueConstraint('ip_int', 'user_id', name='ip_constraint')
+ )
+
+ def __init__(self, host_id, interface, user_id, **kwargs):
+ self.host_id = host_id
+ self.interface = interface
+ self.user_id = user_id
+ super(HostNetwork, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'HostNetwork[%s=%s]' % (self.interface, self.ip)
+
+ @property
+ def ip(self):
+ return str(netaddr.IPAddress(self.ip_int))
+
+ @ip.setter
+ def ip(self, value):
+ self.ip_int = int(netaddr.IPAddress(value))
+
+ @property
+ def netmask(self):
+ return str(netaddr.IPNetwork(self.subnet.subnet).netmask)
+
+ def update(self):
+ self.host.config_validated = False
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(HostNetwork, self).validate()
+ if not self.subnet:
+ raise exception.InvalidParameter(
+ 'subnet is not set in %s interface %s' % (
+ self.host_id, self.interface
+ )
+ )
+ if not self.ip_int:
+ raise exception.InvalidParameter(
+ 'ip is not set in %s interface %s' % (
+ self.host_id, self.interface
+ )
+ )
+ ip = netaddr.IPAddress(self.ip_int)
+ subnet = netaddr.IPNetwork(self.subnet.subnet)
+ if ip not in subnet:
+ raise exception.InvalidParameter(
+ 'ip %s is not in subnet %s' % (
+ str(ip), str(subnet)
+ )
+ )
+
+ def to_dict(self):
+ dict_info = super(HostNetwork, self).to_dict()
+ dict_info['ip'] = self.ip
+ dict_info['interface'] = self.interface
+ dict_info['netmask'] = self.netmask
+ dict_info['subnet'] = self.subnet.subnet
+ dict_info['user_id'] = self.user_id
+ return dict_info
+
+
+class ClusterHostLogHistory(BASE, LogHistoryMixin):
+ """clusterhost installing log history for each file.
+
+ """
+ __tablename__ = 'clusterhost_log_history'
+
+ clusterhost_id = Column(
+ 'id', Integer,
+ ForeignKey('clusterhost.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+ filename = Column(String(80), primary_key=True, nullable=False)
+ cluster_id = Column(
+ Integer,
+ ForeignKey('cluster.id')
+ )
+ host_id = Column(
+ Integer,
+ ForeignKey('host.id')
+ )
+
+ def __init__(self, clusterhost_id, filename, **kwargs):
+ self.clusterhost_id = clusterhost_id
+ self.filename = filename
+ super(ClusterHostLogHistory, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'ClusterHostLogHistory[%s:%s]' % (
+ self.clusterhost_id, self.filename
+ )
+
+ def initialize(self):
+ self.cluster_id = self.clusterhost.cluster_id
+ self.host_id = self.clusterhost.host_id
+ super(ClusterHostLogHistory, self).initialize()
+
+
+class HostLogHistory(BASE, LogHistoryMixin):
+ """host installing log history for each file.
+
+ """
+ __tablename__ = 'host_log_history'
+
+ id = Column(
+ Integer,
+ ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True)
+ filename = Column(String(80), primary_key=True, nullable=False)
+
+ def __init__(self, id, filename, **kwargs):
+ self.id = id
+ self.filename = filename
+ super(HostLogHistory, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'HostLogHistory[%s:%s]' % (self.id, self.filename)
+
+
+class ClusterHostState(BASE, StateMixin):
+ """ClusterHost state table."""
+ __tablename__ = 'clusterhost_state'
+
+ id = Column(
+ Integer,
+ ForeignKey(
+ 'clusterhost.id',
+ onupdate='CASCADE', ondelete='CASCADE'
+ ),
+ primary_key=True
+ )
+
+ def __str__(self):
+ return 'ClusterHostState[%s state %s percentage %s]' % (
+ self.id, self.state, self.percentage
+ )
+
+ def update(self):
+ """Update clusterhost state.
+
+ When clusterhost state is updated, the underlying host state
+ may be updated accordingly.
+ """
+ super(ClusterHostState, self).update()
+ host_state = self.clusterhost.host.state
+ if self.state == 'INITIALIZED':
+ if host_state.state in ['UNINITIALIZED', 'UPDATE_PREPARING']:
+ host_state.state = 'INITIALIZED'
+ host_state.update()
+ elif self.state == 'INSTALLING':
+ if host_state.state in [
+ 'UNINITIALIZED', 'UPDATE_PREPARING', 'INITIALIZED'
+ ]:
+ host_state.state = 'INSTALLING'
+ host_state.update()
+ elif self.state == 'SUCCESSFUL':
+ if host_state.state != 'SUCCESSFUL':
+ host_state.state = 'SUCCESSFUL'
+ host_state.update()
+
+
+class ClusterHost(BASE, TimestampMixin, HelperMixin):
+ """ClusterHost table."""
+ __tablename__ = 'clusterhost'
+
+ clusterhost_id = Column('id', Integer, primary_key=True)
+ cluster_id = Column(
+ Integer,
+ ForeignKey('cluster.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ host_id = Column(
+ Integer,
+ ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ # the list of role names.
+ _roles = Column('roles', JSONEncoded, default=[])
+ _patched_roles = Column('patched_roles', JSONEncoded, default=[])
+ config_step = Column(String(80), default='')
+ package_config = Column(JSONEncoded, default={})
+ config_validated = Column(Boolean, default=False)
+ deployed_package_config = Column(JSONEncoded, default={})
+
+ log_histories = relationship(
+ ClusterHostLogHistory,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('clusterhost')
+ )
+
+ __table_args__ = (
+ UniqueConstraint('cluster_id', 'host_id', name='constraint'),
+ )
+
+ state = relationship(
+ ClusterHostState,
+ uselist=False,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('clusterhost')
+ )
+
+ def __init__(self, cluster_id, host_id, **kwargs):
+ self.cluster_id = cluster_id
+ self.host_id = host_id
+ self.state = ClusterHostState()
+ super(ClusterHost, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'ClusterHost[%s:%s]' % (self.clusterhost_id, self.name)
+
+ def update(self):
+ if self.host.reinstall_os:
+            if self.state.state in ['SUCCESSFUL', 'ERROR']:
+ if self.config_validated:
+ self.state.state = 'INITIALIZED'
+ else:
+ self.state.state = 'UNINITIALIZED'
+ self.cluster.update()
+ self.host.update()
+ self.state.update()
+ super(ClusterHost, self).update()
+
+ @property
+ def name(self):
+ return '%s.%s' % (self.host.name, self.cluster.name)
+
+ @property
+ def patched_package_config(self):
+ return self.package_config
+
+ @patched_package_config.setter
+ def patched_package_config(self, value):
+ package_config = copy.deepcopy(self.package_config)
+ self.package_config = util.merge_dict(package_config, value)
+ logging.debug(
+ 'patch clusterhost %s package_config: %s',
+ self.clusterhost_id, value
+ )
+ self.config_validated = False
+
+ @property
+ def put_package_config(self):
+ return self.package_config
+
+ @put_package_config.setter
+ def put_package_config(self, value):
+ package_config = copy.deepcopy(self.package_config)
+ package_config.update(value)
+ self.package_config = package_config
+ logging.debug(
+ 'put clusterhost %s package_config: %s',
+ self.clusterhost_id, value
+ )
+ self.config_validated = False
+
+ @property
+ def patched_os_config(self):
+ return self.host.os_config
+
+ @patched_os_config.setter
+ def patched_os_config(self, value):
+ host = self.host
+ host.patched_os_config = value
+
+ @property
+ def put_os_config(self):
+ return self.host.os_config
+
+ @put_os_config.setter
+ def put_os_config(self, value):
+ host = self.host
+ host.put_os_config = value
+
+ @property
+ def deployed_os_config(self):
+ return self.host.deployed_os_config
+
+ @deployed_os_config.setter
+ def deployed_os_config(self, value):
+ host = self.host
+ host.deployed_os_config = value
+
+ @hybrid_property
+ def os_name(self):
+ return self.host.os_name
+
+ @os_name.expression
+ def os_name(cls):
+ return cls.host.os_name
+
+ @hybrid_property
+ def clustername(self):
+ return self.cluster.name
+
+ @clustername.expression
+ def clustername(cls):
+ return cls.cluster.name
+
+ @hybrid_property
+ def hostname(self):
+ return self.host.hostname
+
+ @hostname.expression
+ def hostname(cls):
+ return Host.hostname
+
+ @property
+ def distributed_system_installed(self):
+ return self.state.state == 'SUCCESSFUL'
+
+ @property
+    def reinstall_os(self):
+ return self.host.reinstall_os
+
+ @property
+ def reinstall_distributed_system(self):
+ return self.cluster.reinstall_distributed_system
+
+ @property
+ def os_installed(self):
+ return self.host.os_installed
+
+ @property
+ def roles(self):
+        # Only roles that exist in the flavor roles are returned,
+        # sorted in the order defined in the flavor roles, with
+        # duplicate role names removed.
+        # The returned value is a list of dicts like
+        # [{'name': 'allinone', 'optional': False}]
+ role_names = list(self._roles)
+ if not role_names:
+ return []
+ cluster_roles = self.cluster.flavor['roles']
+ if not cluster_roles:
+ return []
+ roles = []
+ for cluster_role in cluster_roles:
+ if cluster_role['name'] in role_names:
+ roles.append(cluster_role)
+ return roles
+
+ @roles.setter
+ def roles(self, value):
+ """value should be a list of role name."""
+ self._roles = list(value)
+ self.config_validated = False
+
+ @property
+ def patched_roles(self):
+ patched_role_names = list(self._patched_roles)
+ if not patched_role_names:
+ return []
+ cluster_roles = self.cluster.flavor['roles']
+ if not cluster_roles:
+ return []
+ roles = []
+ for cluster_role in cluster_roles:
+ if cluster_role['name'] in patched_role_names:
+ roles.append(cluster_role)
+ return roles
+
+ @patched_roles.setter
+ def patched_roles(self, value):
+ """value should be a list of role name."""
+ # if value is an empty list, we empty the field
+ if value:
+ roles = list(self._roles)
+ roles.extend(value)
+ self._roles = roles
+ patched_roles = list(self._patched_roles)
+ patched_roles.extend(value)
+ self._patched_roles = patched_roles
+ self.config_validated = False
+ else:
+ self._patched_roles = list(value)
+ self.config_validated = False
+
+ @hybrid_property
+ def owner(self):
+ return self.cluster.owner
+
+ @owner.expression
+ def owner(cls):
+ return cls.cluster.owner
+
+ def state_dict(self):
+ """Get clusterhost state dict.
+
+        The clusterhost state_dict is different from
+        clusterhost.state.to_dict: state_dict shows the progress of
+        both installing the os on the host and installing the
+        distributed system on the clusterhost, while
+        clusterhost.state.to_dict only shows the progress of
+        installing the distributed system on the clusterhost.
+ """
+ cluster = self.cluster
+ host = self.host
+ host_state = host.state_dict()
+ if not cluster.flavor_name:
+ return host_state
+ clusterhost_state = self.state.to_dict()
+ if clusterhost_state['state'] in ['ERROR', 'SUCCESSFUL']:
+ return clusterhost_state
+ if (
+            clusterhost_state['state'] == 'INSTALLING' and
+ clusterhost_state['percentage'] > 0
+ ):
+ clusterhost_state['percentage'] = min(
+ 1.0, (
+ 0.5 + clusterhost_state['percentage'] / 2
+ )
+ )
+ return clusterhost_state
+
+ host_state['percentage'] = host_state['percentage'] / 2
+ if host_state['state'] == 'SUCCESSFUL':
+ host_state['state'] = 'INSTALLING'
+ return host_state
+
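+    # Worked example of the blending above: OS install progress is halved
+    # into [0, 0.5] and package install progress is mapped into
+    # [0.5, 1.0], so a host at 60% OS install reports 0.3 overall, while
+    # a clusterhost at 40% package install reports 0.5 + 0.4 / 2 = 0.7.
+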
+ def to_dict(self):
+ dict_info = self.host.to_dict()
+ dict_info.update(super(ClusterHost, self).to_dict())
+ state_dict = self.state_dict()
+ dict_info.update({
+ 'distributed_system_installed': self.distributed_system_installed,
+ 'reinstall_distributed_system': self.reinstall_distributed_system,
+ 'owner': self.owner,
+ 'clustername': self.clustername,
+ 'name': self.name,
+ 'state': state_dict['state']
+ })
+ dict_info['roles'] = self.roles
+ dict_info['patched_roles'] = self.patched_roles
+ return dict_info
+
+
+class HostState(BASE, StateMixin):
+ """Host state table."""
+ __tablename__ = 'host_state'
+
+ id = Column(
+ Integer,
+ ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+
+ def __str__(self):
+ return 'HostState[%s state %s percentage %s]' % (
+ self.id, self.state, self.percentage
+ )
+
+ def update(self):
+ """Update host state.
+
+ When host state is updated, all clusterhosts on the
+ host will update their state if necessary.
+ """
+ super(HostState, self).update()
+ host = self.host
+        if self.state == 'INSTALLING':
+            host.reinstall_os = False
+            for clusterhost in self.host.clusterhosts:
+                if clusterhost.state.state in [
+                    'SUCCESSFUL', 'ERROR'
+                ]:
+                    clusterhost.state.state = 'INSTALLING'
+                    clusterhost.state.update()
+        elif self.state == 'UNINITIALIZED':
+            for clusterhost in self.host.clusterhosts:
+                if clusterhost.state.state in [
+                    'INITIALIZED', 'INSTALLING', 'SUCCESSFUL', 'ERROR'
+                ]:
+                    clusterhost.state.state = 'UNINITIALIZED'
+                    clusterhost.state.update()
+        elif self.state == 'UPDATE_PREPARING':
+            for clusterhost in self.host.clusterhosts:
+                if clusterhost.state.state in [
+                    'INITIALIZED', 'INSTALLING', 'SUCCESSFUL', 'ERROR'
+                ]:
+                    clusterhost.state.state = 'UPDATE_PREPARING'
+                    clusterhost.state.update()
+        elif self.state == 'INITIALIZED':
+            for clusterhost in self.host.clusterhosts:
+                if clusterhost.state.state in [
+                    'INSTALLING', 'SUCCESSFUL', 'ERROR'
+                ]:
+                    clusterhost.state.state = 'INITIALIZED'
+                    clusterhost.state.update()
+
+
+class Host(BASE, TimestampMixin, HelperMixin):
+ """Host table."""
+ __tablename__ = 'host'
+
+ name = Column(String(80), nullable=True)
+ config_step = Column(String(80), default='')
+ os_config = Column(JSONEncoded, default={})
+ config_validated = Column(Boolean, default=False)
+ deployed_os_config = Column(JSONEncoded, default={})
+ os_name = Column(String(80))
+ creator_id = Column(Integer, ForeignKey('user.id'))
+ owner = Column(String(80))
+ os_installer = Column(JSONEncoded, default={})
+
+ __table_args__ = (
+ UniqueConstraint('name', 'owner', name='constraint'),
+ )
+
+ id = Column(
+ Integer,
+ ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+ reinstall_os = Column(Boolean, default=True)
+
+ host_networks = relationship(
+ HostNetwork,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('host')
+ )
+ clusterhosts = relationship(
+ ClusterHost,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('host')
+ )
+ state = relationship(
+ HostState,
+ uselist=False,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('host')
+ )
+ log_histories = relationship(
+ HostLogHistory,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('host')
+ )
+
+ def __str__(self):
+ return 'Host[%s:%s]' % (self.id, self.name)
+
+ @hybrid_property
+ def mac(self):
+ machine = self.machine
+ if machine:
+ return machine.mac
+ else:
+ return None
+
+ @property
+ def os_id(self):
+ return self.os_name
+
+ @os_id.setter
+ def os_id(self, value):
+ self.os_name = value
+
+ @hybrid_property
+ def hostname(self):
+ return self.name
+
+ @hostname.expression
+ def hostname(cls):
+ return cls.name
+
+ @property
+ def patched_os_config(self):
+ return self.os_config
+
+ @patched_os_config.setter
+ def patched_os_config(self, value):
+ os_config = copy.deepcopy(self.os_config)
+ self.os_config = util.merge_dict(os_config, value)
+ logging.debug('patch host os config in %s: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def put_os_config(self):
+ return self.os_config
+
+ @put_os_config.setter
+ def put_os_config(self, value):
+ os_config = copy.deepcopy(self.os_config)
+ os_config.update(value)
+ self.os_config = os_config
+ logging.debug('put host os config in %s: %s', self.id, value)
+ self.config_validated = False
+
+ def __init__(self, id, **kwargs):
+ self.id = id
+ self.state = HostState()
+ super(Host, self).__init__(**kwargs)
+
+ def update(self):
+ creator = self.creator
+ if creator:
+ self.owner = creator.email
+ if self.reinstall_os:
+            if self.state.state in ['SUCCESSFUL', 'ERROR']:
+                if self.config_validated:
+                    self.state.state = 'INITIALIZED'
+                else:
+                    self.state.state = 'UNINITIALIZED'
+        self.state.update()
+ super(Host, self).update()
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to the column in future.
+ super(Host, self).validate()
+ creator = self.creator
+ if not creator:
+ raise exception.InvalidParameter(
+ 'creator is not set in host %s' % self.id
+ )
+ os_name = self.os_name
+ if not os_name:
+ raise exception.InvalidParameter(
+ 'os is not set in host %s' % self.id
+ )
+ os_installer = self.os_installer
+ if not os_installer:
+            raise exception.InvalidParameter(
+ 'os_installer is not set in host %s' % self.id
+ )
+
+ @property
+ def os_installed(self):
+ return self.state.state == 'SUCCESSFUL'
+
+ @property
+ def clusters(self):
+ return [clusterhost.cluster for clusterhost in self.clusterhosts]
+
+ def state_dict(self):
+ return self.state.to_dict()
+
+ def to_dict(self):
+ """Host dict contains its underlying machine dict."""
+ dict_info = self.machine.to_dict()
+ dict_info.update(super(Host, self).to_dict())
+ state_dict = self.state_dict()
+ ip = None
+ for host_network in self.host_networks:
+ if host_network.is_mgmt:
+ ip = host_network.ip
+ dict_info.update({
+ 'machine_id': self.machine.id,
+ 'os_installed': self.os_installed,
+ 'hostname': self.hostname,
+ 'ip': ip,
+ 'networks': [
+ host_network.to_dict()
+ for host_network in self.host_networks
+ ],
+ 'os_id': self.os_id,
+ 'clusters': [cluster.to_dict() for cluster in self.clusters],
+ 'state': state_dict['state']
+ })
+ return dict_info
+
+
+class ClusterState(BASE, StateMixin):
+ """Cluster state table."""
+ __tablename__ = 'cluster_state'
+
+ id = Column(
+ Integer,
+ ForeignKey('cluster.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+ total_hosts = Column(
+ Integer,
+ default=0
+ )
+ installing_hosts = Column(
+ Integer,
+ default=0
+ )
+ completed_hosts = Column(
+ Integer,
+ default=0
+ )
+ failed_hosts = Column(
+ Integer,
+ default=0
+ )
+
+ def __init__(self, **kwargs):
+ super(ClusterState, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'ClusterState[%s state %s percentage %s]' % (
+ self.id, self.state, self.percentage
+ )
+
+ def to_dict(self):
+ dict_info = super(ClusterState, self).to_dict()
+ dict_info['status'] = {
+ 'total_hosts': self.total_hosts,
+ 'installing_hosts': self.installing_hosts,
+ 'completed_hosts': self.completed_hosts,
+ 'failed_hosts': self.failed_hosts
+ }
+ return dict_info
+
+ def update(self):
+ # all fields of cluster state should be calculated by
+ # its each underlying clusterhost state.
+ cluster = self.cluster
+ clusterhosts = cluster.clusterhosts
+ self.total_hosts = len(clusterhosts)
+ self.installing_hosts = 0
+ self.failed_hosts = 0
+ self.completed_hosts = 0
+ if not cluster.flavor_name:
+ for clusterhost in clusterhosts:
+ host = clusterhost.host
+ host_state = host.state.state
+ if host_state == 'INSTALLING':
+ self.installing_hosts += 1
+ elif host_state == 'ERROR':
+ self.failed_hosts += 1
+ elif host_state == 'SUCCESSFUL':
+ self.completed_hosts += 1
+ else:
+ for clusterhost in clusterhosts:
+ clusterhost_state = clusterhost.state.state
+ if clusterhost_state == 'INSTALLING':
+ self.installing_hosts += 1
+ elif clusterhost_state == 'ERROR':
+ self.failed_hosts += 1
+ elif clusterhost_state == 'SUCCESSFUL':
+ self.completed_hosts += 1
+ if self.total_hosts:
+ if self.completed_hosts == self.total_hosts:
+ self.percentage = 1.0
+ else:
+                self.percentage = (
+                    float(self.completed_hosts) / float(self.total_hosts)
+                )
+ if self.state == 'SUCCESSFUL':
+ self.state = 'INSTALLING'
+ self.ready = False
+ self.message = (
+            'total %s, installing %s, completed %s, error %s'
+ ) % (
+ self.total_hosts, self.installing_hosts,
+ self.completed_hosts, self.failed_hosts
+ )
+ if self.failed_hosts:
+ self.severity = 'ERROR'
+
+ super(ClusterState, self).update()
+ if self.state == 'INSTALLING':
+ cluster.reinstall_distributed_system = False
+
+
+class Cluster(BASE, TimestampMixin, HelperMixin):
+ """Cluster table."""
+ __tablename__ = 'cluster'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), nullable=False)
+ reinstall_distributed_system = Column(Boolean, default=True)
+ config_step = Column(String(80), default='')
+ os_name = Column(String(80))
+ flavor_name = Column(String(80), nullable=True)
+ # flavor dict got from flavor id.
+ flavor = Column(JSONEncoded, default={})
+ os_config = Column(JSONEncoded, default={})
+ package_config = Column(JSONEncoded, default={})
+ deployed_os_config = Column(JSONEncoded, default={})
+ deployed_package_config = Column(JSONEncoded, default={})
+ config_validated = Column(Boolean, default=False)
+ adapter_name = Column(String(80))
+ creator_id = Column(Integer, ForeignKey('user.id'))
+ owner = Column(String(80))
+ clusterhosts = relationship(
+ ClusterHost,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('cluster')
+ )
+ state = relationship(
+ ClusterState,
+ uselist=False,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('cluster')
+ )
+ __table_args__ = (
+ UniqueConstraint('name', 'creator_id', name='constraint'),
+ )
+
+ def __init__(self, name, creator_id, **kwargs):
+ self.name = name
+ self.creator_id = creator_id
+ self.state = ClusterState()
+ super(Cluster, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'Cluster[%s:%s]' % (self.id, self.name)
+
+ def update(self):
+ creator = self.creator
+ if creator:
+ self.owner = creator.email
+ if self.reinstall_distributed_system:
+            if self.state.state in ['SUCCESSFUL', 'ERROR']:
+                if self.config_validated:
+                    self.state.state = 'INITIALIZED'
+                else:
+                    self.state.state = 'UNINITIALIZED'
+        self.state.update()
+ super(Cluster, self).update()
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(Cluster, self).validate()
+ creator = self.creator
+ if not creator:
+ raise exception.InvalidParameter(
+ 'creator is not set in cluster %s' % self.id
+ )
+ os_name = self.os_name
+ if not os_name:
+ raise exception.InvalidParameter(
+ 'os is not set in cluster %s' % self.id
+ )
+ adapter_name = self.adapter_name
+ if not adapter_name:
+ raise exception.InvalidParameter(
+ 'adapter is not set in cluster %s' % self.id
+ )
+ flavor_name = self.flavor_name
+ if flavor_name:
+ if 'name' not in self.flavor:
+ raise exception.InvalidParameter(
+ 'key name does not exist in flavor %s' % (
+ self.flavor
+ )
+ )
+ if flavor_name != self.flavor['name']:
+ raise exception.InvalidParameter(
+                    'flavor name %s does not match '
+ 'the name key in flavor %s' % (
+ flavor_name, self.flavor
+ )
+ )
+ else:
+ if self.flavor:
+ raise exception.InvalidParameter(
+ 'flavor %s is not empty' % self.flavor
+ )
+
+ @property
+ def os_id(self):
+ return self.os_name
+
+ @os_id.setter
+ def os_id(self, value):
+ self.os_name = value
+
+ @property
+ def adapter_id(self):
+ return self.adapter_name
+
+ @adapter_id.setter
+ def adapter_id(self, value):
+ self.adapter_name = value
+
+ @property
+ def flavor_id(self):
+ if self.flavor_name:
+ return '%s:%s' % (self.adapter_name, self.flavor_name)
+ else:
+ return None
+
+ @flavor_id.setter
+ def flavor_id(self, value):
+ if value:
+ _, flavor_name = value.split(':', 1)
+ self.flavor_name = flavor_name
+ else:
+ self.flavor_name = value
+
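+    # Sketch: flavor_id round-trips as '<adapter_name>:<flavor_name>';
+    # with hypothetical names, setting flavor_id = 'openstack:allinone'
+    # stores flavor_name 'allinone', and reading it back joins the
+    # current adapter_name.
+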
+ @property
+ def patched_os_config(self):
+ return self.os_config
+
+ @patched_os_config.setter
+ def patched_os_config(self, value):
+ os_config = copy.deepcopy(self.os_config)
+ self.os_config = util.merge_dict(os_config, value)
+ logging.debug('patch cluster %s os config: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def put_os_config(self):
+ return self.os_config
+
+ @put_os_config.setter
+ def put_os_config(self, value):
+ os_config = copy.deepcopy(self.os_config)
+ os_config.update(value)
+ self.os_config = os_config
+ logging.debug('put cluster %s os config: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def patched_package_config(self):
+ return self.package_config
+
+ @patched_package_config.setter
+ def patched_package_config(self, value):
+ package_config = copy.deepcopy(self.package_config)
+ self.package_config = util.merge_dict(package_config, value)
+ logging.debug('patch cluster %s package config: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def put_package_config(self):
+ return self.package_config
+
+ @put_package_config.setter
+ def put_package_config(self, value):
+ package_config = dict(self.package_config)
+ package_config.update(value)
+ self.package_config = package_config
+ logging.debug('put cluster %s package config: %s', self.id, value)
+ self.config_validated = False
+
+ @property
+ def distributed_system_installed(self):
+ return self.state.state == 'SUCCESSFUL'
+
+ def state_dict(self):
+ return self.state.to_dict()
+
+ def to_dict(self):
+ dict_info = super(Cluster, self).to_dict()
+ dict_info['distributed_system_installed'] = (
+ self.distributed_system_installed
+ )
+ dict_info['os_id'] = self.os_id
+ dict_info['adapter_id'] = self.adapter_id
+ dict_info['flavor_id'] = self.flavor_id
+ return dict_info
+
+
+# User, Permission relation table
+class UserPermission(BASE, HelperMixin, TimestampMixin):
+ """User permission table."""
+ __tablename__ = 'user_permission'
+ id = Column(Integer, primary_key=True)
+ user_id = Column(
+ Integer,
+ ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ permission_id = Column(
+ Integer,
+ ForeignKey('permission.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ __table_args__ = (
+ UniqueConstraint('user_id', 'permission_id', name='constraint'),
+ )
+
+    def __init__(self, user_id, permission_id, **kwargs):
+        self.user_id = user_id
+        self.permission_id = permission_id
+        super(UserPermission, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'UserPermission[%s:%s]' % (self.id, self.name)
+
+ @hybrid_property
+ def name(self):
+ return self.permission.name
+
+ def to_dict(self):
+ dict_info = self.permission.to_dict()
+ dict_info.update(super(UserPermission, self).to_dict())
+ return dict_info
+
+
+class Permission(BASE, HelperMixin, TimestampMixin):
+ """Permission table."""
+ __tablename__ = 'permission'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True, nullable=False)
+ alias = Column(String(100))
+ description = Column(Text)
+ user_permissions = relationship(
+ UserPermission,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('permission')
+ )
+
+ def __init__(self, name, **kwargs):
+ self.name = name
+ super(Permission, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'Permission[%s:%s]' % (self.id, self.name)
+
+
+class UserToken(BASE, HelperMixin):
+ """user token table."""
+ __tablename__ = 'user_token'
+
+ id = Column(Integer, primary_key=True)
+ user_id = Column(
+ Integer,
+ ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ token = Column(String(256), unique=True, nullable=False)
+ expire_timestamp = Column(DateTime, nullable=True)
+
+ def __init__(self, token, **kwargs):
+ self.token = token
+ super(UserToken, self).__init__(**kwargs)
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(UserToken, self).validate()
+ if not self.user:
+ raise exception.InvalidParameter(
+ 'user is not set in token: %s' % self.token
+ )
+
+
+class UserLog(BASE, HelperMixin):
+ """User log table."""
+ __tablename__ = 'user_log'
+
+ id = Column(Integer, primary_key=True)
+ user_id = Column(
+ Integer,
+ ForeignKey('user.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ action = Column(Text)
+ timestamp = Column(DateTime, default=lambda: datetime.datetime.now())
+
+ @hybrid_property
+ def user_email(self):
+ return self.user.email
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(UserLog, self).validate()
+ if not self.user:
+ raise exception.InvalidParameter(
+ 'user is not set in user log: %s' % self.id
+ )
+
+
+class User(BASE, HelperMixin, TimestampMixin):
+ """User table."""
+ __tablename__ = 'user'
+
+ id = Column(Integer, primary_key=True)
+ email = Column(String(80), unique=True, nullable=False)
+ crypted_password = Column('password', String(225))
+ firstname = Column(String(80))
+ lastname = Column(String(80))
+ is_admin = Column(Boolean, default=False)
+ active = Column(Boolean, default=True)
+ user_permissions = relationship(
+ UserPermission,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('user')
+ )
+ user_logs = relationship(
+ UserLog,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('user')
+ )
+ user_tokens = relationship(
+ UserToken,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('user')
+ )
+ clusters = relationship(
+ Cluster,
+ backref=backref('creator')
+ )
+ hosts = relationship(
+ Host,
+ backref=backref('creator')
+ )
+
+ def __init__(self, email, **kwargs):
+ self.email = email
+ super(User, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'User[%s]' % self.email
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(User, self).validate()
+ if not self.crypted_password:
+ raise exception.InvalidParameter(
+ 'password is not set in user : %s' % self.email
+ )
+
+ @property
+ def password(self):
+ return '***********'
+
+ @password.setter
+ def password(self, password):
+ # password stored in database is crypted.
+ self.crypted_password = util.encrypt(password)
+
+ @hybrid_property
+ def permissions(self):
+ permissions = []
+ for user_permission in self.user_permissions:
+ permissions.append(user_permission.permission)
+
+ return permissions
+
+ def to_dict(self):
+ dict_info = super(User, self).to_dict()
+ dict_info['permissions'] = [
+ permission.to_dict()
+ for permission in self.permissions
+ ]
+ return dict_info
+
+
+class SwitchMachine(BASE, HelperMixin, TimestampMixin):
+ """Switch Machine table."""
+ __tablename__ = 'switch_machine'
+ switch_machine_id = Column(
+ 'id', Integer, primary_key=True
+ )
+ switch_id = Column(
+ Integer,
+ ForeignKey('switch.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ machine_id = Column(
+ Integer,
+ ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE')
+ )
+ owner_id = Column(Integer, ForeignKey('user.id'))
+ port = Column(String(80), nullable=True)
+ vlans = Column(JSONEncoded, default=[])
+ __table_args__ = (
+ UniqueConstraint('switch_id', 'machine_id', name='constraint'),
+ )
+
+ def __init__(self, switch_id, machine_id, **kwargs):
+ self.switch_id = switch_id
+ self.machine_id = machine_id
+ super(SwitchMachine, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'SwitchMachine[%s port %s]' % (
+ self.switch_machine_id, self.port
+ )
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(SwitchMachine, self).validate()
+        if not self.switch:
+            raise exception.InvalidParameter(
+                'switch is not set in %s' % self.switch_machine_id
+            )
+        if not self.machine:
+            raise exception.InvalidParameter(
+                'machine is not set in %s' % self.switch_machine_id
+            )
+        if not self.port:
+            raise exception.InvalidParameter(
+                'port is not set in %s' % self.switch_machine_id
+            )
+
+ @hybrid_property
+ def mac(self):
+ return self.machine.mac
+
+ @hybrid_property
+ def tag(self):
+ return self.machine.tag
+
+ @property
+ def switch_ip(self):
+ return self.switch.ip
+
+ @hybrid_property
+ def switch_ip_int(self):
+ return self.switch.ip_int
+
+ @switch_ip_int.expression
+ def switch_ip_int(cls):
+ return Switch.ip_int
+
+ @hybrid_property
+ def switch_vendor(self):
+ return self.switch.vendor
+
+ @switch_vendor.expression
+ def switch_vendor(cls):
+ return Switch.vendor
+
+ @property
+ def patched_vlans(self):
+ return self.vlans
+
+ @patched_vlans.setter
+ def patched_vlans(self, value):
+ if not value:
+ return
+ vlans = list(self.vlans)
+ for item in value:
+ if item not in vlans:
+ vlans.append(item)
+ self.vlans = vlans
+
+ @property
+ def filtered(self):
+        """Check whether the switch machine should be filtered.
+
+        A port name is composed as <port_prefix><port_number><port_suffix>.
+        For each filter in the switch machine filters: if filter_type is
+        allow and the port matches the pattern, the switch machine is
+        exposed through the api; if filter_type is deny and the port
+        matches the pattern, the switch machine is hidden from the api.
+        If no filter matches, the default is the inverse of the last
+        filter: a trailing allow filter denies all unmatched switch
+        machines, and a trailing deny filter allows them.
+        If no filter is defined, all switch machines are allowed.
+        If ports is defined in a filter and 'all' is in ports, every
+        switch machine matches. If 'all' is not in ports, a switch
+        machine matches when its port name is listed in ports.
+        A switch machine also matches when its port parses as
+        <port_prefix><port_number><port_suffix> and the port number is
+        in the range [port_start, port_end].
+        """
+ filters = self.switch.machine_filters
+ port = self.port
+ unmatched_allowed = True
+ ports_pattern = re.compile(r'(\D*)(\d+)-(\d+)(\D*)')
+ port_pattern = re.compile(r'(\D*)(\d+)(\D*)')
+ port_match = port_pattern.match(port)
+ if port_match:
+ port_prefix = port_match.group(1)
+ port_number = int(port_match.group(2))
+ port_suffix = port_match.group(3)
+ else:
+ port_prefix = ''
+ port_number = 0
+ port_suffix = ''
+ for port_filter in filters:
+ filter_type = port_filter.get('filter_type', 'allow')
+ denied = filter_type != 'allow'
+ unmatched_allowed = denied
+ if 'ports' in port_filter:
+ if 'all' in port_filter['ports']:
+ return denied
+ if port in port_filter['ports']:
+ return denied
+ if port_match:
+ for port_or_ports in port_filter['ports']:
+ ports_match = ports_pattern.match(port_or_ports)
+ if ports_match:
+ filter_port_prefix = ports_match.group(1)
+ filter_port_start = int(ports_match.group(2))
+ filter_port_end = int(ports_match.group(3))
+ filter_port_suffix = ports_match.group(4)
+ if (
+ filter_port_prefix == port_prefix and
+ filter_port_suffix == port_suffix and
+ filter_port_start <= port_number and
+ port_number <= filter_port_end
+ ):
+ return denied
+ else:
+ filter_port_prefix = port_filter.get('port_prefix', '')
+ filter_port_suffix = port_filter.get('port_suffix', '')
+ if (
+ port_match and
+ port_prefix == filter_port_prefix and
+ port_suffix == filter_port_suffix
+ ):
+ if (
+ 'port_start' not in port_filter or
+ port_number >= port_filter['port_start']
+ ) and (
+ 'port_end' not in port_filter or
+ port_number <= port_filter['port_end']
+ ):
+ return denied
+ return not unmatched_allowed
+
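+    # A minimal sketch of the rules above (hypothetical filters): with
+    # machine_filters parsed from 'allow ports ae10,ae20;deny ports all',
+    # a machine on port 'ae10' hits the allow filter first, so filtered
+    # is False (exposed); a machine on port 'ae30' only hits
+    # 'deny ports all', so filtered is True (hidden).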
+ def to_dict(self):
+ dict_info = self.machine.to_dict()
+ dict_info.update(super(SwitchMachine, self).to_dict())
+ dict_info['switch_ip'] = self.switch.ip
+ return dict_info
+
+
+class Machine(BASE, HelperMixin, TimestampMixin):
+ """Machine table."""
+ __tablename__ = 'machine'
+ id = Column(Integer, primary_key=True)
+ mac = Column(String(24), unique=True, nullable=False)
+ ipmi_credentials = Column(JSONEncoded, default={})
+ tag = Column(JSONEncoded, default={})
+ location = Column(JSONEncoded, default={})
+ owner_id = Column(Integer, ForeignKey('user.id'))
+ machine_attributes = Column(JSONEncoded, default={})
+
+ switch_machines = relationship(
+ SwitchMachine,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('machine')
+ )
+ host = relationship(
+ Host,
+ uselist=False,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('machine')
+ )
+
+ def __init__(self, mac, **kwargs):
+ self.mac = mac
+ super(Machine, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'Machine[%s:%s]' % (self.id, self.mac)
+
+ def validate(self):
+ # TODO(xicheng): some validation can be moved to column.
+ super(Machine, self).validate()
+ try:
+ netaddr.EUI(self.mac)
+ except Exception:
+ raise exception.InvalidParameter(
+                'mac address %s format is incorrect' % self.mac
+ )
+
+ @property
+ def patched_ipmi_credentials(self):
+ return self.ipmi_credentials
+
+ @patched_ipmi_credentials.setter
+ def patched_ipmi_credentials(self, value):
+ if not value:
+ return
+ ipmi_credentials = copy.deepcopy(self.ipmi_credentials)
+ self.ipmi_credentials = util.merge_dict(ipmi_credentials, value)
+
+ @property
+ def patched_tag(self):
+ return self.tag
+
+ @patched_tag.setter
+ def patched_tag(self, value):
+ if not value:
+ return
+ tag = copy.deepcopy(self.tag)
+ tag.update(value)
+        self.tag = tag
+
+ @property
+ def patched_location(self):
+ return self.location
+
+ @patched_location.setter
+ def patched_location(self, value):
+ if not value:
+ return
+ location = copy.deepcopy(self.location)
+ location.update(value)
+ self.location = location
+
+ def to_dict(self):
+ # TODO(xicheng): move the filling of switches
+ # to db/api.
+ dict_info = {}
+ dict_info['switches'] = [
+ {
+ 'switch_ip': switch_machine.switch_ip,
+ 'port': switch_machine.port,
+ 'vlans': switch_machine.vlans
+ }
+ for switch_machine in self.switch_machines
+ if not switch_machine.filtered
+ ]
+ if dict_info['switches']:
+ dict_info.update(dict_info['switches'][0])
+ dict_info.update(super(Machine, self).to_dict())
+ return dict_info
+
+
+class Switch(BASE, HelperMixin, TimestampMixin):
+ """Switch table."""
+ __tablename__ = 'switch'
+ id = Column(Integer, primary_key=True)
+ ip_int = Column('ip', BigInteger, unique=True, nullable=False)
+ credentials = Column(JSONEncoded, default={})
+ vendor = Column(String(256), nullable=True)
+ state = Column(Enum('initialized', 'unreachable', 'notsupported',
+ 'repolling', 'error', 'under_monitoring',
+ name='switch_state'),
+ ColumnDefault('initialized'))
+    # filters is a json formatted list; each element has the format:
+    # keys: ['filter_type', 'ports', 'port_prefix', 'port_suffix',
+    # 'port_start', 'port_end'].
+    # each port name is divided into <port_prefix><port_number><port_suffix>
+    # filter_type is one of ['allow', 'deny'], default is 'allow'.
+    # ports is a list of port names.
+    # port_prefix is the prefix a filtered port should start with.
+    # port_suffix is the suffix a filtered port should end with.
+    # port_start is the smallest port number to match.
+    # port_end is the largest port number to match.
+ _filters = Column('filters', JSONEncoded, default=[])
+ switch_machines = relationship(
+ SwitchMachine,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('switch')
+ )
+
+ def __str__(self):
+ return 'Switch[%s:%s]' % (self.id, self.ip)
+
+ @classmethod
+ def parse_filters(cls, filters):
+        """Parse filters set from outside into the standard format.
+
+        The api can set switch filters in a flexible format; this
+        function parses them into the canonical list of dicts.
+
+ Supported format:
+ as string:
+ allow ports ae10,ae20
+ allow port_prefix ae port_start 30 port_end 40
+ deny ports all
+ as python object:
+ [{
+ 'filter_type': 'allow',
+ 'ports': ['ae10', 'ae20']
+ },{
+ 'filter_type': 'allow',
+ 'port_prefix': 'ae',
+ 'port_suffix': '',
+ 'port_start': 30,
+ 'port_end': 40
+ },{
+ 'filter_type': 'deny',
+ 'ports': ['all']
+ }]
+ """
+ if isinstance(filters, basestring):
+ filters = filters.replace('\r\n', '\n').replace('\n', ';')
+ filters = [
+ machine_filter for machine_filter in filters.split(';')
+ if machine_filter
+ ]
+ if not isinstance(filters, list):
+ filters = [filters]
+ machine_filters = []
+ for machine_filter in filters:
+ if not machine_filter:
+ continue
+ if isinstance(machine_filter, basestring):
+ filter_dict = {}
+ filter_items = [
+ item for item in machine_filter.split() if item
+ ]
+ if filter_items[0] in ['allow', 'deny']:
+ filter_dict['filter_type'] = filter_items[0]
+ filter_items = filter_items[1:]
+ elif filter_items[0] not in [
+ 'ports', 'port_prefix', 'port_suffix',
+ 'port_start', 'port_end'
+ ]:
+ raise exception.InvalidParameter(
+ 'unrecognized filter type %s' % filter_items[0]
+ )
+ while filter_items:
+ if len(filter_items) >= 2:
+ filter_dict[filter_items[0]] = filter_items[1]
+ filter_items = filter_items[2:]
+ else:
+ filter_dict[filter_items[0]] = ''
+ filter_items = filter_items[1:]
+ machine_filter = filter_dict
+ if not isinstance(machine_filter, dict):
+ raise exception.InvalidParameter(
+ 'filter %s is not dict' % machine_filter
+ )
+ if 'filter_type' in machine_filter:
+ if machine_filter['filter_type'] not in ['allow', 'deny']:
+ raise exception.InvalidParameter(
+ 'filter_type should be `allow` or `deny` in %s' % (
+ machine_filter
+ )
+ )
+ if 'ports' in machine_filter:
+ if isinstance(machine_filter['ports'], basestring):
+ machine_filter['ports'] = [
+ port_or_ports
+ for port_or_ports in machine_filter['ports'].split(',')
+ if port_or_ports
+ ]
+ if not isinstance(machine_filter['ports'], list):
+ raise exception.InvalidParameter(
+ '`ports` type is not list in filter %s' % (
+ machine_filter
+ )
+ )
+ for port_or_ports in machine_filter['ports']:
+ if not isinstance(port_or_ports, basestring):
+ raise exception.InvalidParameter(
+ '%s type is not basestring in `ports` %s' % (
+ port_or_ports, machine_filter['ports']
+ )
+ )
+ for key in ['port_start', 'port_end']:
+ if key in machine_filter:
+ if isinstance(machine_filter[key], basestring):
+ if machine_filter[key].isdigit():
+ machine_filter[key] = int(machine_filter[key])
+ if not isinstance(machine_filter[key], (int, long)):
+ raise exception.InvalidParameter(
+                        '`%s` type is not int in filter %s' % (
+ key, machine_filter
+ )
+ )
+ machine_filters.append(machine_filter)
+ return machine_filters
+
+ @classmethod
+ def format_filters(cls, filters):
+ """format json formatted filters to string."""
+ filter_strs = []
+ for machine_filter in filters:
+ filter_properties = []
+ filter_properties.append(
+ machine_filter.get('filter_type', 'allow')
+ )
+ if 'ports' in machine_filter:
+ filter_properties.append(
+ 'ports ' + ','.join(machine_filter['ports'])
+ )
+ if 'port_prefix' in machine_filter:
+ filter_properties.append(
+ 'port_prefix ' + machine_filter['port_prefix']
+ )
+ if 'port_suffix' in machine_filter:
+ filter_properties.append(
+ 'port_suffix ' + machine_filter['port_suffix']
+ )
+ if 'port_start' in machine_filter:
+ filter_properties.append(
+ 'port_start ' + str(machine_filter['port_start'])
+ )
+ if 'port_end' in machine_filter:
+ filter_properties.append(
+ 'port_end ' + str(machine_filter['port_end'])
+ )
+ filter_strs.append(' '.join(filter_properties))
+ return ';'.join(filter_strs)
+
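+    # A minimal round-trip sketch for the two classmethods above
+    # (hypothetical filter string):
+    #
+    #   parsed = Switch.parse_filters('allow ports ae10,ae20;deny ports all')
+    #   # -> [{'filter_type': 'allow', 'ports': ['ae10', 'ae20']},
+    #   #     {'filter_type': 'deny', 'ports': ['all']}]
+    #   Switch.format_filters(parsed)
+    #   # -> 'allow ports ae10,ae20;deny ports all'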
+ def __init__(self, ip_int, **kwargs):
+ self.ip_int = ip_int
+ super(Switch, self).__init__(**kwargs)
+
+ @property
+ def ip(self):
+ return str(netaddr.IPAddress(self.ip_int))
+
+ @ip.setter
+ def ip(self, ipaddr):
+ self.ip_int = int(netaddr.IPAddress(ipaddr))
+
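+    # A minimal sketch of the conversion above (hypothetical address):
+    # setting ip = '10.0.0.1' stores ip_int as
+    # int(netaddr.IPAddress('10.0.0.1')) == 167772161, and reading ip
+    # renders the integer back as a dotted quad.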
+ @property
+ def patched_credentials(self):
+ return self.credentials
+
+ @patched_credentials.setter
+ def patched_credentials(self, value):
+ if not value:
+ return
+ credentials = copy.deepcopy(self.credentials)
+ self.credentials = util.merge_dict(credentials, value)
+
+ @property
+ def machine_filters(self):
+ return self._filters
+
+ @machine_filters.setter
+ def machine_filters(self, value):
+ if not value:
+ return
+ self._filters = self.parse_filters(value)
+
+ @property
+ def put_machine_filters(self):
+ return self._filters
+
+ @put_machine_filters.setter
+ def put_machine_filters(self, value):
+ if not value:
+ return
+ self._filters = self.parse_filters(value)
+
+ @property
+ def patched_machine_filters(self):
+ return self._filters
+
+ @patched_machine_filters.setter
+ def patched_machine_filters(self, value):
+ if not value:
+ return
+ filters = list(self.machine_filters)
+ self._filters = self.parse_filters(value) + filters
+
+ def to_dict(self):
+ dict_info = super(Switch, self).to_dict()
+ dict_info['ip'] = self.ip
+ dict_info['filters'] = self.format_filters(self._filters)
+ return dict_info
+
+
+class Subnet(BASE, TimestampMixin, HelperMixin):
+    """Subnet table."""
+ __tablename__ = 'subnet'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True, nullable=True)
+ subnet = Column(String(80), unique=True, nullable=False)
+
+ host_networks = relationship(
+ HostNetwork,
+ passive_deletes=True, passive_updates=True,
+ cascade='all, delete-orphan',
+ backref=backref('subnet')
+ )
+
+ def __init__(self, subnet, **kwargs):
+ self.subnet = subnet
+ super(Subnet, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'Subnet[%s:%s]' % (self.id, self.subnet)
+
+ def to_dict(self):
+ dict_info = super(Subnet, self).to_dict()
+ if not self.name:
+ dict_info['name'] = self.subnet
+ return dict_info
+
+
+# TODO(grace): move this global variable into HealthCheckReport.
+HEALTH_REPORT_STATES = ('verifying', 'success', 'finished', 'error')
+
+
+class HealthCheckReport(BASE, HelperMixin):
+ """Health check report table."""
+ __tablename__ = 'health_check_report'
+
+ cluster_id = Column(
+ Integer,
+ ForeignKey('cluster.id', onupdate='CASCADE', ondelete='CASCADE'),
+ primary_key=True
+ )
+ name = Column(String(80), nullable=False, primary_key=True)
+ display_name = Column(String(100))
+ report = Column(JSONEncoded, default={})
+ category = Column(String(80), default='')
+ state = Column(
+ Enum(*HEALTH_REPORT_STATES, name='report_state'),
+ ColumnDefault('verifying'),
+ nullable=False
+ )
+ error_message = Column(Text, default='')
+
+ def __init__(self, cluster_id, name, **kwargs):
+ self.cluster_id = cluster_id
+ self.name = name
+ if 'state' in kwargs and kwargs['state'] not in HEALTH_REPORT_STATES:
+ err_msg = 'State value %s is not accepted.' % kwargs['state']
+ raise exception.InvalidParameter(err_msg)
+
+ super(HealthCheckReport, self).__init__(**kwargs)
+
+ def __str__(self):
+ return 'HealthCheckReport[cluster_id: %s, name: %s]' % (
+ self.cluster_id, self.name
+ )
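+
+
+# A minimal sketch of the state guard in HealthCheckReport.__init__
+# (hypothetical cluster id and report name):
+#
+#   HealthCheckReport(1, 'neutron_check', state='error')    # accepted
+#   HealthCheckReport(1, 'neutron_check', state='running')  # raises
+#   # exception.InvalidParameter: 'running' is not in
+#   # HEALTH_REPORT_STATES.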
diff --git a/compass-tasks/db/v1/model.py b/compass-tasks/db/v1/model.py
new file mode 100644
index 0000000..d74e355
--- /dev/null
+++ b/compass-tasks/db/v1/model.py
@@ -0,0 +1,724 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""database model."""
+from datetime import datetime
+from hashlib import md5
+import logging
+import simplejson as json
+import uuid
+
+from sqlalchemy import Column, ColumnDefault, Integer, String
+from sqlalchemy import Float, Enum, DateTime, ForeignKey, Text, Boolean
+from sqlalchemy import UniqueConstraint
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.ext.hybrid import hybrid_property
+
+from compass.utils import util
+
+from flask.ext.login import UserMixin
+from itsdangerous import URLSafeTimedSerializer
+
+BASE = declarative_base()
+# TODO(grace) SECRET_KEY should be generated when installing compass
+# and save to a config file or DB
+SECRET_KEY = "abcd"
+
+# This is used for generating a token by user's ID and
+# decode the ID from this token
+login_serializer = URLSafeTimedSerializer(SECRET_KEY)
+
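+# A minimal token round trip with the serializer above (hypothetical
+# user id; itsdangerous checks the signature and age on load):
+#
+#   token = login_serializer.dumps(42)
+#   login_serializer.loads(token, max_age=3600)  # -> 42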
+
+class User(BASE, UserMixin):
+ """User table."""
+ __tablename__ = 'user'
+ id = Column(Integer, primary_key=True)
+ email = Column(String(80), unique=True)
+ password = Column(String(225), default='')
+ active = Column(Boolean, default=True)
+
+ def __init__(self, email, password, **kwargs):
+ self.email = email
+ self.password = self._set_password(password)
+
+ def __repr__(self):
+ return '<User name: %s>' % self.email
+
+ def _set_password(self, password):
+ return self._hash_password(password)
+
+ def get_password(self):
+ return self.password
+
+ def valid_password(self, password):
+ return self.password == self._hash_password(password)
+
+ def get_auth_token(self):
+ return login_serializer.dumps(self.id)
+
+ def is_active(self):
+ return self.active
+
+ def _hash_password(self, password):
+ return md5(password).hexdigest()
+
+
+class SwitchConfig(BASE):
+    """Switch Config table.
+
+    :param id: The unique identifier of the switch config.
+    :param ip: The IP address of the switch.
+    :param filter_port: The switch port that needs to be filtered.
+ """
+ __tablename__ = 'switch_config'
+ id = Column(Integer, primary_key=True)
+ ip = Column(String(80))
+ filter_port = Column(String(16))
+ __table_args__ = (UniqueConstraint('ip', 'filter_port', name='filter1'), )
+
+ def __init__(self, **kwargs):
+ super(SwitchConfig, self).__init__(**kwargs)
+
+
+class Switch(BASE):
+ """Switch table.
+
+ :param id: the unique identifier of the switch. int as primary key.
+ :param ip: the IP address of the switch.
+ :param vendor_info: the name of the vendor
+ :param credential_data: used for accessing and retrieving information
+ from the switch. Store json format as string.
+    :param state: Enum. 'initialized'/'repolling': polling the switch has
+                  not finished learning all MAC addresses of the devices
+                  connected to it;
+                  'unreachable': a final state, the switch is unreachable
+                  at this time and no MAC address could be retrieved.
+                  'notsupported': a final state, the vendor found is not
+                  supported yet, so no MAC address will be retrieved.
+                  'error': a final state, something went wrong.
+                  'under_monitoring': a final state, MAC addresses have
+                  been learned successfully from the switch.
+ :param err_msg: Error message when polling switch failed.
+ :param machines: refer to list of Machine connected to the switch.
+ """
+ __tablename__ = 'switch'
+
+ id = Column(Integer, primary_key=True)
+ ip = Column(String(80), unique=True)
+ credential_data = Column(Text)
+ vendor_info = Column(String(256), nullable=True)
+ state = Column(Enum('initialized', 'unreachable', 'notsupported',
+ 'repolling', 'error', 'under_monitoring',
+ name='switch_state'),
+ default='initialized')
+ err_msg = Column(Text)
+
+ def __init__(self, **kwargs):
+ super(Switch, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Switch ip: %r, credential: %r, vendor: %r, state: %s>'\
+ % (self.ip, self.credential, self.vendor, self.state)
+
+ @hybrid_property
+ def vendor(self):
+ """vendor property getter"""
+ return self.vendor_info
+
+ @vendor.setter
+ def vendor(self, value):
+ """vendor property setter"""
+ self.vendor_info = value
+
+ @property
+ def credential(self):
+ """credential data getter.
+
+ :returns: python primitive dictionary object.
+ """
+ if self.credential_data:
+ try:
+ credential = json.loads(self.credential_data)
+ return credential
+ except Exception as error:
+ logging.error('failed to load credential data %s: %s',
+ self.id, self.credential_data)
+ logging.exception(error)
+ raise error
+ else:
+ return {}
+
+ @credential.setter
+ def credential(self, value):
+ """credential property setter
+
+ :param value: dict of configuration data needed to update.
+ """
+ if value:
+ try:
+ credential = {}
+ if self.credential_data:
+ credential = json.loads(self.credential_data)
+
+ credential.update(value)
+ self.credential_data = json.dumps(credential)
+
+ except Exception as error:
+ logging.error('failed to dump credential data %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
+
+ else:
+ self.credential_data = json.dumps({})
+
+ logging.debug('switch now is %s', self)
+
+
+class Machine(BASE):
+ """Machine table.
+
+ .. note::
+ currently, we are taking care of management plane.
+ Therefore, we assume one machine is connected to one switch.
+
+ :param id: int, identity as primary key
+ :param mac: string, the MAC address of the machine.
+    :param switch_id: the id of the switch this machine is connected to.
+    :param port: the switch port this machine is connected to.
+    :param vlan: the vlan id this machine is on.
+ :param update_timestamp: last time this entry got updated.
+ :param switch: refer to the Switch the machine connects to.
+ """
+ __tablename__ = 'machine'
+
+ id = Column(Integer, primary_key=True)
+ mac = Column(String(24), default='')
+ port = Column(String(16), default='')
+ vlan = Column(Integer, default=0)
+ update_timestamp = Column(DateTime, default=datetime.now,
+ onupdate=datetime.now)
+ switch_id = Column(Integer, ForeignKey('switch.id',
+ onupdate='CASCADE',
+ ondelete='SET NULL'))
+ __table_args__ = (UniqueConstraint('mac', 'switch_id',
+ name='unique_machine'),)
+ switch = relationship('Switch', backref=backref('machines',
+ lazy='dynamic'))
+
+ def __init__(self, **kwargs):
+ super(Machine, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Machine %r: port=%r vlan=%r switch=%r>' % (
+ self.mac, self.port, self.vlan, self.switch)
+
+
+class HostState(BASE):
+ """The state of the ClusterHost.
+
+ :param id: int, identity as primary key.
+    :param state: Enum. 'UNINITIALIZED': the host is ready to set up.
+                  'INSTALLING': the host is being installed.
+                  'READY': the host is set up.
+                  'ERROR': the host has an error.
+ :param progress: float, the installing progress from 0 to 1.
+ :param message: the latest installing message.
+ :param severity: Enum, the installing message severity.
+ ('INFO', 'WARNING', 'ERROR')
+    :param update_timestamp: the latest timestamp the entry was updated.
+ :param host: refer to ClusterHost.
+ :param os_progress: float, the installing progress of OS from 0 to 1.
+ """
+ __tablename__ = "host_state"
+
+ id = Column(Integer, ForeignKey('cluster_host.id',
+ onupdate='CASCADE',
+ ondelete='CASCADE'),
+ primary_key=True)
+ state = Column(Enum('UNINITIALIZED', 'INSTALLING', 'READY', 'ERROR'),
+ ColumnDefault('UNINITIALIZED'))
+ progress = Column(Float, ColumnDefault(0.0))
+ message = Column(Text)
+ severity = Column(Enum('INFO', 'WARNING', 'ERROR'), ColumnDefault('INFO'))
+ update_timestamp = Column(DateTime, default=datetime.now,
+ onupdate=datetime.now)
+ host = relationship('ClusterHost', backref=backref('state',
+ uselist=False))
+
+ os_progress = Column(Float, ColumnDefault(0.0))
+ os_message = Column(Text)
+ os_severity = Column(
+ Enum('INFO', 'WARNING', 'ERROR'),
+ ColumnDefault('INFO')
+ )
+    # Added by Lei to separate os and package progress:
+    # os_state = Column(
+    #     Enum('UNINITIALIZED', 'INSTALLING', 'OS_READY', 'ERROR'),
+    #     ColumnDefault('UNINITIALIZED'))
+
+ def __init__(self, **kwargs):
+ super(HostState, self).__init__(**kwargs)
+
+ @hybrid_property
+ def hostname(self):
+ """hostname getter"""
+ return self.host.hostname
+
+ @hybrid_property
+ def fullname(self):
+ """fullname getter"""
+ return self.host.fullname
+
+ def __repr__(self):
+ return (
+ '<HostState %r: state=%r, progress=%s, '
+ 'message=%s, severity=%s, os_progress=%s>'
+ ) % (
+ self.hostname, self.state, self.progress,
+ self.message, self.severity, self.os_progress
+ )
+
+
+class ClusterState(BASE):
+ """The state of the Cluster.
+    """User token table."""
+ :param id: int, identity as primary key.
+    :param state: Enum, 'UNINITIALIZED': the cluster is ready to set up.
+                  'INSTALLING': the cluster is being installed.
+                  'READY': the cluster is set up.
+                  'ERROR': the cluster has an error.
+ :param progress: float, the installing progress from 0 to 1.
+ :param message: the latest installing message.
+ :param severity: Enum, the installing message severity.
+ ('INFO', 'WARNING', 'ERROR').
+    :param update_timestamp: the latest timestamp the entry was updated.
+ :param cluster: refer to Cluster.
+ """
+ __tablename__ = 'cluster_state'
+ id = Column(Integer, ForeignKey('cluster.id',
+ onupdate='CASCADE',
+ ondelete='CASCADE'),
+ primary_key=True)
+ state = Column(Enum('UNINITIALIZED', 'INSTALLING', 'READY', 'ERROR'),
+ ColumnDefault('UNINITIALIZED'))
+ progress = Column(Float, ColumnDefault(0.0))
+ message = Column(Text)
+ severity = Column(Enum('INFO', 'WARNING', 'ERROR'), ColumnDefault('INFO'))
+ update_timestamp = Column(DateTime, default=datetime.now,
+ onupdate=datetime.now)
+ cluster = relationship('Cluster', backref=backref('state',
+ uselist=False))
+
+ def __init__(self, **kwargs):
+ super(ClusterState, self).__init__(**kwargs)
+
+ @hybrid_property
+ def clustername(self):
+ """clustername getter"""
+ return self.cluster.name
+
+ def __repr__(self):
+ return (
+ '<ClusterState %r: state=%r, progress=%s, '
+ 'message=%s, severity=%s>'
+ ) % (
+ self.clustername, self.state, self.progress,
+ self.message, self.severity
+ )
+
+
+class Cluster(BASE):
+ """Cluster configuration information.
+
+ :param id: int, identity as primary key.
+ :param name: str, cluster name.
+ :param mutable: bool, if the Cluster is mutable.
+ :param security_config: str stores json formatted security information.
+ :param networking_config: str stores json formatted networking information.
+    :param partition_config: str stores json formatted partition information.
+ :param adapter_id: the refer id in the Adapter table.
+ :param raw_config: str stores json formatted other cluster information.
+ :param adapter: refer to the Adapter.
+ :param state: refer to the ClusterState.
+ """
+ __tablename__ = 'cluster'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True)
+ mutable = Column(Boolean, default=True)
+ security_config = Column(Text)
+ networking_config = Column(Text)
+ partition_config = Column(Text)
+ adapter_id = Column(Integer, ForeignKey('adapter.id',
+ onupdate='CASCADE',
+ ondelete='SET NULL'),
+ nullable=True)
+ raw_config = Column(Text)
+ adapter = relationship("Adapter", backref=backref('clusters',
+ lazy='dynamic'))
+
+ def __init__(self, **kwargs):
+ if 'name' not in kwargs or not kwargs['name']:
+ kwargs['name'] = str(uuid.uuid4())
+
+ super(Cluster, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Cluster %r: config=%r>' % (self.name, self.config)
+
+ @property
+ def partition(self):
+ """partition getter"""
+ if self.partition_config:
+ try:
+ return json.loads(self.partition_config)
+ except Exception as error:
+                logging.error('failed to load partition config %s: %s',
+                              self.id, self.partition_config)
+ logging.exception(error)
+ raise error
+ else:
+ return {}
+
+ @partition.setter
+ def partition(self, value):
+ """partition setter"""
+ logging.debug('cluster %s set partition %s', self.id, value)
+ if value:
+ try:
+ self.partition_config = json.dumps(value)
+ except Exception as error:
+ logging.error('failed to dump partition config %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
+ else:
+ self.partition_config = None
+
+ @property
+ def security(self):
+ """security getter"""
+ if self.security_config:
+ try:
+ return json.loads(self.security_config)
+ except Exception as error:
+ logging.error('failed to load security config %s: %s',
+ self.id, self.security_config)
+ logging.exception(error)
+ raise error
+ else:
+ return {}
+
+ @security.setter
+ def security(self, value):
+ """security setter"""
+ logging.debug('cluster %s set security %s', self.id, value)
+ if value:
+ try:
+ self.security_config = json.dumps(value)
+ except Exception as error:
+ logging.error('failed to dump security config %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
+ else:
+ self.security_config = None
+
+ @property
+ def networking(self):
+ """networking getter"""
+ if self.networking_config:
+ try:
+ return json.loads(self.networking_config)
+ except Exception as error:
+ logging.error('failed to load networking config %s: %s',
+ self.id, self.networking_config)
+ logging.exception(error)
+ raise error
+ else:
+ return {}
+
+ @networking.setter
+ def networking(self, value):
+ """networking setter."""
+ logging.debug('cluster %s set networking %s', self.id, value)
+ if value:
+ try:
+ self.networking_config = json.dumps(value)
+ except Exception as error:
+ logging.error('failed to dump networking config %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
+ else:
+ self.networking_config = None
+
+ @hybrid_property
+ def config(self):
+ """get config from security, networking, partition."""
+ config = {}
+ if self.raw_config:
+ try:
+ config = json.loads(self.raw_config)
+ except Exception as error:
+ logging.error('failed to load raw config %s: %s',
+ self.id, self.raw_config)
+ logging.exception(error)
+ raise error
+
+ util.merge_dict(config, {'security': self.security})
+ util.merge_dict(config, {'networking': self.networking})
+ util.merge_dict(config, {'partition': self.partition})
+ util.merge_dict(config, {'clusterid': self.id,
+ 'clustername': self.name})
+ return config
+
+ @config.setter
+ def config(self, value):
+ """set config to security, networking, partition."""
+ logging.debug('cluster %s set config %s', self.id, value)
+ if not value:
+ self.security = None
+ self.networking = None
+ self.partition = None
+ self.raw_config = None
+ return
+
+ self.security = value.get('security')
+ self.networking = value.get('networking')
+ self.partition = value.get('partition')
+
+ try:
+ self.raw_config = json.dumps(value)
+ except Exception as error:
+ logging.error('failed to dump raw config %s: %s',
+ self.id, value)
+ logging.exception(error)
+ raise error
+
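+# A minimal sketch of the config fan-out above (hypothetical values):
+# assigning cluster.config = {'security': ..., 'networking': ...,
+# 'partition': ...} persists each section to its own column plus
+# raw_config, and reading cluster.config merges the sections back
+# together with 'clusterid' and 'clustername'.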
+
+class ClusterHost(BASE):
+ """ClusterHost information.
+
+ :param id: int, identity as primary key.
+ :param machine_id: int, the id of the Machine.
+ :param cluster_id: int, the id of the Cluster.
+ :param mutable: if the ClusterHost information is mutable.
+ :param hostname: str, host name.
+ :param config_data: string, json formatted config data.
+ :param cluster: refer to Cluster the host in.
+ :param machine: refer to the Machine the host on.
+ :param state: refer to HostState indicates the host state.
+ """
+ __tablename__ = 'cluster_host'
+
+ id = Column(Integer, primary_key=True)
+
+ machine_id = Column(Integer, ForeignKey('machine.id',
+ onupdate='CASCADE',
+ ondelete='CASCADE'),
+ nullable=True, unique=True)
+
+ cluster_id = Column(Integer, ForeignKey('cluster.id',
+ onupdate='CASCADE',
+ ondelete='SET NULL'),
+ nullable=True)
+
+ hostname = Column(String(80))
+ config_data = Column(Text)
+ mutable = Column(Boolean, default=True)
+ __table_args__ = (UniqueConstraint('cluster_id', 'hostname',
+ name='unique_host'),)
+
+ cluster = relationship("Cluster",
+ backref=backref('hosts', lazy='dynamic'))
+ machine = relationship("Machine",
+ backref=backref('host', uselist=False))
+
+ def __init__(self, **kwargs):
+ if 'hostname' not in kwargs or not kwargs['hostname']:
+ kwargs['hostname'] = str(uuid.uuid4())
+
+ super(ClusterHost, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<ClusterHost %r: cluster=%r machine=%r>' % (
+ self.hostname, self.cluster, self.machine)
+
+ @hybrid_property
+ def fullname(self):
+ return '%s.%s' % (self.hostname, self.cluster.id)
+
+ @property
+ def config(self):
+ """config getter."""
+ config = {}
+ try:
+ if self.config_data:
+ config.update(json.loads(self.config_data))
+
+ config.update({
+ 'hostid': self.id,
+ 'hostname': self.hostname,
+ })
+ if self.cluster:
+ config.update({
+ 'clusterid': self.cluster.id,
+ 'clustername': self.cluster.name,
+ 'fullname': self.fullname,
+ })
+
+ if self.machine:
+ util.merge_dict(
+ config, {
+ 'networking': {
+ 'interfaces': {
+ 'management': {
+ 'mac': self.machine.mac
+ }
+ }
+ },
+ 'switch_port': self.machine.port,
+ 'vlan': self.machine.vlan,
+ })
+ if self.machine.switch:
+ util.merge_dict(
+ config, {'switch_ip': self.machine.switch.ip})
+
+ except Exception as error:
+ logging.error('failed to load config %s: %s',
+ self.hostname, self.config_data)
+ logging.exception(error)
+ raise error
+
+ return config
+
+ @config.setter
+ def config(self, value):
+ """config setter"""
+ if not self.config_data:
+ config = {
+ }
+ self.config_data = json.dumps(config)
+
+ if value:
+ try:
+ config = json.loads(self.config_data)
+ util.merge_dict(config, value)
+
+ self.config_data = json.dumps(config)
+ except Exception as error:
+ logging.error('failed to dump config %s: %s',
+ self.hostname, value)
+ logging.exception(error)
+ raise error
+
+
+class LogProgressingHistory(BASE):
+ """host installing log history for each file.
+
+ :param id: int, identity as primary key.
+ :param pathname: str, the full path of the installing log file. unique.
+ :param position: int, the position of the log file it has processed.
+ :param partial_line: str, partial line of the log.
+    :param progress: float, the installing progress between 0 and 1.
+    :param message: str, the installing message.
+    :param severity: Enum, the installing message severity.
+                     ('ERROR', 'WARNING', 'INFO')
+    :param line_matcher_name: str, the line matcher name of the log processor.
+    :param update_timestamp: datetime, the latest timestamp the entry was
+                             updated.
+ """
+ __tablename__ = 'log_progressing_history'
+ id = Column(Integer, primary_key=True)
+ pathname = Column(String(80), unique=True)
+ position = Column(Integer, ColumnDefault(0))
+ partial_line = Column(Text)
+ progress = Column(Float, ColumnDefault(0.0))
+ message = Column(Text)
+ severity = Column(Enum('ERROR', 'WARNING', 'INFO'), ColumnDefault('INFO'))
+ line_matcher_name = Column(String(80), ColumnDefault('start'))
+ update_timestamp = Column(DateTime, default=datetime.now,
+ onupdate=datetime.now)
+
+ def __init__(self, **kwargs):
+ super(LogProgressingHistory, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return (
+ 'LogProgressingHistory[%r: position %r,'
+ 'partial_line %r,progress %r,message %r,'
+ 'severity %r]'
+ ) % (
+ self.pathname, self.position,
+ self.partial_line,
+ self.progress,
+ self.message,
+ self.severity
+ )
+
+
+class Adapter(BASE):
+ """Table stores ClusterHost installing Adapter information.
+
+ :param id: int, identity as primary key.
+ :param name: string, adapter name, unique.
+ :param os: string, os name for installing the host.
+ :param target_system: string, target system to be installed on the host.
+ :param clusters: refer to the list of Cluster.
+ """
+ __tablename__ = 'adapter'
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True)
+ os = Column(String(80))
+ target_system = Column(String(80))
+ __table_args__ = (
+ UniqueConstraint('os', 'target_system', name='unique_adapter'),)
+
+ def __init__(self, **kwargs):
+ super(Adapter, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Adapter %r: os %r, target_system %r>' % (
+ self.name, self.os, self.target_system
+ )
+
+
+class Role(BASE):
+    """The Role table stores available roles of one target system.
+
+ .. note::
+ the host can be deployed to one or several roles in the cluster.
+
+ :param id: int, identity as primary key.
+ :param name: role name.
+ :param target_system: str, the target_system.
+ :param description: str, the description of the role.
+ """
+ __tablename__ = 'role'
+ id = Column(Integer, primary_key=True)
+ name = Column(String(80), unique=True)
+ target_system = Column(String(80))
+ description = Column(Text)
+
+ def __init__(self, **kwargs):
+ super(Role, self).__init__(**kwargs)
+
+ def __repr__(self):
+ return '<Role %r : target_system %r, description:%r>' % (
+ self.name, self.target_system, self.description)
diff --git a/compass-tasks/db/validator.py b/compass-tasks/db/validator.py
new file mode 100644
index 0000000..730bb52
--- /dev/null
+++ b/compass-tasks/db/validator.py
@@ -0,0 +1,195 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Validator methods."""
+import logging
+import netaddr
+import re
+import socket
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+def is_valid_ip(name, ip_addr, **kwargs):
+    """Validate the format of an IP address."""
+ if isinstance(ip_addr, list):
+ return all([
+ is_valid_ip(name, item, **kwargs) for item in ip_addr
+ ])
+ try:
+ netaddr.IPAddress(ip_addr)
+ except Exception:
+ logging.debug('%s invalid ip addr %s', name, ip_addr)
+ return False
+ return True
+
+
+def is_valid_network(name, ip_network, **kwargs):
+    """Validate the format of an IP network."""
+ if isinstance(ip_network, list):
+ return all([
+ is_valid_network(name, item, **kwargs) for item in ip_network
+ ])
+ try:
+ netaddr.IPNetwork(ip_network)
+ except Exception:
+ logging.debug('%s invalid network %s', name, ip_network)
+ return False
+ return True
+
+
+def is_valid_netmask(name, ip_addr, **kwargs):
+    """Validate the format of a netmask."""
+ if isinstance(ip_addr, list):
+ return all([
+ is_valid_netmask(name, item, **kwargs) for item in ip_addr
+ ])
+    if not is_valid_ip(name, ip_addr):
+ return False
+ ip = netaddr.IPAddress(ip_addr)
+ if ip.is_netmask():
+ return True
+ logging.debug('%s invalid netmask %s', name, ip_addr)
+ return False
+
+
+def is_valid_gateway(name, ip_addr, **kwargs):
+    """Validate the format of a gateway."""
+ if isinstance(ip_addr, list):
+ return all([
+ is_valid_gateway(name, item, **kwargs) for item in ip_addr
+ ])
+    if not is_valid_ip(name, ip_addr):
+ return False
+ ip = netaddr.IPAddress(ip_addr)
+ if ip.is_private() or ip.is_public():
+ return True
+ logging.debug('%s invalid gateway %s', name, ip_addr)
+ return False
+
+
+def is_valid_dns(name, dns, **kwargs):
+    """Validate the format of DNS."""
+ if isinstance(dns, list):
+ return all([is_valid_dns(name, item, **kwargs) for item in dns])
+    if is_valid_ip(name, dns):
+ return True
+ try:
+ socket.gethostbyname_ex(dns)
+ except Exception:
+ logging.debug('%s invalid dns name %s', name, dns)
+ return False
+ return True
+
+
+def is_valid_url(name, url, **kwargs):
+    """Validate the format of a url."""
+ if isinstance(url, list):
+ return all([
+ is_valid_url(name, item, **kwargs) for item in url
+ ])
+ if re.match(
+ r'^(http|https|ftp)://([0-9A-Za-z_-]+)(\.[0-9a-zA-Z_-]+)*'
+ r'(:\d+)?(/[0-9a-zA-Z_-]+)*$',
+ url
+ ):
+ return True
+ logging.debug(
+ '%s invalid url %s', name, url
+ )
+ return False
+
+
+def is_valid_domain(name, domain, **kwargs):
+ """Validate the format of domain."""
+ if isinstance(domain, list):
+ return all([
+ is_valid_domain(name, item, **kwargs) for item in domain
+ ])
+ if re.match(
+ r'^([0-9a-zA-Z_-]+)(\.[0-9a-zA-Z_-]+)*$',
+ domain
+ ):
+ return True
+ logging.debug(
+ '%s invalid domain %s', name, domain
+ )
+ return False
+
+
+def is_valid_username(name, username, **kwargs):
+    """Validate the format of username."""
+    if bool(username):
+        return True
+    logging.debug(
+        '%s username is empty', name
+    )
+    return False
+
+
+def is_valid_password(name, password, **kwargs):
+    """Validate the format of password."""
+ if bool(password):
+ return True
+ logging.debug('%s password is empty', name)
+ return False
+
+
+def is_valid_partition(name, partition, **kwargs):
+    """Validate the format of partition name."""
+ if name != 'swap' and not name.startswith('/'):
+ logging.debug(
+            '%s does not start with / and is not swap', name
+ )
+ return False
+ if 'size' not in partition and 'percentage' not in partition:
+ logging.debug(
+            '%s partition does not contain size or percentage',
+ name
+ )
+ return False
+ return True
+
+
+def is_valid_percentage(name, percentage, **kwargs):
+    """Validate the percentage."""
+    if 0 <= percentage <= 100:
+        return True
+    logging.debug('%s invalid percentage %s', name, percentage)
+    return False
+
+
+def is_valid_port(name, port, **kwargs):
+    """Validate the format of port."""
+    if 0 < port < 65536:
+        return True
+    logging.debug('%s invalid port %s', name, port)
+    return False
+
+
+def is_valid_size(name, size, **kwargs):
+    """Validate the format of a size string such as 10G."""
+    if re.match(r'^(\d+)(K|M|G|T)$', size):
+        return True
+    logging.debug('%s invalid size %s', name, size)
+    return False
+
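+# A minimal usage sketch of the validators above (hypothetical names
+# and values):
+#
+#   is_valid_netmask('management', '255.255.255.0')  # -> True
+#   is_valid_size('root', '100G')                    # -> True
+#   is_valid_port('ssh', 70000)                      # -> False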
+
+VALIDATOR_GLOBALS = globals()
+VALIDATOR_LOCALS = locals()
+VALIDATOR_CONFIGS = util.load_configs(
+ setting.VALIDATOR_DIR,
+ config_name_suffix='.py',
+ env_globals=VALIDATOR_GLOBALS,
+ env_locals=VALIDATOR_LOCALS
+)
+for validator_config in VALIDATOR_CONFIGS:
+ VALIDATOR_LOCALS.update(validator_config)
diff --git a/compass-tasks/deployment/__init__.py b/compass-tasks/deployment/__init__.py
new file mode 100644
index 0000000..cbd36e0
--- /dev/null
+++ b/compass-tasks/deployment/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
diff --git a/compass-tasks/deployment/deploy_manager.py b/compass-tasks/deployment/deploy_manager.py
new file mode 100644
index 0000000..baf7cd6
--- /dev/null
+++ b/compass-tasks/deployment/deploy_manager.py
@@ -0,0 +1,237 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
+
+"""Module to get configs from the provider and installers and to update
+   them in the provider and installers.
+"""
+from compass.deployment.installers.installer import OSInstaller
+from compass.deployment.installers.installer import PKInstaller
+from compass.deployment.utils import constants as const
+from compass.utils import util
+
+
+import logging
+
+
+class DeployManager(object):
+ """Deploy manager module."""
+ def __init__(self, adapter_info, cluster_info, hosts_info):
+ """Init deploy manager."""
+ self.os_installer = None
+ self.pk_installer = None
+
+ # Get OS installer
+ os_installer_name = adapter_info[const.OS_INSTALLER][const.NAME]
+ self.os_installer = DeployManager._get_installer(OSInstaller,
+ os_installer_name,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ # Get package installer
+ pk_info = adapter_info.setdefault(const.PK_INSTALLER, {})
+ if pk_info:
+ pk_installer_name = pk_info[const.NAME]
+ self.pk_installer = DeployManager._get_installer(PKInstaller,
+ pk_installer_name,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ @staticmethod
+ def _get_installer(installer_type, name, adapter_info, cluster_info,
+ hosts_info):
+ """Get installer instance."""
+        installer = installer_type.get_installer(
+            name, adapter_info, cluster_info, hosts_info
+        )
+
+ return installer
+
+ def deploy(self):
+ """Deploy the cluster."""
+ deployed_config = self.deploy_os()
+ package_deployed_config = self.deploy_target_system()
+
+ util.merge_dict(deployed_config, package_deployed_config)
+
+ return deployed_config
+
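+    # A minimal orchestration sketch (hypothetical info dicts shaped like
+    # the ones produced by the db api):
+    #
+    #   manager = DeployManager(adapter_info, cluster_info, hosts_info)
+    #   manager.prepare_for_deploy()   # clear stale installer progress
+    #   deployed_config = manager.deploy()  # OS first, then target system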
+ def check_cluster_health(self, callback_url):
+ logging.info("DeployManager check_cluster_health...........")
+ self.pk_installer.check_cluster_health(callback_url)
+
+ def clean_progress(self):
+ """Clean previous installation log and progress."""
+        self.clean_os_installation_progress()
+ self.clean_package_installation_progress()
+
+    def clean_os_installation_progress(self):
+ # OS installer cleans previous installing progress.
+ if self.os_installer:
+ self.os_installer.clean_progress()
+
+ def clean_package_installation_progress(self):
+ # Package installer cleans previous installing progress.
+ if self.pk_installer:
+ self.pk_installer.clean_progress()
+
+ def prepare_for_deploy(self):
+ self.clean_progress()
+
+ def deploy_os(self):
+        """Deploy the OS to the hosts in the cluster that need it.
+
+ Return OS deployed config.
+ """
+ if not self.os_installer:
+ return {}
+
+ pk_installer_config = {}
+ if self.pk_installer:
+ # generate target system config which will be installed by OS
+ # installer right after OS installation is completed.
+ pk_installer_config = self.pk_installer.generate_installer_config()
+ logging.debug('[DeployManager]package installer config is %s',
+ pk_installer_config)
+
+ # Send package installer config info to OS installer.
+ self.os_installer.set_package_installer_config(pk_installer_config)
+
+ # start to deploy OS
+ return self.os_installer.deploy()
+
+ def deploy_target_system(self):
+ """Deploy target system to all hosts in the cluster.
+
+ Return package deployed config.
+ """
+ if not self.pk_installer:
+ return {}
+
+ return self.pk_installer.deploy()
+
+ def redeploy_os(self):
+ """Redeploy OS for this cluster without changing configurations."""
+ if not self.os_installer:
+ logging.info("Redeploy_os: No OS installer found!")
+ return
+
+        logging.info("Start to redeploy OS for cluster.")
+        self.os_installer.redeploy()
+
+ def redeploy_target_system(self):
+ """Redeploy target system for the cluster without changing config."""
+ if not self.pk_installer:
+ logging.info("Redeploy_target_system: No installer found!")
+ return
+
+        logging.info("Start to redeploy target system.")
+        self.pk_installer.deploy()
+
+ def redeploy(self):
+ """Redeploy the cluster without changing configurations."""
+ self.redeploy_os()
+ self.redeploy_target_system()
+
+ def remove_hosts(self, package_only=False, delete_cluster=False):
+        """Remove hosts from the OS and/or package installer server side."""
+ if self.os_installer and not package_only:
+ self.os_installer.delete_hosts()
+
+ if self.pk_installer:
+ self.pk_installer.delete_hosts(delete_cluster=delete_cluster)
+
+ def os_installed(self):
+ if self.os_installer:
+ self.os_installer.ready()
+ if self.pk_installer:
+ self.pk_installer.os_ready()
+
+ def cluster_os_installed(self):
+ if self.os_installer:
+ self.os_installer.cluster_ready()
+ if self.pk_installer:
+ self.pk_installer.cluster_os_ready()
+
+ def package_installed(self):
+ if self.pk_installer:
+ self.pk_installer.ready()
+
+ def cluster_installed(self):
+ if self.pk_installer:
+ self.pk_installer.cluster_ready()
+
+
+class Patcher(DeployManager):
+ """Patcher Module."""
+ def __init__(self, adapter_info, cluster_info, hosts_info, cluster_hosts):
+ self.pk_installer = None
+ self.cluster_info = cluster_info
+ registered_roles = cluster_info['flavor']['roles']
+
+ pk_info = adapter_info.setdefault(const.PK_INSTALLER, {})
+ if pk_info:
+ pk_installer_name = pk_info[const.NAME]
+ self.pk_installer = Patcher._get_installer(PKInstaller,
+ pk_installer_name,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ patched_role_mapping = {}
+ for role in registered_roles:
+ patched_role_mapping[role] = []
+ for host in cluster_hosts:
+ if len(host['patched_roles']) == 0:
+ continue
+ for role in host['patched_roles']:
+ patched_role_mapping[role['name']].append(host)
+ self.patched_role_mapping = patched_role_mapping
+
+ def patch(self):
+ patched_config = self.pk_installer.patch(self.patched_role_mapping)
+
+ return patched_config
+
+
+class PowerManager(object):
+ """Manage host to power on, power off, and reset."""
+
+ def __init__(self, adapter_info, cluster_info, hosts_info):
+ os_installer_name = adapter_info[const.OS_INSTALLER][const.NAME]
+ self.os_installer = DeployManager._get_installer(OSInstaller,
+ os_installer_name,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ def poweron(self):
+ if not self.os_installer:
+ logging.info("No OS installer found, cannot power on machine!")
+ return
+ self.os_installer.poweron()
+
+ def poweroff(self):
+ if not self.os_installer:
+            logging.info("No OS installer found, cannot power off machine!")
+ return
+ self.os_installer.poweroff()
+
+ def reset(self):
+ if not self.os_installer:
+            logging.info("No OS installer found, cannot reset machine!")
+ return
+ self.os_installer.reset()
diff --git a/compass-tasks/deployment/installers/__init__.py b/compass-tasks/deployment/installers/__init__.py
new file mode 100644
index 0000000..0296be5
--- /dev/null
+++ b/compass-tasks/deployment/installers/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
+
+
+"""Modules to read/write cluster/host config from installers.
+
+ .. moduleauthor:: Grace Yu <grace.yu@huawei.com>
+"""
diff --git a/compass-tasks/deployment/installers/config_manager.py b/compass-tasks/deployment/installers/config_manager.py
new file mode 100644
index 0000000..597c3a6
--- /dev/null
+++ b/compass-tasks/deployment/installers/config_manager.py
@@ -0,0 +1,527 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "baigk (baiguoku@huawei.com)"
+
+from collections import defaultdict
+from copy import deepcopy
+import json
+import logging
+import netaddr
+
+from compass.deployment.utils import constants as const
+
+ip_generator_map = {}
+
+
+def get_ip_addr(ip_ranges):
+ def _get_ip_addr():
+ for ip_range in ip_ranges:
+ for ip in netaddr.iter_iprange(*ip_range):
+ yield str(ip)
+
+    s = json.dumps(ip_ranges)
+    if s not in ip_generator_map:
+        ip_generator_map[s] = _get_ip_addr()
+    return ip_generator_map[s]
+
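
The cache keyed on the serialized ranges means repeated calls keep drawing from one shared generator, so addresses are handed out sequentially across callers. A sketch of the behaviour, assuming the Python 2 generator API used elsewhere in this patch:

    ranges = [('10.1.0.1', '10.1.0.3')]   # hypothetical range
    get_ip_addr(ranges).next()            # '10.1.0.1'
    get_ip_addr(ranges).next()            # '10.1.0.2' -- same cached generator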
+
+class AdapterInfo(object):
+ def __init__(self, adapter_info):
+ self.adapter_info = adapter_info
+ self.name = self.adapter_info.get(const.NAME)
+ self.dist_system_name = self.name
+ self.health_check_cmd = self.adapter_info.get(const.HEALTH_CHECK_CMD)
+
+ self.os_installer = self.adapter_info.setdefault(
+ const.OS_INSTALLER, {}
+ )
+ self.os_installer.setdefault(const.INSTALLER_SETTINGS, {})
+
+ self.package_installer = self.adapter_info.setdefault(
+ const.PK_INSTALLER, {}
+ )
+ self.package_installer.setdefault(const.INSTALLER_SETTINGS, {})
+
+ self.metadata = self.adapter_info.setdefault(const.METADATA, {})
+ self.os_metadata = self.metadata.setdefault(const.OS_CONFIG, {})
+ self.package_metadata = self.metadata.setdefault(const.PK_CONFIG, {})
+
+ self.flavors = dict([(f[const.FLAVOR_NAME], f)
+ for f in self.adapter_info.get(const.FLAVOR, [])])
+
+ @property
+ def flavor_list(self):
+ return self.flavors.values()
+
+ def get_flavor(self, flavor_name):
+ return self.flavors.get(flavor_name)
+
+
+class ClusterInfo(object):
+ def __init__(self, cluster_info):
+ self.cluster_info = cluster_info
+ self.id = self.cluster_info.get(const.ID)
+ self.name = self.cluster_info.get(const.NAME)
+ self.os_version = self.cluster_info.get(const.OS_VERSION)
+ self.flavor = self.cluster_info.setdefault(
+ const.FLAVOR, {}
+ )
+ self.os_config = self.cluster_info.setdefault(
+ const.OS_CONFIG, {}
+ )
+ self.package_config = self.cluster_info.setdefault(
+ const.PK_CONFIG, {}
+ )
+ self.deployed_os_config = self.cluster_info.setdefault(
+ const.DEPLOYED_OS_CONFIG, {}
+ )
+ self.deployed_package_config = self.cluster_info.setdefault(
+ const.DEPLOYED_PK_CONFIG, {}
+ )
+ self.network_mapping = self.package_config.setdefault(
+ const.NETWORK_MAPPING, {}
+ )
+
+ os_config_general = self.os_config.setdefault(
+ const.OS_CONFIG_GENERAL, {}
+ )
+ self.domain = os_config_general.setdefault(const.DOMAIN, None)
+ self.hosts = []
+
+ def add_host(self, host):
+ self.hosts.append(host)
+
+ @property
+ def roles_mapping(self):
+ deploy_config = self.deployed_package_config
+ return deploy_config.setdefault(
+ const.ROLES_MAPPING, self._get_cluster_roles_mapping()
+ )
+
+ def _get_cluster_roles_mapping(self):
+ """The ouput format will be as below, for example:
+
+ {
+ "controller": [{
+ "hostname": "xxx",
+ "management": {
+ "interface": "eth0",
+ "ip": "192.168.1.10",
+ "netmask": "255.255.255.0",
+ "subnet": "192.168.1.0/24",
+ "is_mgmt": True,
+ "is_promiscuous": False
+ },
+ ...
+ }],
+ ...
+ }
+ """
+ mapping = defaultdict(list)
+ for host in self.hosts:
+ for role, value in host.roles_mapping.iteritems():
+ mapping[role].append(value)
+
+ return dict(mapping)
+
+ def _get_cluster_patched_roles_mapping(self):
+ mapping = defaultdict(list)
+ for host in self.hosts:
+ for role, value in host.patched_roles_mapping.iteritems():
+ mapping[role].append(value)
+
+ return dict(mapping)
+
+ @property
+ def base_info(self):
+ return {
+ const.ID: self.id,
+ const.NAME: self.name,
+ const.OS_VERSION: self.os_version
+ }
+
+
+class HostInfo(object):
+ def __init__(self, host_info, cluster_info):
+ self.host_info = host_info
+ self.cluster_info = cluster_info
+ self.id = self.host_info.get(const.ID)
+ self.name = self.host_info.get(const.NAME)
+ self.mac = self.host_info.get(const.MAC_ADDR)
+ self.hostname = self.host_info.get(const.HOSTNAME)
+ self.networks = self.host_info.setdefault(const.NETWORKS, {})
+ self.os_config = self.host_info.setdefault(const.OS_CONFIG, {})
+
+ self.package_config = self.host_info.setdefault(const.PK_CONFIG, {})
+ self.roles = self.host_info.setdefault(const.ROLES, [])
+ self.patched_roles = self.host_info.setdefault(const.PATCHED_ROLES, [])
+ self.ipmi = deepcopy(self.host_info.setdefault(const.IPMI, {}))
+ self.reinstall_os_flag = self.host_info.get(const.REINSTALL_OS_FLAG)
+ self.deployed_os_config = self.host_info.setdefault(
+ const.DEPLOYED_OS_CONFIG, {}
+ )
+ self.deployed_package_config = self.host_info.setdefault(
+ const.DEPLOYED_PK_CONFIG, {}
+ )
+
+ os_general_config = self.os_config.setdefault(
+ const.OS_CONFIG_GENERAL, {}
+ )
+ domain = os_general_config.setdefault(const.DOMAIN, None)
+ if domain is None:
+ self.domain = self.cluster_info.domain
+ else:
+ self.domain = domain
+
+ if const.DNS in host_info:
+ self.dns = host_info[const.DNS]
+ else:
+ self.dns = '.'.join((self.hostname, self.domain))
+
+ if const.NETWORK_MAPPING not in self.package_config:
+ self.network_mapping = self.cluster_info.network_mapping
+ else:
+ self.network_mapping = self.package_config[const.NETWORK_MAPPING]
+
+ if const.ROLES_MAPPING not in self.deployed_package_config:
+ self.roles_mapping = self._get_host_roles_mapping()
+ self.deployed_package_config[
+ const.ROLES_MAPPING
+ ] = self.roles_mapping
+ else:
+ self.roles_mapping = \
+ self.deployed_package_config[const.ROLES_MAPPING]
+
+ self.patched_roles_mapping = self._get_host_patched_roles_mapping()
+
+ self.cluster_info.add_host(self)
+
+ def valid_interface(self, interface):
+ if interface not in self.networks:
+ raise RuntimeError("interface %s is invalid" % interface)
+
+ def get_interface(self, interface):
+ self.valid_interface(interface)
+ return self.networks[interface]
+
+ def get_interface_ip(self, interface):
+ return self.get_interface(interface).get(const.IP_ADDR)
+
+ def get_interface_netmask(self, interface):
+ return self.get_interface(interface).get(const.NETMASK)
+
+ def get_interface_subnet(self, interface):
+ return self.get_interface(interface).get(const.SUBNET)
+
+ def is_interface_promiscuous(self, interface):
+ return self.get_interface(interface).get(const.PROMISCUOUS_FLAG)
+
+ def is_interface_mgmt(self, interface):
+ return self.get_interface(interface).get(const.MGMT_NIC_FLAG)
+
+ def _get_host_roles_mapping(self):
+ if not self.network_mapping:
+ return {}
+
+ net_info = {const.HOSTNAME: self.hostname}
+ for k, v in self.network_mapping.items():
+ try:
+ net_info[k] = self.networks[v[const.NIC]]
+ net_info[k][const.NIC] = v[const.NIC]
+ except Exception:
+ pass
+
+ mapping = {}
+ for role in self.roles:
+ role = role.replace("-", "_")
+ mapping[role] = net_info
+
+ return mapping
+
+ def _get_host_patched_roles_mapping(self):
+ if not self.network_mapping:
+ return {}
+
+ net_info = {const.HOSTNAME: self.hostname}
+ for k, v in self.network_mapping.items():
+ try:
+ net_info[k] = self.networks[v[const.NIC]]
+ net_info[k][const.NIC] = v[const.NIC]
+ except Exception:
+ pass
+
+ mapping = {}
+ for role in self.patched_roles:
+ role = role['name'].replace("-", "_")
+ mapping[role] = net_info
+
+ return mapping
+
+ @property
+ def baseinfo(self):
+ return {
+ const.REINSTALL_OS_FLAG: self.reinstall_os_flag,
+ const.MAC_ADDR: self.mac,
+ const.NAME: self.name,
+ const.HOSTNAME: self.hostname,
+ const.DNS: self.dns,
+ const.NETWORKS: deepcopy(self.networks)
+ }
+
+
+class BaseConfigManager(object):
+ def __init__(self, adapter_info={}, cluster_info={}, hosts_info={}):
+ assert(adapter_info and isinstance(adapter_info, dict))
+ assert(cluster_info and isinstance(cluster_info, dict))
+ assert(hosts_info and isinstance(hosts_info, dict))
+
+ self.adapter_info = AdapterInfo(adapter_info)
+ self.cluster_info = ClusterInfo(cluster_info)
+ self.hosts_info = dict([(k, HostInfo(v, self.cluster_info))
+ for k, v in hosts_info.iteritems()])
+
+ def get_adapter_name(self):
+ return self.adapter_info.name
+
+ def get_dist_system_name(self):
+ return self.adapter_info.dist_system_name
+
+ def get_adapter_health_check_cmd(self):
+ return self.adapter_info.health_check_cmd
+
+ def get_os_installer_settings(self):
+ return self.adapter_info.os_installer[const.INSTALLER_SETTINGS]
+
+ def get_pk_installer_settings(self):
+ return self.adapter_info.package_installer[const.INSTALLER_SETTINGS]
+
+ def get_os_config_metadata(self):
+ return self.adapter_info.metadata[const.OS_CONFIG]
+
+ def get_pk_config_meatadata(self):
+ return self.adapter_info.metadata[const.PK_CONFIG]
+
+ def get_adapter_all_flavors(self):
+ return self.adapter_info.flavor_list
+
+ def get_adapter_flavor(self, flavor_name):
+ return self.adapter_info.get_flavor(flavor_name)
+
+ def get_cluster_id(self):
+ return self.cluster_info.id
+
+ def get_clustername(self):
+ return self.cluster_info.name
+
+ def get_os_version(self):
+ return self.cluster_info.os_version
+
+ def get_cluster_os_config(self):
+ return self.cluster_info.os_config
+
+ def get_cluster_baseinfo(self):
+ return self.cluster_info.base_info
+
+ def get_cluster_flavor_name(self):
+ return self.cluster_info.flavor.get(const.FLAVOR_NAME)
+
+ def get_cluster_flavor_roles(self):
+ return self.cluster_info.flavor.get(const.ROLES, [])
+
+ def get_cluster_flavor_template(self):
+ return self.cluster_info.flavor.get(const.TMPL)
+
+ def get_cluster_package_config(self):
+ return self.cluster_info.package_config
+
+ def get_cluster_network_mapping(self):
+ mapping = self.cluster_info.network_mapping
+ logging.info("Network mapping in the config is '%s'!", mapping)
+ return mapping
+
+ def get_cluster_deployed_os_config(self):
+ return self.cluster_info.deployed_os_config
+
+ def get_cluster_deployed_package_config(self):
+ return self.cluster_info.deployed_package_config
+
+ def get_cluster_roles_mapping(self):
+ return self.cluster_info.roles_mapping
+
+ def get_cluster_patched_roles_mapping(self):
+ return self.cluster_info._get_cluster_patched_roles_mapping()
+
+ def validate_host(self, host_id):
+ if host_id not in self.hosts_info:
+ raise RuntimeError("host_id %s is invalid" % host_id)
+
+ def get_host_id_list(self):
+ return self.hosts_info.keys()
+
+ def get_hosts_id_list_for_os_installation(self):
+ """Get info of hosts which need to install/reinstall OS."""
+ return [
+ id for id, info in self.hosts_info.items()
+ if info.reinstall_os_flag
+ ]
+
+ def get_server_credentials(self):
+ cluster_os_config = self.get_cluster_os_config()
+ if not cluster_os_config:
+ logging.info("cluster os_config is None!")
+ return ()
+
+ username = cluster_os_config[const.SERVER_CREDS][const.USERNAME]
+ password = cluster_os_config[const.SERVER_CREDS][const.PASSWORD]
+ return (username, password)
+
+ def _get_host_info(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id]
+
+ def get_host_baseinfo(self, host_id):
+ self.validate_host(host_id)
+ host_info = self.hosts_info[host_id]
+ return host_info.baseinfo
+
+ def get_host_fullname(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].name
+
+ def get_host_dns(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].dns
+
+ def get_host_mac_address(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].mac
+
+ def get_hostname(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].hostname
+
+ def get_host_networks(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].networks
+
+ def get_host_interfaces(self, host_id):
+ # get interface names
+ return self.get_host_networks(host_id).keys()
+
+ def get_host_interface_ip(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].get_interface_ip(interface)
+
+ def get_host_interface_netmask(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].get_interface_netmask(interface)
+
+ def get_host_interface_subnet(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].get_interface_subnet(interface)
+
+ def is_interface_promiscuous(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].is_interface_promiscuous(interface)
+
+ def is_interface_mgmt(self, host_id, interface):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].is_interface_mgmt(interface)
+
+ def get_host_os_config(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].os_config
+
+ def get_host_domain(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].domain
+
+ def get_host_network_mapping(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].network_mapping
+
+ def get_host_package_config(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].package_config
+
+ def get_host_deployed_os_config(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].deployed_os_config
+
+ def get_host_deployed_package_config(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].deployed_package_config
+
+ def get_host_roles(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].roles
+
+ def get_all_hosts_roles(self, hosts_id_list=None):
+ roles = []
+ for host_id, host_info in self.hosts_info.iteritems():
+ roles.extend(host_info.roles)
+
+ return list(set(roles))
+
+ def get_hosts_ip_settings(self, ip_settings, sys_intf_mappings):
+ logging.info(
+ "get_hosts_ip_settings:ip_settings=%s, sys_intf_mappings=%s" %
+ (ip_settings, sys_intf_mappings)
+ )
+
+ intf_alias = {}
+ for m in sys_intf_mappings:
+ if "vlan_tag" in m:
+ intf_alias[m["name"]] = m["name"]
+ else:
+ intf_alias[m["name"]] = m["interface"]
+
+ mappings = {}
+ hosts_id_list = self.get_host_id_list()
+ for host_id in hosts_id_list:
+ hostname = self.get_hostname(host_id)
+ mappings[hostname] = []
+ for ip_info in ip_settings:
+ logging.info("ip_info=%s" % ip_info)
+ new_ip_info = deepcopy(ip_info)
+ del new_ip_info["ip_ranges"]
+
+ ip_ranges = ip_info["ip_ranges"]
+ new_ip_info["netmask"] = netaddr.IPNetwork(
+ ip_info["cidr"]
+ ).netmask.bin.count("1")
+ new_ip_info["ip"] = get_ip_addr(ip_ranges).next()
+ new_ip_info["alias"] = intf_alias[ip_info["name"]]
+ mappings[hostname].append(new_ip_info)
+
+ return {"ip_settings": mappings}
+
+ def get_host_roles_mapping(self, host_id):
+ self.validate_host(host_id)
+ return self.hosts_info[host_id].roles_mapping
+
+ def get_host_ipmi_info(self, host_id):
+ self.validate_host(host_id)
+ if self.hosts_info[host_id].ipmi:
+ return (
+ self.hosts_info[host_id].ipmi[const.IP_ADDR],
+ self.hosts_info[host_id].ipmi
+ [const.IPMI_CREDS][const.USERNAME],
+ self.hosts_info[host_id].ipmi
+ [const.IPMI_CREDS][const.USERNAME])
+ else:
+ return (None, None, None)
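
A hedged construction sketch for BaseConfigManager; the key names follow deployment/utils/constants.py later in this patch, and all values are hypothetical:

    adapter = {'name': 'openstack_ansible'}
    cluster = {
        'id': 1, 'name': 'cluster01', 'os_name': 'CentOS-7-x86_64',
        'os_config': {'general': {'domain': 'ods.com'}},
    }
    hosts = {
        1: {'id': 1, 'name': 'host01', 'hostname': 'host01',
            'mac': '00:11:22:33:44:55', 'reinstall_os': True},
    }
    cm = BaseConfigManager(adapter, cluster, hosts)
    cm.get_host_dns(1)                            # 'host01.ods.com'
    cm.get_hosts_id_list_for_os_installation()    # [1]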
diff --git a/compass-tasks/deployment/installers/installer.py b/compass-tasks/deployment/installers/installer.py
new file mode 100644
index 0000000..cfeb9e8
--- /dev/null
+++ b/compass-tasks/deployment/installers/installer.py
@@ -0,0 +1,291 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
+
+
+"""Module to provider installer interface.
+"""
+from Cheetah.Template import Template
+from copy import deepcopy
+import imp
+import logging
+import os
+import simplejson as json
+
+from compass.deployment.installers.config_manager import BaseConfigManager
+from compass.utils import setting_wrapper as compass_setting
+from compass.utils import util
+
+
+CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
+
+
+class BaseInstaller(object):
+ """Interface for installer."""
+ NAME = 'installer'
+
+ def __repr__(self):
+ return '%r[%r]' % (self.__class__.__name__, self.NAME)
+
+ def deploy(self, **kwargs):
+ """virtual method to start installing process."""
+ raise NotImplementedError
+
+ def clean_progress(self, **kwargs):
+ raise NotImplementedError
+
+ def delete_hosts(self, **kwargs):
+ """Delete hosts from installer server."""
+ raise NotImplementedError
+
+ def redeploy(self, **kwargs):
+ raise NotImplementedError
+
+ def ready(self, **kwargs):
+ pass
+
+ def cluster_ready(self, **kwargs):
+ pass
+
+ def get_tmpl_vars_from_metadata(self, metadata, config):
+ """Get variables dictionary for rendering templates from metadata.
+
+ :param dict metadata: The metadata dictionary.
+        :param dict config: The config dictionary the metadata describes.
+ """
+ template_vars = {}
+ self._get_tmpl_vars_helper(metadata, config, template_vars)
+
+ return template_vars
+
+ def _get_key_mapping(self, metadata, key, is_regular_key):
+ """Get the keyword which the input key maps to.
+
+ This keyword will be added to dictionary used to render templates.
+
+        If the key in metadata has a mapping to another keyword which is
+        used for templates, then return this keyword. If the key starts
+        with '$', which denotes a variable in metadata, return the key
+        itself as the mapping keyword. If the key has no mapping, return
+        None.
+
+ :param dict metadata: metadata/submetadata dictionary.
+ :param str key: The keyword defined in metadata.
+ :param bool is_regular_key: False when the key defined in metadata
+            is a variable (starting with '$').
+ """
+ mapping_to = key
+ if is_regular_key:
+ try:
+ mapping_to = metadata['_self']['mapping_to']
+ except Exception:
+ mapping_to = None
+
+ return mapping_to
+
+ def _get_submeta_by_key(self, metadata, key):
+ """Get submetadata dictionary.
+
+ Based on current metadata key. And
+ determines the input key is a regular string keyword or a variable
+ keyword defined in metadata, which starts with '$'.
+
+ :param dict metadata: The metadata dictionary.
+ :param str key: The keyword defined in the metadata.
+ """
+ if key in metadata:
+ return (True, metadata[key])
+
+ temp = deepcopy(metadata)
+ if '_self' in temp:
+ del temp['_self']
+ meta_key = temp.keys()[0]
+ if meta_key.startswith("$"):
+ return (False, metadata[meta_key])
+
+ raise KeyError("'%s' is invalid in metadata '%s'!" % (key, metadata))
+
+ def _get_tmpl_vars_helper(self, metadata, config, output):
+ for key, config_value in sorted(config.iteritems()):
+ is_regular_key, sub_meta = self._get_submeta_by_key(metadata, key)
+ mapping_to = self._get_key_mapping(sub_meta, key, is_regular_key)
+
+ if isinstance(config_value, dict):
+ if mapping_to:
+ new_output = output[mapping_to] = {}
+ else:
+ new_output = output
+
+ self._get_tmpl_vars_helper(sub_meta, config_value, new_output)
+
+ elif mapping_to:
+ output[mapping_to] = config_value
+
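
A worked example of the metadata walk above, given any BaseInstaller (or subclass) instance named installer; the metadata tree here is hypothetical, while '_self'/'mapping_to' follow the conventions this method reads:

    metadata = {'server_credentials': {
        '_self': {'mapping_to': 'server_credentials'},
        'username': {'_self': {'mapping_to': 'username'}},
        'password': {'_self': {'mapping_to': 'password'}},
    }}
    config = {'server_credentials': {'username': 'root', 'password': 'r00t'}}
    installer.get_tmpl_vars_from_metadata(metadata, config)
    # -> {'server_credentials': {'username': 'root', 'password': 'r00t'}}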
+ def get_config_from_template(self, tmpl_path, vars_dict):
+ logging.debug("template path is %s", tmpl_path)
+ logging.debug("vars_dict is %s", vars_dict)
+
+ if not os.path.exists(tmpl_path) or not vars_dict:
+ logging.info("Template dir or vars_dict is None!")
+ return {}
+
+ searchList = []
+ copy_vars_dict = deepcopy(vars_dict)
+ for key, value in vars_dict.iteritems():
+ if isinstance(value, dict):
+ temp = copy_vars_dict[key]
+ del copy_vars_dict[key]
+ searchList.append(temp)
+ searchList.append(copy_vars_dict)
+
+ # Load base template first if it exists
+ base_config = {}
+ base_tmpl_path = os.path.join(os.path.dirname(tmpl_path), 'base.tmpl')
+ if os.path.isfile(base_tmpl_path) and base_tmpl_path != tmpl_path:
+ base_tmpl = Template(file=base_tmpl_path, searchList=searchList)
+ base_config = json.loads(base_tmpl.respond(), encoding='utf-8')
+ base_config = json.loads(json.dumps(base_config), encoding='utf-8')
+
+ # Load specific template for current adapter
+ tmpl = Template(file=open(tmpl_path, "r"), searchList=searchList)
+ config = json.loads(tmpl.respond(), encoding='utf-8')
+ config = json.loads(json.dumps(config), encoding='utf-8')
+
+ # Merge the two outputs
+ config = util.merge_dict(base_config, config)
+
+ logging.debug("get_config_from_template resulting %s", config)
+ return config
+
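
The searchList loop above promotes nested dicts to top-level Cheetah namespaces, so templates can reference $username directly instead of $os_config.username. With a hypothetical vars_dict:

    vars_dict = {'os_config': {'username': 'root'}, 'profile': 'CentOS-7'}
    # after the loop: searchList == [{'username': 'root'}, {'profile': 'CentOS-7'}]
    # a template body of '{"user": "$username", "os": "$profile"}'
    # would render to '{"user": "root", "os": "CentOS-7"}'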
+ @classmethod
+ def get_installer(cls, name, path, adapter_info, cluster_info, hosts_info):
+ try:
+ mod_file, path, descr = imp.find_module(name, [path])
+ if mod_file:
+ mod = imp.load_module(name, mod_file, path, descr)
+ config_manager = BaseConfigManager(adapter_info, cluster_info,
+ hosts_info)
+ return getattr(mod, mod.NAME)(config_manager)
+
+ except ImportError as exc:
+ logging.error('No such module found: %s', name)
+ logging.exception(exc)
+
+ return None
+
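
Spelled out, here is what the classmethod above does for a hypothetical plugin directory, using the Python 2 imp API it relies on:

    import imp

    # find_module/load_module locate and import the named plugin module
    mod_file, mod_path, descr = imp.find_module(
        'cobbler', ['/opt/compass/os_installers'])   # path hypothetical
    mod = imp.load_module('cobbler', mod_file, mod_path, descr)
    # the module exposes NAME (e.g. 'CobblerInstaller'); the class of that
    # name is instantiated with a BaseConfigManager
    installer = getattr(mod, mod.NAME)(config_manager)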
+
+class OSInstaller(BaseInstaller):
+ """Interface for os installer."""
+ NAME = 'OSInstaller'
+ INSTALLER_BASE_DIR = os.path.join(CURRENT_DIR, 'os_installers')
+
+ def get_oses(self):
+ """virtual method to get supported oses.
+
+ :returns: list of str, each is the supported os version.
+ """
+ return []
+
+ @classmethod
+ def get_installer(cls, name, adapter_info, cluster_info, hosts_info):
+ if name is None:
+ logging.info("Installer name is None! No OS installer loaded!")
+ return None
+
+ path = os.path.join(cls.INSTALLER_BASE_DIR, name)
+ installer = super(OSInstaller, cls).get_installer(name, path,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ if not isinstance(installer, OSInstaller):
+ logging.info("Installer '%s' is not an OS installer!" % name)
+ return None
+
+ return installer
+
+ def poweron(self, host_id):
+ pass
+
+ def poweroff(self, host_id):
+ pass
+
+ def reset(self, host_id):
+ pass
+
+
+class PKInstaller(BaseInstaller):
+ """Interface for package installer."""
+ NAME = 'PKInstaller'
+ INSTALLER_BASE_DIR = os.path.join(CURRENT_DIR, 'pk_installers')
+
+ def generate_installer_config(self):
+        raise NotImplementedError(
+            'generate_installer_config is not defined in %s' %
+            self.__class__.__name__
+        )
+
+ def get_target_systems(self):
+ """virtual method to get available target_systems for each os.
+
+ :param oses: supported os versions.
+        :type oses: list of str
+
+ :returns: dict of os_version to target systems as list of str.
+ """
+ return {}
+
+ def get_roles(self, target_system):
+ """virtual method to get all roles of given target system.
+
+ :param target_system: target distributed system such as OpenStack.
+ :type target_system: str
+
+ :returns: dict of role to role description as str.
+ """
+ return {}
+
+ def os_ready(self, **kwargs):
+ pass
+
+ def cluster_os_ready(self, **kwargs):
+ pass
+
+ def serialize_config(self, config, destination):
+ with open(destination, "w") as f:
+ f.write(config)
+
+ @classmethod
+ def get_installer(cls, name, adapter_info, cluster_info, hosts_info):
+ if name is None:
+ logging.info("Install name is None. No package installer loaded!")
+ return None
+
+ path = os.path.join(cls.INSTALLER_BASE_DIR, name)
+ if not os.path.exists(path):
+            path = os.path.join(
+                compass_setting.PLUGINS_DIR, name, "implementation", name)
+ if not os.path.exists(path):
+ logging.info("Installer '%s' does not exist!" % name)
+ return None
+ installer = super(PKInstaller, cls).get_installer(name, path,
+ adapter_info,
+ cluster_info,
+ hosts_info)
+
+ if not isinstance(installer, PKInstaller):
+ logging.info("Installer '%s' is not a package installer!" % name)
+ return None
+
+ return installer
diff --git a/compass-tasks/deployment/installers/os_installers/__init__.py b/compass-tasks/deployment/installers/os_installers/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-tasks/deployment/installers/os_installers/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/deployment/installers/os_installers/cobbler/__init__.py b/compass-tasks/deployment/installers/os_installers/cobbler/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-tasks/deployment/installers/os_installers/cobbler/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/deployment/installers/os_installers/cobbler/cobbler.py b/compass-tasks/deployment/installers/os_installers/cobbler/cobbler.py
new file mode 100644
index 0000000..9c2a935
--- /dev/null
+++ b/compass-tasks/deployment/installers/os_installers/cobbler/cobbler.py
@@ -0,0 +1,449 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""os installer cobbler plugin.
+"""
+import logging
+import os
+import shutil
+import xmlrpclib
+
+from compass.deployment.installers.installer import OSInstaller
+from compass.deployment.utils import constants as const
+from compass.utils import setting_wrapper as compass_setting
+from compass.utils import util
+from copy import deepcopy
+
+
+NAME = 'CobblerInstaller'
+
+
+class CobblerInstaller(OSInstaller):
+ """cobbler installer"""
+ CREDENTIALS = "credentials"
+ USERNAME = 'username'
+ PASSWORD = 'password'
+
+ INSTALLER_URL = "cobbler_url"
+ TMPL_DIR = 'tmpl_dir'
+ SYS_TMPL = 'system.tmpl'
+ SYS_TMPL_NAME = 'system.tmpl'
+ SYS_PROFILE_NAME = 'profile.tmpl'
+ PROFILE = 'profile'
+
+ POWER_TYPE = 'power_type'
+ POWER_ADDR = 'power_address'
+ POWER_USER = 'power_user'
+ POWER_PASS = 'power_pass'
+
+ def __init__(self, config_manager):
+ super(CobblerInstaller, self).__init__()
+
+ self.config_manager = config_manager
+ installer_settings = self.config_manager.get_os_installer_settings()
+ try:
+ username = installer_settings[self.CREDENTIALS][self.USERNAME]
+ password = installer_settings[self.CREDENTIALS][self.PASSWORD]
+ cobbler_url = installer_settings[self.INSTALLER_URL]
+ self.tmpl_dir = CobblerInstaller.get_tmpl_path()
+
+ except KeyError as ex:
+ raise KeyError(ex.message)
+
+ # The connection is created when cobbler installer is initialized.
+ self.remote = self._get_cobbler_server(cobbler_url)
+ self.token = self._get_token(username, password)
+ self.pk_installer_config = None
+
+ logging.debug('%s instance created', 'CobblerInstaller')
+
+ @classmethod
+ def get_tmpl_path(cls):
+ return os.path.join(compass_setting.TMPL_DIR, 'cobbler')
+
+ def __repr__(self):
+        return '%r[remote=%r,token=%r]' % (
+ self.__class__.__name__, self.remote, self.token)
+
+ def _get_cobbler_server(self, cobbler_url):
+ if not cobbler_url:
+ logging.error("Cobbler URL is None!")
+ raise Exception("Cobbler URL cannot be None!")
+
+ return xmlrpclib.Server(cobbler_url)
+
+ def _get_token(self, username, password):
+ if self.remote is None:
+ raise Exception("Cobbler remote instance is None!")
+ return self.remote.login(username, password)
+
+ def get_supported_oses(self):
+ """get supported os versions.
+
+ note::
+ In cobbler, we treat profile name as the indicator
+ of os version. It is just a simple indicator
+ and not accurate.
+ """
+ profiles = self.remote.get_profiles()
+ oses = []
+ for profile in profiles:
+ oses.append(profile['name'])
+ return oses
+
+ def deploy(self):
+ """Sync cobbler to catch up the latest update config and start to
+
+ install OS. Return both cluster and hosts deploy configs. The return
+ format:
+ {
+ "cluster": {
+ "id": 1,
+ "deployed_os_config": {},
+ },
+ "hosts": {
+ 1($clusterhost_id): {
+ "deployed_os_config": {...},
+ },
+ ....
+ }
+ }
+ """
+ host_ids = self.config_manager.get_hosts_id_list_for_os_installation()
+ if not host_ids:
+ # No hosts need to install OS
+ logging.info("Cobbler: No host needs to install OS.")
+ return {}
+
+ os_version = self.config_manager.get_os_version()
+ profile = self._get_profile_from_server(os_version)
+
+ global_vars_dict = self._get_cluster_tmpl_vars_dict()
+
+ self.update_profile_config_to_cobbler(profile, global_vars_dict)
+
+ hosts_deploy_config = {}
+
+ for host_id in host_ids:
+ hostname = self.config_manager.get_hostname(host_id)
+ vars_dict = self._get_host_tmpl_vars_dict(host_id,
+ global_vars_dict,
+ hostname=hostname,
+ profile=profile)
+
+ self.update_host_config_to_cobbler(host_id, hostname, vars_dict)
+
+ # set host deploy config
+ host_config = {}
+ host_config[const.DEPLOYED_OS_CONFIG] = vars_dict[const.OS_CONFIG]
+ hosts_deploy_config[host_id] = host_config
+
+        # sync to cobbler and trigger installation.
+ self._sync()
+
+ cluster_config = global_vars_dict.setdefault(const.OS_CONFIG, {})
+
+ return {
+ const.CLUSTER: {
+ const.ID: self.config_manager.get_cluster_id(),
+ const.DEPLOYED_OS_CONFIG: cluster_config
+ },
+ const.HOSTS: hosts_deploy_config
+ }
+
+ def clean_progress(self):
+ """clean log files and config for hosts which to deploy."""
+ clusterhost_list = self.config_manager.get_host_id_list()
+ log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[NAME]
+
+ for host_id in clusterhost_list:
+ hostname = self.config_manager.get_hostname(host_id)
+ self._clean_log(log_dir_prefix, hostname)
+
+ def redeploy(self):
+ """redeploy hosts."""
+ host_ids = self.config_manager.get_host_id_list()
+ if not host_ids:
+ logging.info("Cobbler: hostlist is None, no host is redeployed")
+ return
+ for host_id in host_ids:
+ hostname = self.config_manager.get_hostname(host_id)
+ sys_id = self._get_create_system(hostname)
+ if sys_id:
+ # enable netboot for this host
+ self._netboot_enabled(sys_id)
+
+ self._sync()
+
+ def set_package_installer_config(self, package_configs):
+ """Cobbler can install and configure package installer right after
+
+ OS installation compelets by setting package_config info provided
+ by package installer.
+
+ :param dict package_configs: The dict of config generated by package
+ installer for each clusterhost. The IDs
+ of clusterhosts are the keys of
+ package_configs.
+ """
+ self.pk_installer_config = package_configs
+
+ def _sync(self):
+ """Sync the updated config to cobbler and trigger installation."""
+ try:
+ self.remote.sync(self.token)
+ os.system('sudo service rsyslog restart')
+ except Exception as ex:
+ logging.debug("Failed to sync cobbler server! Error: %s" % ex)
+ raise ex
+
+ def dump_system_info(self, host_id):
+
+ hostname = self.config_manager.get_hostname(host_id)
+ if self.remote is None or not hostname:
+ logging.info("[dump_system_info]Remote or hostname is None.")
+ return {}
+
+ return self.remote.get_system_as_rendered(hostname)
+
+ def _generate_system_config(self, host_id, host_vars_dict):
+ """Generate updated system config from the template.
+
+ :param host_vars_dict: dict of variables for the system template to
+ generate system config dict for each host.
+ """
+ os_version = self.config_manager.get_os_version()
+
+ tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, os_version), self.SYS_TMPL_NAME
+ )
+ if not os.path.exists(tmpl_path):
+ err_msg = "Template '%s' does not exists!" % tmpl_path
+ logging.error(err_msg)
+ raise Exception(err_msg)
+ host_vars_dict[const.BASEINFO]['host_id'] = host_id
+ system_config = self.get_config_from_template(tmpl_path,
+ host_vars_dict)
+
+ # update package config info to cobbler ksmeta
+ if self.pk_installer_config and host_id in self.pk_installer_config:
+ pk_config = self.pk_installer_config[host_id]
+ ksmeta = system_config.setdefault("ksmeta", {})
+ util.merge_dict(ksmeta, pk_config)
+ system_config["ksmeta"] = ksmeta
+
+ return system_config
+
+ def _generate_profile_config(self, cluster_vars_dict):
+ os_version = self.config_manager.get_os_version()
+ tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, os_version), self.SYS_PROFILE_NAME
+ )
+
+ return self.get_config_from_template(tmpl_path, cluster_vars_dict)
+
+ def _get_profile_from_server(self, os_version):
+ """Get profile from cobbler server."""
+ result = self.remote.find_profile({'name': os_version})
+ if not result:
+ raise Exception("Cannot find profile for '%s'", os_version)
+
+ profile = result[0]
+ return profile
+
+ def _get_create_system(self, hostname):
+ """get system reference id for the host."""
+ sys_name = hostname
+ sys_id = None
+ system_info = self.remote.find_system({"name": hostname})
+
+ if not system_info:
+ # Create a new system
+ sys_id = self.remote.new_system(self.token)
+ self.remote.modify_system(sys_id, "name", hostname, self.token)
+ logging.debug('create new system %s for %s', sys_id, sys_name)
+ else:
+ sys_id = self.remote.get_system_handle(sys_name, self.token)
+
+ return sys_id
+
+ def _get_profile_id(self, profilename):
+ """get profile reference id for the cluster."""
+ return self.remote.get_profile_handle(profilename, self.token)
+
+ def _clean_system(self, hostname):
+ """clean system."""
+ sys_name = hostname
+ try:
+ self.remote.remove_system(sys_name, self.token)
+ logging.debug('system %s is removed', sys_name)
+ except Exception:
+ logging.debug('no system %s found to remove', sys_name)
+
+ def _update_system_config(self, sys_id, system_config):
+ """update modify system."""
+ for key, value in system_config.iteritems():
+ self.remote.modify_system(sys_id, str(key), value, self.token)
+
+ self.remote.save_system(sys_id, self.token)
+
+ def _update_profile_config(self, profile_id, profile_config):
+ for key, value in profile_config.iteritems():
+ self.remote.modify_profile(profile_id, str(key), value, self.token)
+
+ self.remote.save_profile(profile_id, self.token)
+
+ def _netboot_enabled(self, sys_id):
+ """enable netboot."""
+ self.remote.modify_system(sys_id, 'netboot_enabled', True, self.token)
+ self.remote.save_system(sys_id, self.token)
+
+ def _clean_log(self, log_dir_prefix, system_name):
+ """clean log."""
+ log_dir = os.path.join(log_dir_prefix, system_name)
+ shutil.rmtree(log_dir, True)
+
+ def update_host_config_to_cobbler(self, host_id, hostname, host_vars_dict):
+ """update host config and upload to cobbler server."""
+ sys_id = self._get_create_system(hostname)
+
+ system_config = self._generate_system_config(host_id, host_vars_dict)
+ logging.debug('%s system config to update: %s', host_id, system_config)
+
+ self._update_system_config(sys_id, system_config)
+ self._netboot_enabled(sys_id)
+
+ def update_profile_config_to_cobbler(self, profilename, cluster_vars_dict):
+ """update profile config and upload to cobbler server."""
+
+ profile_id = self._get_profile_id(profilename)
+
+ profile_config = self._generate_profile_config(cluster_vars_dict)
+ logging.debug(
+ '%s profile config to update: %s', profilename, profile_config
+ )
+
+ self._update_profile_config(profile_id, profile_config)
+
+ def delete_hosts(self):
+ hosts_id_list = self.config_manager.get_host_id_list()
+ logging.debug('delete hosts %s', hosts_id_list)
+ for host_id in hosts_id_list:
+ self.delete_single_host(host_id)
+ self._sync()
+
+ def delete_single_host(self, host_id):
+ """Delete the host from cobbler server and clean up the installation
+
+ progress.
+ """
+ hostname = self.config_manager.get_hostname(host_id)
+ try:
+ log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[NAME]
+ self._clean_system(hostname)
+ self._clean_log(log_dir_prefix, hostname)
+ except Exception as ex:
+ logging.error("Deleting host got exception: %s", ex)
+ logging.exception(ex)
+
+ def _get_host_tmpl_vars_dict(self, host_id, global_vars_dict, **kwargs):
+ """Generate template variables dictionary."""
+ vars_dict = {}
+ if global_vars_dict:
+ # Set cluster template vars_dict from cluster os_config.
+ vars_dict = deepcopy(global_vars_dict)
+
+ # Set hostname, MAC address and hostname, networks, dns and so on.
+ host_baseinfo = self.config_manager.get_host_baseinfo(host_id)
+ vars_dict[const.BASEINFO] = host_baseinfo
+
+ # Set profile
+ if self.PROFILE in kwargs:
+ profile = kwargs[self.PROFILE]
+ else:
+ os_version = self.config_manager.get_os_version()
+ profile = self._get_profile_from_server(os_version)
+
+ vars_dict[const.BASEINFO][self.PROFILE] = profile
+
+ metadata = self.config_manager.get_os_config_metadata()
+ os_config = self.config_manager.get_host_os_config(host_id)
+
+ # Get template variables values from host os_config
+ host_vars_dict = self.get_tmpl_vars_from_metadata(metadata, os_config)
+ util.merge_dict(
+ vars_dict.setdefault(const.OS_CONFIG, {}), host_vars_dict
+ )
+ return vars_dict
+
+ def _get_cluster_tmpl_vars_dict(self):
+ metadata = self.config_manager.get_os_config_metadata()
+ os_config = self.config_manager.get_cluster_os_config()
+
+ cluster_vas_dict = {}
+ cluster_vas_dict[const.OS_CONFIG] = \
+ self.get_tmpl_vars_from_metadata(metadata, os_config)
+
+ return cluster_vas_dict
+
+    def _check_and_set_system_ipmi(self, host_id, sys_id):
+ if not sys_id:
+ logging.info("System is None!")
+ return False
+
+ system = self.dump_system_info(host_id)
+ if system[self.POWER_TYPE] != 'ipmilan' or not system[self.POWER_USER]:
+            # Set system power type to ipmilan if needed and set IPMI info
+ ipmi_info = self.config_manager.get_host_ipmi_info(host_id)
+ if not ipmi_info:
+ logging.info('No IPMI information found! Failed power on.')
+ return False
+
+ ipmi_ip, ipmi_user, ipmi_pass = ipmi_info
+ power_opts = {}
+ power_opts[self.POWER_TYPE] = 'ipmilan'
+ power_opts[self.POWER_ADDR] = ipmi_ip
+ power_opts[self.POWER_USER] = ipmi_user
+ power_opts[self.POWER_PASS] = ipmi_pass
+
+ self._update_system_config(sys_id, power_opts)
+
+ return True
+
+ def poweron(self, host_id):
+ hostname = self.config_manager.get_hostname(host_id)
+ sys_id = self._get_create_system(hostname)
+        if not self._check_and_set_system_ipmi(host_id, sys_id):
+ return
+
+ self.remote.power_system(sys_id, self.token, power='on')
+ logging.info("Host with ID=%d starts to power on!" % host_id)
+
+ def poweroff(self, host_id):
+ hostname = self.config_manager.get_hostname(host_id)
+ sys_id = self._get_create_system(hostname)
+        if not self._check_and_set_system_ipmi(host_id, sys_id):
+ return
+
+ self.remote.power_system(sys_id, self.token, power='off')
+ logging.info("Host with ID=%d starts to power off!" % host_id)
+
+ def reset(self, host_id):
+ hostname = self.config_manager.get_hostname(host_id)
+ sys_id = self._get_create_system(hostname)
+        if not self._check_and_set_system_ipmi(host_id, sys_id):
+ return
+
+ self.remote.power_system(sys_id, self.token, power='reboot')
+ logging.info("Host with ID=%d starts to reboot!" % host_id)
diff --git a/compass-tasks/deployment/installers/pk_installers/__init__.py b/compass-tasks/deployment/installers/pk_installers/__init__.py
new file mode 100644
index 0000000..5e42ae9
--- /dev/null
+++ b/compass-tasks/deployment/installers/pk_installers/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/deployment/installers/pk_installers/ansible_installer/__init__.py b/compass-tasks/deployment/installers/pk_installers/ansible_installer/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-tasks/deployment/installers/pk_installers/ansible_installer/__init__.py
diff --git a/compass-tasks/deployment/installers/pk_installers/ansible_installer/ansible_installer.py b/compass-tasks/deployment/installers/pk_installers/ansible_installer/ansible_installer.py
new file mode 100644
index 0000000..0a86be4
--- /dev/null
+++ b/compass-tasks/deployment/installers/pk_installers/ansible_installer/ansible_installer.py
@@ -0,0 +1,441 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__auther__ = "Compass Dev Team (dev-team@syscompass.org)"
+
+"""package installer: ansible plugin."""
+
+from Cheetah.Template import Template
+from copy import deepcopy
+import json
+import logging
+import os
+import re
+import shutil
+import subprocess
+
+from compass.deployment.installers.installer import PKInstaller
+from compass.deployment.utils import constants as const
+from compass.utils import setting_wrapper as compass_setting
+from compass.utils import util
+
+NAME = "AnsibleInstaller"
+
+
+def byteify(input):
+ if isinstance(input, dict):
+ return dict([(byteify(key), byteify(value))
+ for key, value in input.iteritems()])
+ elif isinstance(input, list):
+ return [byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
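
json.loads returns unicode strings on Python 2; byteify converts them back to str so the values can be written into inventories and config files verbatim. For example:

    import json
    data = json.loads('{"hostname": "host01"}')  # {u'hostname': u'host01'}
    byteify(data)                                # {'hostname': 'host01'}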
+
+class AnsibleInstaller(PKInstaller):
+ INVENTORY_TMPL_DIR = 'inventories'
+ GROUPVARS_TMPL_DIR = 'vars'
+    INVENTORY_PATCH_TEMPLATE_DIR = 'inventories'
+
+ # keywords in package installer settings
+ ANSIBLE_DIR = 'ansible_dir'
+ ANSIBLE_RUN_DIR = 'ansible_run_dir'
+ LOG_FILE = 'ansible_log_file'
+ ANSIBLE_CONFIG = 'ansible_config'
+ INVENTORY = 'inventory_file'
+ INVENTORY_JSON = 'inventory_json_file'
+ INVENTORY_GROUP = 'inventory_group'
+ GROUP_VARIABLE = 'group_variable'
+ HOSTS_PATH = 'etc_hosts_path'
+ RUNNER_DIRS = 'runner_dirs'
+
+ def __init__(self, config_manager):
+ super(AnsibleInstaller, self).__init__()
+
+ self.config_manager = config_manager
+ self.tmpl_name = self.config_manager.get_cluster_flavor_template()
+ self.installer_settings = (
+ self.config_manager.get_pk_installer_settings()
+ )
+ settings = self.installer_settings
+ self.ansible_dir = settings.setdefault(self.ANSIBLE_DIR, None)
+ self.ansible_run_dir = (
+ settings.setdefault(self.ANSIBLE_RUN_DIR, None)
+ )
+ self.log_file = settings.setdefault(self.LOG_FILE, None)
+ self.ansible_config = (
+ settings.setdefault(self.ANSIBLE_CONFIG, None)
+ )
+ self.inventory = settings.setdefault(self.INVENTORY, None)
+ self.inventory_json = settings.setdefault(self.INVENTORY_JSON, None)
+ self.inventory_group = settings.setdefault(self.INVENTORY_GROUP, None)
+ self.group_variable = (
+ settings.setdefault(self.GROUP_VARIABLE, None)
+ )
+ self.hosts_path = (
+ settings.setdefault(self.HOSTS_PATH, None)
+ )
+ self.runner_dirs = (
+ settings.setdefault(self.RUNNER_DIRS, None)
+ )
+ self.playbook = self.tmpl_name.replace('tmpl', 'yml')
+ self.runner_files = [self.playbook]
+
+ adapter_name = self.config_manager.get_dist_system_name()
+ self.tmpl_dir = AnsibleInstaller.get_tmpl_path(adapter_name)
+ self.adapter_dir = os.path.join(self.ansible_dir, adapter_name)
+ logging.debug('%s instance created', self)
+
+ @classmethod
+ def get_tmpl_path(cls, adapter_name):
+ tmpl_path = os.path.join(
+ os.path.join(compass_setting.TMPL_DIR, 'ansible_installer'),
+ adapter_name
+ )
+ return tmpl_path
+
+ def __repr__(self):
+        return '%s[name=%s]' % (
+            self.__class__.__name__, self.NAME)
+
+ def dump_inventory(self, data, inventory):
+ with open(inventory, "w") as f:
+ json.dump(data, f, indent=4)
+
+ def _generate_inventory_data(self, global_vars_dict):
+ vars_dict = global_vars_dict['roles_mapping']
+ inventory_data = {}
+ inventory_data['_meta'] = {'hostvars': {}}
+ for item in self.inventory_group:
+ if item in vars_dict:
+ inventory_data[item] = {'hosts': []}
+ for host in vars_dict[item]:
+ hostname = host['hostname']
+ if hostname not in inventory_data['_meta']['hostvars']:
+ host_dict = {}
+ host_dict['ansible_ssh_host'] = host['install']['ip']
+ host_dict['ansible_ssh_user'] = 'root'
+ host_dict['ansible_ssh_pass'] = 'root'
+ inventory_data['_meta']['hostvars'].update(
+ {hostname: host_dict})
+ inventory_data[item]['hosts'].append(hostname)
+
+ inventory_data['ceph'] = {'children':
+ ['ceph_adm', 'ceph_mon', 'ceph_osd']}
+ return inventory_data
+
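
The structure built above is ansible's dynamic-inventory JSON. With a single controller in the roles mapping, the output would resemble the following (all values hypothetical):

    {
        "_meta": {"hostvars": {"host01": {
            "ansible_ssh_host": "10.1.0.50",
            "ansible_ssh_user": "root",
            "ansible_ssh_pass": "root"}}},
        "controller": {"hosts": ["host01"]},
        "ceph": {"children": ["ceph_adm", "ceph_mon", "ceph_osd"]}
    }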
+ def generate_installer_config(self):
+ """Render ansible config file by OS installing.
+
+ The output format:
+ {
+ '1'($host_id/clusterhost_id):{
+ 'tool': 'ansible',
+ },
+ .....
+ }
+ """
+ host_ids = self.config_manager.get_host_id_list()
+ os_installer_configs = {}
+ for host_id in host_ids:
+ temp = {
+ "tool": "ansible",
+ }
+ os_installer_configs[host_id] = temp
+
+ return os_installer_configs
+
+ def get_env_name(self, dist_sys_name, cluster_name):
+ return "-".join((dist_sys_name, cluster_name))
+
+ def _get_cluster_tmpl_vars(self):
+ """Generate template variables dict
+
+ Generates based on cluster level config.
+ The vars_dict will be:
+ {
+ "baseinfo": {
+ "id":1,
+ "name": "cluster01",
+ ...
+ },
+ "package_config": {
+ .... //mapped from original package config based on metadata
+ },
+ "role_mapping": {
+ ....
+ }
+ }
+ """
+ cluster_vars_dict = {}
+ # set cluster basic information to vars_dict
+ cluster_baseinfo = self.config_manager.get_cluster_baseinfo()
+ cluster_vars_dict[const.BASEINFO] = cluster_baseinfo
+
+ # get and set template variables from cluster package config.
+ pk_metadata = self.config_manager.get_pk_config_meatadata()
+ pk_config = self.config_manager.get_cluster_package_config()
+
+ # get os config as ansible needs them
+ os_metadata = self.config_manager.get_os_config_metadata()
+ os_config = self.config_manager.get_cluster_os_config()
+
+ pk_meta_dict = self.get_tmpl_vars_from_metadata(pk_metadata, pk_config)
+ os_meta_dict = self.get_tmpl_vars_from_metadata(os_metadata, os_config)
+ util.merge_dict(pk_meta_dict, os_meta_dict)
+
+ cluster_vars_dict[const.PK_CONFIG] = pk_meta_dict
+
+ # get and set roles_mapping to vars_dict
+ mapping = self.config_manager.get_cluster_roles_mapping()
+ logging.info("cluster role mapping is %s", mapping)
+ cluster_vars_dict[const.ROLES_MAPPING] = mapping
+
+ # get ip settings to vars_dict
+ hosts_ip_settings = self.config_manager.get_hosts_ip_settings(
+ pk_meta_dict["network_cfg"]["ip_settings"],
+ pk_meta_dict["network_cfg"]["sys_intf_mappings"]
+ )
+ logging.info("hosts_ip_settings is %s", hosts_ip_settings)
+ cluster_vars_dict["ip_settings"] = hosts_ip_settings
+
+ return byteify(cluster_vars_dict)
+
+ def _generate_inventory_attributes(self, global_vars_dict):
+ inventory_tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, self.INVENTORY_TMPL_DIR),
+ self.tmpl_name
+ )
+ if not os.path.exists(inventory_tmpl_path):
+ logging.error(
+ "Inventory template '%s' does not exist", self.tmpl_name
+ )
+ raise Exception("Template '%s' does not exist!" % self.tmpl_name)
+ inventory_dir = os.path.join(global_vars_dict['run_dir'], 'inventories')
+ inventory_json = os.path.join(inventory_dir, self.inventory_json)
+ vars_dict = {'inventory_json': inventory_json}
+ return self.get_config_from_template(
+ inventory_tmpl_path, vars_dict
+ )
+
+ def _generate_group_vars_attributes(self, global_vars_dict):
+ logging.info("global vars dict is %s", global_vars_dict)
+ group_vars_tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, self.GROUPVARS_TMPL_DIR),
+ self.tmpl_name
+ )
+ if not os.path.exists(group_vars_tmpl_path):
+ logging.error("Vars template '%s' does not exist",
+ self.tmpl_name)
+ raise Exception("Template '%s' does not exist!" % self.tmpl_name)
+
+ return self.get_config_from_template(
+ group_vars_tmpl_path, global_vars_dict
+ )
+
+ def _generate_hosts_attributes(self, global_vars_dict):
+ hosts_tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, 'hosts'), self.tmpl_name
+ )
+ if not os.path.exists(hosts_tmpl_path):
+ logging.error("Hosts template '%s' does not exist", self.tmpl_name)
+ raise Exception("Template '%s' does not exist!" % self.tmpl_name)
+
+ return self.get_config_from_template(hosts_tmpl_path, global_vars_dict)
+
+ def _generate_ansible_cfg_attributes(self, global_vars_dict):
+ ansible_cfg_tmpl_path = os.path.join(
+ os.path.join(self.tmpl_dir, 'ansible_cfg'), self.tmpl_name
+ )
+ if not os.path.exists(ansible_cfg_tmpl_path):
+ logging.error("cfg template '%s' does not exist", self.tmpl_name)
+ raise Exception("Template '%s' does not exist!" % self.tmpl_name)
+
+ return self.get_config_from_template(
+ ansible_cfg_tmpl_path,
+ global_vars_dict
+ )
+
+ def get_config_from_template(self, tmpl_path, vars_dict):
+ logging.debug("vars_dict is %s", vars_dict)
+
+ if not os.path.exists(tmpl_path) or not vars_dict:
+ logging.info("Template dir or vars_dict is None!")
+ return {}
+
+ searchList = []
+ copy_vars_dict = deepcopy(vars_dict)
+ for key, value in vars_dict.iteritems():
+ if isinstance(value, dict):
+ temp = copy_vars_dict[key]
+ del copy_vars_dict[key]
+ searchList.append(temp)
+ searchList.append(copy_vars_dict)
+
+ # Load specific template for current adapter
+ tmpl = Template(file=open(tmpl_path, "r"), searchList=searchList)
+ return tmpl.respond()
+
+ def _create_ansible_run_env(self, env_name, ansible_run_destination):
+ if os.path.exists(ansible_run_destination):
+ shutil.rmtree(ansible_run_destination, True)
+
+ os.mkdir(ansible_run_destination)
+
+ # copy roles to run env
+ dirs = self.runner_dirs
+ files = self.runner_files
+ for dir in dirs:
+ if not os.path.exists(os.path.join(self.ansible_dir, dir)):
+ continue
+ os.system(
+ "cp -rf %s %s" % (
+ os.path.join(self.ansible_dir, dir),
+ ansible_run_destination
+ )
+ )
+ for file in files:
+ logging.info('file is %s', file)
+ shutil.copy(
+ os.path.join(self.adapter_dir, file),
+ os.path.join(
+ ansible_run_destination,
+ file
+ )
+ )
+
+ def prepare_ansible(self, env_name, global_vars_dict):
+ ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
+ if os.path.exists(ansible_run_destination):
+ ansible_run_destination += "-expansion"
+ self._create_ansible_run_env(env_name, ansible_run_destination)
+ global_vars_dict.update({'run_dir': ansible_run_destination})
+
+ inv_config = self._generate_inventory_attributes(global_vars_dict)
+ inventory_dir = os.path.join(ansible_run_destination, 'inventories')
+
+ vars_config = self._generate_group_vars_attributes(global_vars_dict)
+ vars_dir = os.path.join(ansible_run_destination, 'group_vars')
+
+ hosts_config = self._generate_hosts_attributes(global_vars_dict)
+ hosts_destination = os.path.join(
+ ansible_run_destination, self.hosts_path
+ )
+
+ cfg_config = self._generate_ansible_cfg_attributes(global_vars_dict)
+ cfg_destination = os.path.join(
+ ansible_run_destination,
+ self.ansible_config
+ )
+
+ inventory_data = self._generate_inventory_data(global_vars_dict)
+ inventory_json_destination = os.path.join(inventory_dir,
+ self.inventory_json)
+
+ os.mkdir(inventory_dir)
+ os.mkdir(vars_dir)
+
+ inventory_destination = os.path.join(inventory_dir, self.inventory)
+ group_vars_destination = os.path.join(vars_dir, self.group_variable)
+ self.dump_inventory(inventory_data, inventory_json_destination)
+ self.serialize_config(inv_config, inventory_destination)
+ self.serialize_config(vars_config, group_vars_destination)
+ self.serialize_config(hosts_config, hosts_destination)
+ self.serialize_config(cfg_config, cfg_destination)
+
+ def deploy(self):
+ """Start to deploy a distributed system.
+
+ Return both cluster and hosts deployed configs.
+ The return format:
+ {
+ "cluster": {
+ "id": 1,
+ "deployed_package_config": {
+ "roles_mapping": {...},
+ "service_credentials": {...},
+ ....
+ }
+ },
+ "hosts": {
+ 1($clusterhost_id): {
+ "deployed_package_config": {...}
+ },
+ ....
+ }
+ }
+ """
+ host_list = self.config_manager.get_host_id_list()
+ if not host_list:
+ return {}
+
+ adapter_name = self.config_manager.get_adapter_name()
+ cluster_name = self.config_manager.get_clustername()
+ env_name = self.get_env_name(adapter_name, cluster_name)
+
+ global_vars_dict = self._get_cluster_tmpl_vars()
+ logging.info(
+ '%s var dict: %s', self.__class__.__name__, global_vars_dict
+ )
+ # Create ansible related files
+ self.prepare_ansible(env_name, global_vars_dict)
+
+ def patch(self, patched_role_mapping):
+ adapter_name = self.config_manager.get_adapter_name()
+ cluster_name = self.config_manager.get_clustername()
+ env_name = self.get_env_name(adapter_name, cluster_name)
+ ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
+ inventory_dir = os.path.join(ansible_run_destination, 'inventories')
+ patched_global_vars_dict = self._get_cluster_tmpl_vars()
+ mapping = self.config_manager.get_cluster_patched_roles_mapping()
+ patched_global_vars_dict['roles_mapping'] = mapping
+ patched_inv = self._generate_inventory_attributes(
+ patched_global_vars_dict)
+ inv_file = os.path.join(inventory_dir, 'patched_inventory.yml')
+ self.serialize_config(patched_inv, inv_file)
+ config_file = os.path.join(
+ ansible_run_destination, self.ansible_config
+ )
+ playbook_file = os.path.join(ansible_run_destination, self.playbook)
+ log_file = os.path.join(ansible_run_destination, 'patch.log')
+ cmd = "ANSIBLE_CONFIG=%s ansible-playbook -i %s %s" % (config_file,
+ inv_file,
+ playbook_file)
+ with open(log_file, 'w') as logfile:
+ subprocess.Popen(cmd, shell=True, stdout=logfile, stderr=logfile)
+ return patched_role_mapping
+
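
For a hypothetical run directory, the fire-and-forget command assembled in patch() is equivalent to:

    ANSIBLE_CONFIG=/var/ansible/run/openstack-cluster01/ansible.cfg \
        ansible-playbook -i inventories/patched_inventory.yml HA-ansible-multinodes.yml

with stdout and stderr streamed to patch.log; Popen returns immediately rather than waiting for the play to finish.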
+ def cluster_os_ready(self):
+ adapter_name = self.config_manager.get_adapter_name()
+ cluster_name = self.config_manager.get_clustername()
+ env_name = self.get_env_name(adapter_name, cluster_name)
+ ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
+ expansion_dir = ansible_run_destination + "-expansion"
+ if os.path.exists(expansion_dir):
+ ansible_run_destination = expansion_dir
+ inventory_dir = os.path.join(ansible_run_destination, 'inventories')
+ inventory_file = os.path.join(inventory_dir, self.inventory)
+ playbook_file = os.path.join(ansible_run_destination, self.playbook)
+ log_file = os.path.join(ansible_run_destination, 'run.log')
+ config_file = os.path.join(
+ ansible_run_destination, self.ansible_config
+ )
+ os.system("chmod +x %s" % inventory_file)
+ cmd = "ANSIBLE_CONFIG=%s ansible-playbook -i %s %s" % (config_file,
+ inventory_file,
+ playbook_file)
+ with open(log_file, 'w') as logfile:
+ subprocess.Popen(cmd, shell=True, stdout=logfile, stderr=logfile)
diff --git a/compass-tasks/deployment/utils/__init__.py b/compass-tasks/deployment/utils/__init__.py
new file mode 100644
index 0000000..cbd36e0
--- /dev/null
+++ b/compass-tasks/deployment/utils/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
diff --git a/compass-tasks/deployment/utils/constants.py b/compass-tasks/deployment/utils/constants.py
new file mode 100644
index 0000000..e90b1b2
--- /dev/null
+++ b/compass-tasks/deployment/utils/constants.py
@@ -0,0 +1,84 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = "Grace Yu (grace.yu@huawei.com)"
+
+
+"""All keywords variables in deployment are defined in this module."""
+
+
+# General keywords
+BASEINFO = 'baseinfo'
+CLUSTER = 'cluster'
+HOST = 'host'
+HOSTS = 'hosts'
+ID = 'id'
+NAME = 'name'
+PASSWORD = 'password'
+USERNAME = 'username'
+
+
+# Adapter info related keywords
+FLAVOR = 'flavor'
+FLAVORS = 'flavors'
+PLAYBOOK = 'playbook'
+FLAVOR_NAME = 'flavor_name'
+HEALTH_CHECK_CMD = 'health_check_cmd'
+TMPL = 'template'
+INSTALLER_SETTINGS = 'settings'
+METADATA = 'metadata'
+OS_INSTALLER = 'os_installer'
+PK_INSTALLER = 'package_installer'
+SUPPORT_OSES = 'supported_oses'
+
+
+# Cluster info related keywords
+ADAPTER_ID = 'adapter_id'
+OS_VERSION = 'os_name'
+
+
+# Host info related keywords
+DNS = 'dns'
+DOMAIN = 'domain'
+HOST_ID = 'host_id'
+HOSTNAME = 'hostname'
+IP_ADDR = 'ip'
+IPMI = 'ipmi'
+IPMI_CREDS = 'ipmi_credentials'
+MAC_ADDR = 'mac'
+MGMT_NIC_FLAG = 'is_mgmt'
+NETMASK = 'netmask'
+NETWORKS = 'networks'
+NIC = 'interface'
+CLUSTER_ID = 'cluster_id'
+ORIGIN_CLUSTER_ID = 'origin_cluster_id'
+PROMISCUOUS_FLAG = 'is_promiscuous'
+REINSTALL_OS_FLAG = 'reinstall_os'
+SUBNET = 'subnet'
+
+
+# Cluster/host config related keywords
+COMPLETED_PK_CONFIG = 'completed_package_config'
+COMPLETED_OS_CONFIG = 'completed_os_config'
+DEPLOYED_OS_CONFIG = 'deployed_os_config'
+DEPLOYED_PK_CONFIG = 'deployed_package_config'
+NETWORK_MAPPING = 'network_mapping'
+OS_CONFIG = 'os_config'
+OS_CONFIG_GENERAL = 'general'
+PK_CONFIG = 'package_config'
+ROLES = 'roles'
+PATCHED_ROLES = 'patched_roles'
+ROLES_MAPPING = 'roles_mapping'
+SERVER_CREDS = 'server_credentials'
+TMPL_VARS_DICT = 'vars_dict'
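These constants are shared dictionary keys rather than values: the config manager and the installers use them to index host and cluster records. A hedged illustration of that usage (the record contents below are invented):

```python
# Invented host record indexed with the keyword constants above.
from compass.deployment.utils import constants as const

host = {
    const.HOST_ID: 1,
    const.HOSTNAME: 'host1',
    const.MAC_ADDR: '00:1c:6d:28:c7:16',
    const.REINSTALL_OS_FLAG: True,
}
if host[const.REINSTALL_OS_FLAG]:
    print('%s (%s) will have its OS reinstalled'
          % (host[const.HOSTNAME], host[const.MAC_ADDR]))
```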
diff --git a/compass-tasks/hdsdiscovery/SNMP_CONFIG.md b/compass-tasks/hdsdiscovery/SNMP_CONFIG.md
new file mode 100644
index 0000000..337b542
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/SNMP_CONFIG.md
@@ -0,0 +1,33 @@
+Install & Configure Prerequisite Packages:
+
+1. Net-SNMP:
+   a. #apt-get install -y snmpd snmp libsnmp-python
+   b. #apt-get install -y snmp-mibs-downloader
+      For CentOS:
+      # yum install net-snmp net-snmp-utils
+
+   c. create the vendor's mibs directory (for example):
+      - #mkdir -p /root/.snmp/mibs/huawei
+      - #vim /etc/snmp/snmp.conf (if it does not exist, create snmp.conf)
+        * add the vendor's mibs directory:
+          mibdirs +/root/.snmp/mibs/huawei
+        * comment out the line:
+          #mibs:
+   d. copy the vendor's mibs to that directory
+   e. #vim /etc/default/snmpd
+      * change the directive from
+        TRAPDRUN=no --> TRAPDRUN=yes
+      For CentOS:
+      # vim /etc/sysconfig/snmpd
+      * modify or add the directive
+        TRAPDRUN=yes
+
+   f. #vim /etc/snmp/snmpd.conf
+      * add the following line, where $ip is the IP address of the manager machine:
+        com2sec mynetwork $ip/24 public
+   g. #service snmpd restart
+
+   Note: net-snmp-config can be used to inspect the default configuration
+
+2. paramiko:
+   #apt-get install python-paramiko
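Once snmpd is configured, reachability can be sanity-checked the same way hdsdiscovery does it, by shelling out to the snmpget CLI. A sketch, assuming the manager can reach the switch (the host and community below are placeholders):

```python
# Placeholder host/community; returns True when sysDescr.0 is readable.
import subprocess


def snmp_reachable(host='127.0.0.1', community='public'):
    cmd = "snmpget -v 2c -c %s -Ob %s sysDescr.0" % (community, host)
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, _ = proc.communicate()
    return proc.returncode == 0 and bool(output)
```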
diff --git a/compass-tasks/hdsdiscovery/__init__.py b/compass-tasks/hdsdiscovery/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/base.py b/compass-tasks/hdsdiscovery/base.py
new file mode 100644
index 0000000..77b3b0b
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/base.py
@@ -0,0 +1,185 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Base class extended by specific vendor in vendors directory.
+A vendor needs to implement abstract methods of base class.
+"""
+import logging
+import re
+
+from abc import ABCMeta
+
+from compass.hdsdiscovery.error import TimeoutError
+from compass.hdsdiscovery import utils
+
+
+class BaseVendor(object):
+ """Basic Vendor object."""
+ __metaclass__ = ABCMeta
+
+ def is_this_vendor(self, sys_info, **kwargs):
+ """Determine if the host is associated with this vendor.
+
+        This function must be implemented by each vendor.
+ """
+ raise NotImplementedError
+
+
+class BaseSnmpVendor(BaseVendor):
+ """Base SNMP-based vendor plugin.
+
+ .. note::
+ It uses MIB-II sysDescr value to determine the vendor of the switch.
+ """
+
+ def __init__(self, matched_names):
+ super(BaseSnmpVendor, self).__init__()
+ self._matched_names = matched_names
+
+ def is_this_vendor(self, sys_info, **kwargs):
+ """Determine if the switch belongs to this vendor.
+
+        Matches the vendor's known names against the system information
+        retrieved from the switch.
+        :param str sys_info: the system information retrieved from a switch
+        :return: True if any known name appears in sys_info, else False
+ """
+ if sys_info:
+ for name in self._matched_names:
+ if re.search(r"\b" + re.escape(name) + r"\b", sys_info,
+ re.IGNORECASE):
+ return True
+ return False
+
+
+class BasePlugin(object):
+ """Extended by vendor's plugin.
+
+ This plugin processes request and retrieve info directly from the switch.
+ """
+ __metaclass__ = ABCMeta
+
+ def process_data(self, oper='SCAN', **kwargs):
+        """Each vendor will have plugins to perform operations.
+
+        A plugin processes the request data and returns the expected result.
+
+ :param oper: operation function name.
+ :param kwargs: key-value pairs of arguments
+ """
+ raise NotImplementedError
+
+ # At least one of these three functions below must be implemented.
+ def scan(self, **kwargs):
+ """Get multiple records at once."""
+ pass
+
+ def set(self, **kwargs):
+ """Set value to desired variable."""
+ pass
+
+ def get(self, **kwargs):
+ """Get one record from a host."""
+ pass
+
+
+class BaseSnmpMacPlugin(BasePlugin):
+ """Base snmp plugin."""
+
+ def __init__(self, host, credential, oid='BRIDGE-MIB::dot1dTpFdbPort',
+ vlan_oid='Q-BRIDGE-MIB::dot1qPvid'):
+ super(BaseSnmpMacPlugin, self).__init__()
+ self.host = host
+ self.credential = credential
+ self.oid = oid
+ self.port_oid = 'ifName'
+ self.vlan_oid = vlan_oid
+
+ def process_data(self, oper='SCAN', **kwargs):
+        """Process data."""
+ func_name = oper.lower()
+ return getattr(self, func_name)(**kwargs)
+
+ def scan(self, **kwargs):
+        """Scan the FDB and return a list of mac/port/vlan dicts."""
+ results = None
+ try:
+ results = utils.snmpwalk_by_cl(self.host, self.credential,
+ self.oid)
+ except TimeoutError as error:
+ logging.debug("PluginMac:scan snmpwalk_by_cl failed: %s",
+ error.message)
+ return None
+
+ mac_list = []
+ for entity in results:
+ if_index = entity['value']
+ if entity and int(if_index):
+ tmp = {}
+ mac_numbers = entity['iid'].split('.')
+ tmp['mac'] = self.get_mac_address(mac_numbers)
+ tmp['port'] = self.get_port(if_index)
+ tmp['vlan'] = self.get_vlan_id(if_index)
+ mac_list.append(tmp)
+
+ return mac_list
+
+ def get_vlan_id(self, port):
+ """Get vlan Id."""
+ if not port:
+ return None
+
+ oid = '.'.join((self.vlan_oid, port))
+ vlan_id = None
+ result = None
+ try:
+ result = utils.snmpget_by_cl(self.host, self.credential, oid)
+ except TimeoutError as error:
+ logging.debug("[PluginMac:get_vlan_id snmpget_by_cl failed: %s]",
+ error.message)
+ return None
+
+ vlan_id = result.split()[-1]
+ return vlan_id
+
+ def get_port(self, if_index):
+ """Get port number."""
+
+ if_name = '.'.join((self.port_oid, if_index))
+ result = None
+ try:
+ result = utils.snmpget_by_cl(self.host, self.credential, if_name)
+ except TimeoutError as error:
+ logging.debug("[PluginMac:get_port snmpget_by_cl failed: %s]",
+ error.message)
+ return None
+
+        # A result may look like "Value: FastEthernet1/2/34"
+ port = result.split()[-1].split('/')[-1]
+ return port
+
+ def convert_to_hex(self, value):
+ """Convert the integer from decimal to hex."""
+
+ return "%0.2x" % int(value)
+
+ def get_mac_address(self, mac_numbers):
+ """Assemble mac address from the list."""
+ if len(mac_numbers) != 6:
+            logging.error("[PluginMac:get_mac_address] MAC address must "
+                          "consist of 6 octets")
+ return None
+
+ mac_in_hex = [self.convert_to_hex(num) for num in mac_numbers]
+ return ":".join(mac_in_hex)
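In scan() above, the last six numbers of each returned OID index are the decimal octets of a MAC address, which get_mac_address() re-encodes as colon-separated hex. For example:

```python
# '0.28.109.40.199.22' is a sample FDB index suffix; it decodes to
# '00:1c:6d:28:c7:16', exactly as convert_to_hex()/get_mac_address() do.
mac_numbers = '0.28.109.40.199.22'.split('.')
mac = ":".join("%0.2x" % int(num) for num in mac_numbers)
assert mac == '00:1c:6d:28:c7:16'
```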
diff --git a/compass-tasks/hdsdiscovery/error.py b/compass-tasks/hdsdiscovery/error.py
new file mode 100644
index 0000000..5bcf1a2
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/error.py
@@ -0,0 +1,26 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""hdsdiscovery module errors."""
+
+
+class TimeoutError(Exception):
+ """Timeout error."""
+
+ def __init__(self, message):
+ super(TimeoutError, self).__init__(message)
+ self.message = message
+
+ def __str__(self):
+ return repr(self.message)
diff --git a/compass-tasks/hdsdiscovery/hdmanager.py b/compass-tasks/hdsdiscovery/hdmanager.py
new file mode 100644
index 0000000..028d444
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/hdmanager.py
@@ -0,0 +1,171 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Manage hdsdiscovery functionalities."""
+import logging
+import os
+import re
+
+from compass.hdsdiscovery.error import TimeoutError
+from compass.hdsdiscovery import utils
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
+UNREACHABLE = 'unreachable'
+NOTSUPPORTED = 'notsupported'
+ERROR = 'error'
+REPOLLING = 'repolling'
+
+
+class HDManager(object):
+ """Process a request."""
+
+ def __init__(self):
+ base_dir = os.path.dirname(os.path.realpath(__file__))
+ self.vendors_dir = os.path.join(base_dir, 'vendors')
+ self.vendor_plugins_dir = os.path.join(self.vendors_dir, '?/plugins')
+ self.snmp_sysdescr = 'sysDescr.0'
+
+ def learn(self, host, credential, vendor, req_obj, oper="SCAN", **kwargs):
+ """Insert/update record of switch_info.
+
+        Get expected results from the switch for the specific operation.
+
+ :param req_obj: the object of a machine
+ :param host: switch IP address
+        :param credential: credential to access the switch
+ :param oper: operations of the plugin (SCAN, GETONE, SET)
+ :param kwargs(optional): key-value pairs
+ """
+ plugin_dir = self.vendor_plugins_dir.replace('?', vendor)
+ if not os.path.exists(plugin_dir):
+ logging.error('No such directory: %s', plugin_dir)
+ return None
+
+ plugin = utils.load_module(req_obj, plugin_dir, host, credential)
+ if not plugin:
+ # No plugin found!
+            # TODO(Grace): add more code to catch exception or unexpected state
+ logging.error('no plugin %s to load from %s', req_obj, plugin_dir)
+ return None
+
+ return plugin.process_data(oper, **kwargs)
+
+ def is_valid_vendor(self, host, credential, vendor):
+ """Check if vendor is associated with this host and credential
+
+ :param host: switch ip
+ :param credential: credential to access switch
+ :param vendor: the vendor of switch
+ """
+ vendor_dir = os.path.join(self.vendors_dir, vendor)
+ if not os.path.exists(vendor_dir):
+ logging.error('no such directory: %s', vendor_dir)
+ return False
+
+ sys_info, err = self.get_sys_info(host, credential)
+ if not sys_info:
+            logging.debug("[hdsdiscovery][hdmanager][is_valid_vendor]"
+                          "failed to get system information: %s", err)
+ return False
+
+ instance = utils.load_module(vendor, vendor_dir)
+ if not instance:
+ logging.debug("[hdsdiscovery][hdmanager][is_valid_vendor]"
+ "No such vendor found!")
+ return False
+
+ if instance.is_this_vendor(sys_info):
+ logging.info("[hdsdiscovery][hdmanager][is_valid_vendor]"
+ "vendor %s is correct!", vendor)
+ return True
+
+ return False
+
+ def get_vendor(self, host, credential):
+ """Check and get vendor of the switch.
+
+        :param host: switch ip
+        :param credential: credential to access switch
+        :return: a tuple (vendor, switch_state, error)
+ """
+
+ switch_lists = util.load_configs(setting.MACHINE_LIST_DIR)
+ switch_list = []
+ for items in switch_lists:
+ for item in items['MACHINE_LIST']:
+ for k, v in item.items():
+ switch_list.append(k)
+ if host in switch_list:
+ return ("appliance", "Found", "")
+
+ # TODO(grace): Why do we need to have valid IP?
+ # a hostname should also work.
+ if not utils.valid_ip_format(host):
+ logging.error("host '%s' is not valid IP address!", host)
+ return (None, ERROR, "Invalid IP address %s!" % host)
+
+ if not utils.is_valid_snmp_v2_credential(credential):
+ logging.debug("******The credential %s of host %s cannot "
+ "be used for either SNMP v2 or SSH*****",
+ credential, host)
+ return (None, ERROR, "Invalid credential")
+
+ sys_info, err = self.get_sys_info(host, credential)
+ if not sys_info:
+ return (None, UNREACHABLE, err)
+
+        # List all vendors in the vendors directory, skipping
+        # hidden directories
+ all_vendors = [o for o in os.listdir(self.vendors_dir)
+ if os.path.isdir(os.path.join(self.vendors_dir, o))
+ and re.match(r'^[^\.]', o)]
+
+ logging.debug("[get_vendor][available vendors]: %s ", all_vendors)
+ logging.debug("[get_vendor] System Information is [%s]", sys_info)
+
+        # TODO(grace): should not convert to lower. The vendor impl can choose
+ # to do case-insensitive match
+ # sys_info = sys_info.lower()
+ vendor = None
+ for vname in all_vendors:
+ vpath = os.path.join(self.vendors_dir, vname)
+ instance = utils.load_module(vname, vpath)
+ if not instance:
+ logging.error('no instance %s load from %s', vname, vpath)
+ continue
+
+ if instance.is_this_vendor(sys_info):
+ logging.info("[get_vendor]****Found vendor '%s'****", vname)
+ vendor = vname
+ break
+
+ if not vendor:
+ logging.debug("[get_vendor] No vendor found! <==================")
+ return (None, NOTSUPPORTED, "Not supported switch vendor!")
+
+ return (vendor, REPOLLING, "")
+
+ def get_sys_info(self, host, credential):
+ """get sys info."""
+ sys_info = None
+ try:
+ sys_info = utils.snmpget_by_cl(host,
+ credential,
+ self.snmp_sysdescr)
+ except TimeoutError as error:
+ return (None, error.message)
+
+ return (sys_info, "")
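get_vendor() ties the module together: validate the credential, read sysDescr.0, then probe each vendor module until one claims the switch. A hedged usage sketch (the switch IP and community string are placeholders):

```python
# Placeholder switch IP and community string.
from compass.hdsdiscovery.hdmanager import HDManager

manager = HDManager()
credential = {'version': '2c', 'community': 'public'}
vendor, state, err = manager.get_vendor('10.0.0.1', credential)
if vendor:
    # e.g. ('huawei', 'repolling', '') once a vendor module matches;
    # 'mac' selects the vendor's plugins/mac.py plugin.
    mac_list = manager.learn('10.0.0.1', credential, vendor, 'mac')
```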
diff --git a/compass-tasks/hdsdiscovery/utils.py b/compass-tasks/hdsdiscovery/utils.py
new file mode 100644
index 0000000..72adb0a
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/utils.py
@@ -0,0 +1,289 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions
+ Including functions of get/getbulk/walk/set of snmp for three versions
+"""
+import imp
+import logging
+import re
+import subprocess
+
+from compass.hdsdiscovery.error import TimeoutError
+
+
+def load_module(mod_name, path, host=None, credential=None):
+ """Load a module instance.
+
+ :param str mod_name: module name
+ :param str path: directory of the module
+ :param str host: switch ip address
+ :param str credential: credential used to access switch
+ """
+    # Default to None so the fall-through return below cannot raise
+    # NameError when no module file is found.
+    instance = None
+    try:
+ mod_file, path, descr = imp.find_module(mod_name, [path])
+ if mod_file:
+ mod = imp.load_module(mod_name, mod_file, path, descr)
+ if host and credential:
+ instance = getattr(mod, mod.CLASS_NAME)(host, credential)
+ else:
+ instance = getattr(mod, mod.CLASS_NAME)()
+
+ return instance
+ except ImportError as exc:
+ logging.error('No such module found: %s', mod_name)
+ logging.exception(exc)
+ return None
+
+
+def ssh_remote_execute(host, username, password, cmd):
+ """SSH to execute script on remote machine
+
+ :param host: ip of the remote machine
+ :param username: username to access the remote machine
+ :param password: password to access the remote machine
+ :param cmd: command to execute
+ """
+    # Pre-bind the handles so the finally block below cannot hit
+    # unbound names when the import or connect() fails.
+    client = None
+    stdin = stdout = stderr = None
+    try:
+ import paramiko
+ if not cmd:
+            logging.error("[hdsdiscovery][utils][ssh_remote_execute] "
+                          "command is None! Failed!")
+ return None
+
+ client = paramiko.SSHClient()
+ client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ client.connect(host, username=username, password=password, timeout=15)
+ stdin, stdout, stderr = client.exec_command(cmd)
+ result = stdout.readlines()
+ return result
+
+ except ImportError as exc:
+        logging.error("[hdsdiscovery][utils][ssh_remote_execute] failed to "
+                      "load module 'paramiko'; it is not installed!")
+ logging.exception(exc)
+ return None
+
+ except Exception as exc:
+ logging.error("[hdsdiscovery][utils][ssh_remote_execute] failed: %s",
+ cmd)
+ logging.exception(exc)
+ return None
+
+    finally:
+        for stream in (stdin, stdout, stderr):
+            if stream is not None:
+                stream.close()
+        if client is not None:
+            client.close()
+
+
+def valid_ip_format(ip_address):
+    """Validate the format of an IP address."""
+
+ if not re.match(r'^((([0-2]?\d{0,2}\.){3}([0-2]?\d{0,2}))'
+ r'|(([\da-fA-F]{1,4}:){7}([\da-fA-F]{1,4})))$',
+ ip_address):
+ # check IP's format is match ipv4 or ipv6 by regex
+ return False
+
+ return True
+
+#################################################################
+# Implement snmpwalk and snmpget functionality
+# The structure of the returned dictionary is tag/iid/value/type
+#################################################################
+AUTH_VERSIONS = {
+ '1': 1,
+ '2c': 2,
+ '3': 3
+}
+
+
+def snmp_walk(host, credential, *args, **kwargs):
+    """Implementation of snmpwalk functionality.
+
+ :param host: switch ip
+ :param credential: credential to access switch
+ :param args: OIDs
+ :param kwargs: key-value pairs
+ """
+ try:
+ import netsnmp
+
+ except ImportError:
+        logging.error("Module 'netsnmp' does not exist! Please install it first")
+ return None
+
+ if 'version' not in credential or 'community' not in credential:
+        logging.error("[utils] missing 'version' or 'community' in %s",
+ credential)
+ return None
+
+ version = None
+ if credential['version'] in AUTH_VERSIONS:
+ version = AUTH_VERSIONS[credential['version']]
+
+ varbind_list = []
+ for arg in args:
+ varbind = netsnmp.Varbind(arg)
+ varbind_list.append(varbind)
+
+ var_list = netsnmp.VarList(*varbind_list)
+
+ netsnmp.snmpwalk(var_list,
+ DestHost=host,
+ Version=version,
+ Community=credential['community'],
+ **kwargs)
+
+ result = []
+ if not var_list:
+        logging.error("[hdsdiscovery][utils][snmp_walk] retrieved no record!")
+ return result
+
+ for var in var_list:
+ response = {}
+ response['elem_name'] = var.tag
+ response['iid'] = var.iid
+ response['value'] = var.val
+ response['type'] = var.type
+ result.append(response)
+
+ return result
+
+
+def snmp_get(host, credential, object_type, **kwargs):
+    """Implementation of snmp get functionality.
+
+ :param object_type: mib object
+ :param host: switch ip
+ :param credential: the dict of credential to access switch
+ """
+ try:
+ import netsnmp
+
+ except ImportError:
+        logging.error("Module 'netsnmp' does not exist! Please install it first")
+ return None
+
+ if 'version' not in credential or 'community' not in credential:
+        logging.error('[utils][snmp_get] missing keywords in %s for %s',
+ credential, host)
+ return None
+
+ version = None
+ if credential['version'] in AUTH_VERSIONS:
+ version = AUTH_VERSIONS[credential['version']]
+
+ varbind = netsnmp.Varbind(object_type)
+ res = netsnmp.snmpget(varbind,
+ DestHost=host,
+ Version=version,
+ Community=credential['community'],
+ **kwargs)
+ if res and res[0]:
+ return res[0]
+
+ logging.info('no result found for %s %s', host, credential)
+ return None
+
+
+SSH_CREDENTIALS = {"username": "", "password": ""}
+SNMP_V2_CREDENTIALS = {"version": "", "community": ""}
+
+
+def is_valid_snmp_v2_credential(credential):
+ """check if credential is valid snmp v2 credential."""
+ if credential.keys() != SNMP_V2_CREDENTIALS.keys():
+ return False
+ if credential['version'] != '2c':
+ logging.error("The value of version in credential is not '2c'!")
+ return False
+ return True
+
+
+def is_valid_ssh_credential(credential):
+ """check if credential is valid ssh credential."""
+ if credential.keys() != SSH_CREDENTIALS.keys():
+ return False
+ return True
+
+
+def snmpget_by_cl(host, credential, oid, timeout=8, retries=3):
+ """snmpget by credential."""
+ if not is_valid_snmp_v2_credential(credential):
+ logging.error("[utils][snmpget_by_cl] Credential %s cannot be used "
+ "for SNMP request!", credential)
+ return None
+
+ version = credential['version']
+ community = credential['community']
+ cmd = "snmpget -v %s -c %s -Ob -r %s -t %s %s %s" % (
+ version, community, retries, timeout, host, oid)
+
+ returncode, output, err = exec_command(cmd)
+
+ if returncode and err:
+ logging.error("[snmpget_by_cl] %s", err)
+ raise TimeoutError(err.strip('\n'))
+
+ return output.strip('\n')
+
+
+def snmpwalk_by_cl(host, credential, oid, timeout=5, retries=3):
+ """snmpwalk by credential."""
+ if not is_valid_snmp_v2_credential(credential):
+ logging.error("[utils][snmpwalk_by_cl] Credential %s cannot be used "
+ "for SNMP request!", credential)
+ return None
+
+ version = credential['version']
+ community = credential['community']
+ cmd = "snmpwalk -v %s -c %s -Cc -r %s -t %s -Ob %s %s" % (
+ version, community, retries, timeout, host, oid)
+
+ returncode, output, err = exec_command(cmd)
+
+ if returncode and err:
+ logging.debug("[snmpwalk_by_cl] %s ", err)
+ raise TimeoutError(err)
+
+ result = []
+ if not output:
+ return result
+
+ output = output.split('\n')
+ for line in output:
+ if not line:
+ continue
+ temp = {}
+ arr = line.split(" ")
+ temp['iid'] = arr[0].split('.', 1)[-1]
+ temp['value'] = arr[-1]
+ result.append(temp)
+
+ return result
+
+
+def exec_command(command):
+ """Execute command.
+
+ Return a tuple: returncode, output and error message(None if no error).
+ """
+ sub_p = subprocess.Popen(command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ output, err_msg = sub_p.communicate()
+ return (sub_p.returncode, output, err_msg)
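The *_by_cl helpers shell out to the Net-SNMP command-line tools instead of the netsnmp bindings so retries and timeouts can be set per call. A usage sketch against the bridge FDB (host and community are placeholders):

```python
# Placeholder host/community; snmpwalk_by_cl returns a list of
# {'iid': ..., 'value': ...} dicts parsed from the CLI output.
from compass.hdsdiscovery import utils
from compass.hdsdiscovery.error import TimeoutError

credential = {'version': '2c', 'community': 'public'}
try:
    entries = utils.snmpwalk_by_cl('10.0.0.1', credential,
                                   'BRIDGE-MIB::dot1dTpFdbPort')
except TimeoutError:
    entries = []
for entry in entries:
    print(entry['iid'], '->', entry['value'])
```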
diff --git a/compass-tasks/hdsdiscovery/vendors/__init__.py b/compass-tasks/hdsdiscovery/vendors/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/appliance/__init__.py b/compass-tasks/hdsdiscovery/vendors/appliance/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/appliance/__init__.py
diff --git a/compass-tasks/hdsdiscovery/vendors/appliance/appliance.py b/compass-tasks/hdsdiscovery/vendors/appliance/appliance.py
new file mode 100644
index 0000000..3d66f4e
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/appliance/appliance.py
@@ -0,0 +1,34 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Vendor: Compass Appliance"""
+from compass.hdsdiscovery import base
+
+
+# Vendor_loader will load vendor instance by CLASS_NAME
+CLASS_NAME = 'Appliance'
+
+
+class Appliance(base.BaseSnmpVendor):
+ """Fake SNMP object for compass appliance."""
+
+ def __init__(self):
+ base.BaseSnmpVendor.__init__(self, ['appliance'])
+ self.__name = 'appliance'
+
+ @property
+ def name(self):
+        """Get 'name' property."""
+ return self.__name
diff --git a/compass-tasks/hdsdiscovery/vendors/appliance/plugins/__init__.py b/compass-tasks/hdsdiscovery/vendors/appliance/plugins/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/appliance/plugins/__init__.py
diff --git a/compass-tasks/hdsdiscovery/vendors/appliance/plugins/mac.py b/compass-tasks/hdsdiscovery/vendors/appliance/plugins/mac.py
new file mode 100644
index 0000000..cc14881
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/appliance/plugins/mac.py
@@ -0,0 +1,48 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compass Appliance Mac module."""
+from compass.hdsdiscovery import base
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+import logging
+
+
+CLASS_NAME = "Mac"
+
+
+class Mac(base.BaseSnmpMacPlugin):
+ """Processes MAC address."""
+
+ def __init__(self, host, credential):
+ self.host = host
+ # self.credential = credential
+ # return
+
+ def scan(self):
+        """Implements the scan method of the BasePlugin class.
+
+ .. note::
+ Dummy scan function for compass appliance.
+ Returns fixed mac addresses.
+ """
+ mac_list = None
+ machine_lists = util.load_configs(setting.MACHINE_LIST_DIR)
+ for items in machine_lists:
+ for item in items['MACHINE_LIST']:
+ for k, v in item.items():
+ if k == self.host:
+ mac_list = v
+ return mac_list
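The appliance plugin never touches SNMP: it looks the switch IP up in the MACHINE_LIST configs and returns the stored records directly. A sketch of the layout the nested loops above imply (all values invented):

```python
# Invented MACHINE_LIST structure matching the lookup in scan():
# each config maps a switch IP to its list of mac records.
machine_lists = [{
    'MACHINE_LIST': [
        {'10.0.0.1': [
            {'port': '1', 'mac': '00:1c:6d:28:c7:16', 'vlan': 0},
        ]},
    ],
}]

host = '10.0.0.1'
mac_list = None
for items in machine_lists:
    for item in items['MACHINE_LIST']:
        for key, value in item.items():
            if key == host:
                mac_list = value
```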
diff --git a/compass-tasks/hdsdiscovery/vendors/arista/__init__.py b/compass-tasks/hdsdiscovery/vendors/arista/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/arista/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/arista/arista.py b/compass-tasks/hdsdiscovery/vendors/arista/arista.py
new file mode 100644
index 0000000..5eacea1
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/arista/arista.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Vendor: Arista."""
+from compass.hdsdiscovery import base
+
+
+# Vendor_loader will load vendor instance by CLASS_NAME
+CLASS_NAME = 'Arista'
+
+
+class Arista(base.BaseSnmpVendor):
+ """Arista Network switch object."""
+
+ def __init__(self):
+ base.BaseSnmpVendor.__init__(self, ['arista'])
+ self._name = 'arista'
+
+ @property
+ def name(self):
+        """Get 'name' property."""
+ return self._name
diff --git a/compass-tasks/hdsdiscovery/vendors/arista/plugins/__init__.py b/compass-tasks/hdsdiscovery/vendors/arista/plugins/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/arista/plugins/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/arista/plugins/mac.py b/compass-tasks/hdsdiscovery/vendors/arista/plugins/mac.py
new file mode 100644
index 0000000..ed2f331
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/arista/plugins/mac.py
@@ -0,0 +1,24 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pica8 Switch Mac module."""
+from compass.hdsdiscovery.base import BaseSnmpMacPlugin
+
+
+CLASS_NAME = 'Mac'
+
+
+class Mac(BaseSnmpMacPlugin):
+ """Process MAC address by Arista switch."""
+ pass
diff --git a/compass-tasks/hdsdiscovery/vendors/hp/__init__.py b/compass-tasks/hdsdiscovery/vendors/hp/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/hp/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/hp/hp.py b/compass-tasks/hdsdiscovery/vendors/hp/hp.py
new file mode 100644
index 0000000..7ddc5e9
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/hp/hp.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Vendor: HP."""
+from compass.hdsdiscovery import base
+
+
+# Vendor_loader will load vendor instance by CLASS_NAME
+CLASS_NAME = 'Hp'
+
+
+class Hp(base.BaseSnmpVendor):
+ """Hp switch object."""
+
+ def __init__(self):
+ base.BaseSnmpVendor.__init__(self, ['hp', 'procurve'])
+ self.names = ['hp', 'procurve']
+
+ @property
+ def name(self):
+        """Get 'name' property."""
+ return self.names[0]
diff --git a/compass-tasks/hdsdiscovery/vendors/hp/plugins/__init__.py b/compass-tasks/hdsdiscovery/vendors/hp/plugins/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/hp/plugins/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/hp/plugins/mac.py b/compass-tasks/hdsdiscovery/vendors/hp/plugins/mac.py
new file mode 100644
index 0000000..3bc81f4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/hp/plugins/mac.py
@@ -0,0 +1,23 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""HP Switch Mac module."""
+from compass.hdsdiscovery.base import BaseSnmpMacPlugin
+
+CLASS_NAME = 'Mac'
+
+
+class Mac(BaseSnmpMacPlugin):
+ """Process MAC address by HP switch."""
+ pass
diff --git a/compass-tasks/hdsdiscovery/vendors/huawei/__init__.py b/compass-tasks/hdsdiscovery/vendors/huawei/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/huawei/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/huawei/huawei.py b/compass-tasks/hdsdiscovery/vendors/huawei/huawei.py
new file mode 100644
index 0000000..19fd043
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/huawei/huawei.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Huawei Switch."""
+from compass.hdsdiscovery import base
+
+
+# Vendor_loader will load vendor instance by CLASS_NAME
+CLASS_NAME = "Huawei"
+
+
+class Huawei(base.BaseSnmpVendor):
+ """Huawei switch."""
+
+ def __init__(self):
+ base.BaseSnmpVendor.__init__(self, ["huawei"])
+ self.__name = "huawei"
+
+ @property
+ def name(self):
+ """Return switch name."""
+ return self.__name
diff --git a/compass-tasks/hdsdiscovery/vendors/huawei/plugins/__init__.py b/compass-tasks/hdsdiscovery/vendors/huawei/plugins/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/huawei/plugins/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/huawei/plugins/mac.py b/compass-tasks/hdsdiscovery/vendors/huawei/plugins/mac.py
new file mode 100644
index 0000000..49d3863
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/huawei/plugins/mac.py
@@ -0,0 +1,63 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Huawei Switch Mac module."""
+import logging
+
+from compass.hdsdiscovery.base import BaseSnmpMacPlugin
+from compass.hdsdiscovery import utils
+
+
+CLASS_NAME = "Mac"
+
+
+class Mac(BaseSnmpMacPlugin):
+ """Processes MAC address."""
+
+ def __init__(self, host, credential):
+ super(Mac, self).__init__(
+ host, credential,
+ 'HUAWEI-L2MAM-MIB::hwDynFdbPort')
+
+ def scan(self):
+        """Implements the scan method of the BasePlugin class.
+
+ .. note::
+           In this mac module, MAC addresses are retrieved via the
+           snmpwalk command line.
+ """
+ results = utils.snmpwalk_by_cl(self.host, self.credential, self.oid)
+
+ if not results:
+ logging.info("[Huawei][mac] No results returned from SNMP walk!")
+ return None
+
+ mac_list = []
+
+ for entity in results:
+ # The format of 'iid' is like '248.192.1.214.34.15.31.1.48'
+ # The first 6 numbers will be the MAC address
+ # The 7th number is its vlan ID
+ numbers = entity['iid'].split('.')
+ mac = self.get_mac_address(numbers[:6])
+ vlan = numbers[6]
+ port = self.get_port(entity['value'])
+
+ tmp = {}
+ tmp['port'] = port
+ tmp['mac'] = mac
+ tmp['vlan'] = vlan
+ mac_list.append(tmp)
+
+ return mac_list
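Decoding one walked entry makes the index layout concrete; the iid and value below are invented but follow the format documented in scan():

```python
# Sample hwDynFdbPort entry: the first six index numbers are the MAC
# octets, the seventh is the VLAN; 'value' is an interface index that
# get_port() resolves to a port name via ifName.
entity = {'iid': '248.192.1.214.34.15.31.1.48', 'value': '67'}
numbers = entity['iid'].split('.')
mac = ":".join("%0.2x" % int(num) for num in numbers[:6])
vlan = numbers[6]
assert mac == 'f8:c0:01:d6:22:0f'
assert vlan == '31'
```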
diff --git a/compass-tasks/hdsdiscovery/vendors/ovswitch/__init__.py b/compass-tasks/hdsdiscovery/vendors/ovswitch/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/ovswitch/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/ovswitch/ovswitch.py b/compass-tasks/hdsdiscovery/vendors/ovswitch/ovswitch.py
new file mode 100644
index 0000000..4d03328
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/ovswitch/ovswitch.py
@@ -0,0 +1,76 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Open Vswitch module."""
+import logging
+import re
+
+from compass.hdsdiscovery import base
+from compass.hdsdiscovery import utils
+
+
+# Vendor_loader will load vendor instance by CLASS_NAME
+CLASS_NAME = "OVSwitch"
+
+
+class OVSwitch(base.BaseVendor):
+    """Open vSwitch."""
+ def __init__(self):
+ self.__name = "Open vSwitch"
+
+ def is_this_vendor(self, sys_info, host=None, credential=None, **kwargs):
+        """Determine if the host is associated with this vendor.
+
+        :param host: switch's IP address
+ :param credential: credential to access switch
+ """
+ result = sys_info
+ if host and credential:
+ if utils.is_valid_ssh_credential(credential):
+ user = credential['username']
+ pwd = credential['password']
+
+ else:
+ msg = ("[OVSwitch]The format of credential %r is not for SSH "
+ "or incorrect Keywords! " % credential)
+ logging.info(msg)
+ return False
+
+ cmd = "ovs-vsctl -V"
+ result = None
+ try:
+ result = utils.ssh_remote_execute(host, user, pwd, cmd)
+ logging.debug('%s result for %s is %s', cmd, host, result)
+ if not result:
+ return False
+ except Exception as exc:
+ logging.error("No vendor or connection failed to run %s", cmd)
+ logging.exception(exc)
+ return False
+
+ if isinstance(result, str):
+ result = [result]
+
+ for line in result:
+ if not line:
+ continue
+ if re.search(r"\b" + re.escape(self.__name) + r"\b", line):
+ return True
+
+ return False
+
+ @property
+ def name(self):
+        """Open vSwitch name."""
+ return self.__name
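Since Open vSwitch exposes no SNMP sysDescr, the check above runs "ovs-vsctl -V" over SSH and searches the banner for the product name. For example (the version number is invented):

```python
# A typical 'ovs-vsctl -V' banner line; the version is invented.
import re

line = 'ovs-vsctl (Open vSwitch) 2.5.0'
assert re.search(r"\b" + re.escape('Open vSwitch') + r"\b", line)
```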
diff --git a/compass-tasks/hdsdiscovery/vendors/ovswitch/plugins/__init__.py b/compass-tasks/hdsdiscovery/vendors/ovswitch/plugins/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/ovswitch/plugins/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/ovswitch/plugins/mac.py b/compass-tasks/hdsdiscovery/vendors/ovswitch/plugins/mac.py
new file mode 100644
index 0000000..5f497a0
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/ovswitch/plugins/mac.py
@@ -0,0 +1,87 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Open Vswitch Mac address module."""
+import logging
+
+from compass.hdsdiscovery import base
+from compass.hdsdiscovery import utils
+
+
+CLASS_NAME = "Mac"
+
+
+class Mac(base.BasePlugin):
+    """Open vSwitch MAC address plugin."""
+ def __init__(self, host, credential):
+ self.host = host
+ self.credential = credential
+
+ def process_data(self, oper="SCAN", **kwargs):
+        """Dynamically call the function according to 'oper'.
+
+ :param oper: operation of data processing
+ """
+ func_name = oper.lower()
+ return getattr(self, func_name)(**kwargs)
+
+ def scan(self, **kwargs):
+        """Implements the scan method of the BasePlugin class.
+
+ .. note::
+           In this module, MAC addresses are retrieved over ssh.
+ """
+ try:
+ user = self.credential['username']
+ pwd = self.credential['password']
+ except KeyError:
+ logging.error("Cannot find username and password in credential")
+ return None
+
+        cmd = ("BRIDGES=$(ovs-vsctl show |grep Bridge |cut -f 2 -d '\"');"
+               "for br in $BRIDGES; do "
+               "PORTS=$(ovs-ofctl show $br |grep addr |cut -f 1 -d ':' "
+               "|egrep -v 'eth|wlan|LOCAL'|awk -F '(' '{print $1}');"
+               "for port in $PORTS; do "
+               "RESULT=$(ovs-appctl fdb/show $br |"
+               "awk '$1 == '$port' {print $1\" \"$2\" \"$3}');"
+               "echo '$RESULT';"
+               "done;"
+               "done;")
+ output = None
+ try:
+ output = utils.ssh_remote_execute(self.host, user, pwd, cmd)
+ except Exception as error:
+ logging.exception(error)
+ return None
+
+ logging.debug("[scan][output] output is %s", output)
+ if not output:
+ return None
+
+ fields_arr = ['port', 'vlan', 'mac']
+
+ result = []
+ for line in output:
+ if not line or line == '\n':
+ continue
+
+ values_arr = line.split()
+ temp = {}
+ for field, value in zip(fields_arr, values_arr):
+ temp[field] = value
+
+ result.append(temp.copy())
+
+ return result
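The remote command prints "ovs-appctl fdb/show" rows, which are whitespace-separated port/vlan/mac triples; scan() zips them into dicts. A parsing sketch with invented rows:

```python
# Invented fdb/show-style output lines, parsed the same way scan() does.
output = ['1    0  00:1c:6d:28:c7:16\n',
          '2    5  00:1c:6d:28:c7:17\n']
fields_arr = ['port', 'vlan', 'mac']
result = [dict(zip(fields_arr, line.split()))
          for line in output if line.strip()]
# [{'port': '1', 'vlan': '0', 'mac': '00:1c:6d:28:c7:16'}, ...]
```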
diff --git a/compass-tasks/hdsdiscovery/vendors/pica8/__init__.py b/compass-tasks/hdsdiscovery/vendors/pica8/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/pica8/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/pica8/pica8.py b/compass-tasks/hdsdiscovery/vendors/pica8/pica8.py
new file mode 100644
index 0000000..856817e
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/pica8/pica8.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Vendor: Pica8."""
+from compass.hdsdiscovery import base
+
+
+# Vendor_loader will load vendor instance by CLASS_NAME
+CLASS_NAME = 'Pica8'
+
+
+class Pica8(base.BaseSnmpVendor):
+ """Pica8 switch object."""
+
+ def __init__(self):
+ base.BaseSnmpVendor.__init__(self, ['pica8'])
+ self._name = 'pica8'
+
+ @property
+ def name(self):
+ """Get 'name' proptery."""
+ return self._name
diff --git a/compass-tasks/hdsdiscovery/vendors/pica8/plugins/__init__.py b/compass-tasks/hdsdiscovery/vendors/pica8/plugins/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/pica8/plugins/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/hdsdiscovery/vendors/pica8/plugins/mac.py b/compass-tasks/hdsdiscovery/vendors/pica8/plugins/mac.py
new file mode 100644
index 0000000..d5ccfc0
--- /dev/null
+++ b/compass-tasks/hdsdiscovery/vendors/pica8/plugins/mac.py
@@ -0,0 +1,24 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pica8 Switch Mac module."""
+from compass.hdsdiscovery.base import BaseSnmpMacPlugin
+
+
+CLASS_NAME = 'Mac'
+
+
+class Mac(BaseSnmpMacPlugin):
+ """Process MAC address by Pica8 switch."""
+ pass
diff --git a/compass-tasks/log_analyzor/__init__.py b/compass-tasks/log_analyzor/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/log_analyzor/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/log_analyzor/adapter_matcher.py b/compass-tasks/log_analyzor/adapter_matcher.py
new file mode 100644
index 0000000..8630e01
--- /dev/null
+++ b/compass-tasks/log_analyzor/adapter_matcher.py
@@ -0,0 +1,126 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provider installing progress calculation for the adapter.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+import re
+
+
+class AdapterItemMatcher(object):
+ """Progress matcher for the os installing or package installing."""
+
+ def __init__(self, file_matchers):
+ self.file_matchers_ = file_matchers
+
+ def __repr__(self):
+ return '%r[file_matchers: %r]' % (
+ self.__class__.__name__, self.file_matchers_
+ )
+
+ def update_progress(
+ self, file_reader_factory, name, state, log_history_mapping
+ ):
+ """Update progress.
+
+ :param name: the fullname of the installing host.
+ :type name: str
+        :param state: installation state to update.
+        :param log_history_mapping: per-log-file history dicts to update.
+ """
+ for file_matcher in self.file_matchers_:
+ filename = file_matcher.filename_
+ if filename not in log_history_mapping:
+ log_history_mapping[filename] = {
+ 'filename': filename,
+ 'partial_line': '',
+ 'position': 0,
+ 'line_matcher_name': 'start',
+ 'percentage': 0.0,
+ 'message': '',
+ 'severity': 'INFO'
+ }
+ log_history = log_history_mapping[filename]
+ file_matcher.update_progress(
+ file_reader_factory, name, state, log_history
+ )
+
+
+class OSMatcher(object):
+ """Progress matcher for os installer."""
+
+ def __init__(
+ self, os_installer_name,
+ os_pattern, item_matcher,
+ file_reader_factory
+ ):
+ self.name_ = re.compile(os_installer_name)
+ self.os_regex_ = re.compile(os_pattern)
+ self.matcher_ = item_matcher
+ self.file_reader_factory_ = file_reader_factory
+
+ def __repr__(self):
+ return '%r[name:%r, os_pattern:%r, matcher:%r]' % (
+ self.__class__.__name__, self.name_.pattern,
+ self.os_regex_.pattern, self.matcher_)
+
+ def match(self, os_installer_name, os_name):
+ """Check if the os matcher is acceptable."""
+ if os_name is None:
+ return False
+ else:
+ return all([
+ self.name_.match(os_installer_name),
+ self.os_regex_.match(os_name)
+ ])
+
+ def update_progress(self, name, state, log_history_mapping):
+ """Update progress."""
+ self.matcher_.update_progress(
+ self.file_reader_factory_, name, state, log_history_mapping)
+
+
+class PackageMatcher(object):
+ """Progress matcher for package installer."""
+
+ def __init__(
+ self, package_installer_name, adapter_pattern,
+ item_matcher, file_reader_factory
+ ):
+ self.name_ = re.compile(package_installer_name)
+ self.adapter_regex_ = re.compile(adapter_pattern)
+ self.matcher_ = item_matcher
+ self.file_reader_factory_ = file_reader_factory
+
+ def __repr__(self):
+ return '%s[name:%s, adapter_pattern:%s, matcher:%s]' % (
+ self.__class__.__name__, self.name_.pattern,
+ self.adapter_regex_.pattern, self.matcher_)
+
+ def match(self, package_installer_name, adapter_name):
+ """Check if the package matcher is acceptable."""
+ if package_installer_name is None:
+ return False
+ else:
+ return all([
+ self.name_.match(package_installer_name),
+ self.adapter_regex_.match(adapter_name)
+ ])
+
+ def update_progress(self, name, state, log_history_mapping):
+ """Update progress."""
+ self.matcher_.update_progress(
+ self.file_reader_factory_, name, state, log_history_mapping
+ )
diff --git a/compass-tasks/log_analyzor/environment.py b/compass-tasks/log_analyzor/environment.py
new file mode 100644
index 0000000..80ff738
--- /dev/null
+++ b/compass-tasks/log_analyzor/environment.py
@@ -0,0 +1,29 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""module to provide environment to load progress calculator configurations.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+from compass.log_analyzor.adapter_matcher import AdapterItemMatcher
+from compass.log_analyzor.file_matcher import FileMatcher
+from compass.log_analyzor.file_matcher import FileReaderFactory
+from compass.log_analyzor.line_matcher import IncrementalProgress
+from compass.log_analyzor.line_matcher import LineMatcher
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+
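+# These imports are not used directly in this module; they are captured
+# through globals()/locals() below so that progress calculator
+# configuration files evaluated via util.load_configs can reference
+# AdapterItemMatcher, FileMatcher, FileReaderFactory,
+# IncrementalProgress and LineMatcher by name.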
+ENV_GLOBALS = globals()
+ENV_LOCALS = locals()
diff --git a/compass-tasks/log_analyzor/file_matcher.py b/compass-tasks/log_analyzor/file_matcher.py
new file mode 100644
index 0000000..be3143b
--- /dev/null
+++ b/compass-tasks/log_analyzor/file_matcher.py
@@ -0,0 +1,252 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to update intalling progress by processing log file.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+import os.path
+
+from compass.utils import setting_wrapper as setting
+
+
+class FileFilter(object):
+ """base class to filter log file."""
+ def __repr__(self):
+ return self.__class__.__name__
+
+ def filter(self, pathname):
+ """Filter log file.
+
+ :param pathname: the absolute path name to the log file.
+ """
+ raise NotImplementedError(str(self))
+
+
+class CompositeFileFilter(FileFilter):
+ """filter log file based on the list of filters."""
+ def __init__(self, filters):
+ self.filters_ = filters
+
+ def __str__(self):
+ return 'CompositeFileFilter[%s]' % self.filters_
+
+ def append_filter(self, file_filter):
+ """append filter."""
+ self.filters_.append(file_filter)
+
+ def filter(self, pathname):
+ """filter log file."""
+ for file_filter in self.filters_:
+ if not file_filter.filter(pathname):
+ return False
+
+ return True
+
+
+class FilterFileExist(FileFilter):
+ """filter log file if not exists."""
+ def filter(self, pathname):
+ """filter log file."""
+ file_exist = os.path.isfile(pathname)
+ if not file_exist:
+ logging.debug("%s is not exist", pathname)
+
+ return file_exist
+
+
+def get_file_filter():
+ """get file filter"""
+ composite_filter = CompositeFileFilter([FilterFileExist()])
+ return composite_filter
+
+
+class FileReader(object):
+ """Class to read log file.
+
+    The class supports reading a log file from the position where the
+    previous run stopped, and updates that position when it finishes
+    reading the log.
+ """
+ def __init__(self, pathname, log_history):
+ self.pathname_ = pathname
+ self.log_history_ = log_history
+
+ def __repr__(self):
+ return (
+ '%s[pathname:%s, log_history:%s]' % (
+ self.__class__.__name__, self.pathname_,
+ self.log_history_
+ )
+ )
+
+ def readline(self):
+ """Generate each line of the log file."""
+ old_position = self.log_history_['position']
+ position = self.log_history_['position']
+ partial_line = self.log_history_['partial_line']
+ try:
+ with open(self.pathname_) as logfile:
+ logfile.seek(position)
+ while True:
+ line = logfile.readline()
+ partial_line += line
+ position = logfile.tell()
+ if position > self.log_history_['position']:
+ self.log_history_['position'] = position
+
+ if partial_line.endswith('\n'):
+ self.log_history_['partial_line'] = ''
+ yield partial_line
+ partial_line = self.log_history_['partial_line']
+ else:
+ self.log_history_['partial_line'] = partial_line
+ break
+ if partial_line:
+ yield partial_line
+
+ except Exception as error:
+            logging.error('failed to process file %s', self.pathname_)
+ raise error
+
+ logging.debug(
+ 'processing file %s log %s bytes to position %s',
+ self.pathname_, position - old_position, position
+ )
+
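+# Illustrative sketch: reading a log file incrementally. The path below
+# is hypothetical; log_history normally comes from the mapping seeded
+# in AdapterItemMatcher.update_progress.
+#
+# log_history = {'position': 0, 'partial_line': ''}
+# reader = FileReader('/var/log/cobbler/anamon/host1/sys.log', log_history)
+# for line in reader.readline():
+#     print line
+# # the next run resumes from log_history['position'], so each complete
+# # line is normally yielded only once across repeated polls.
+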
+
+class FileReaderFactory(object):
+ """factory class to create FileReader instance."""
+
+ def __init__(self, logdir):
+ self.logdir_ = logdir
+ self.filefilter_ = get_file_filter()
+
+ def __str__(self):
+ return '%s[logdir: %s filefilter: %s]' % (
+ self.__class__.__name__, self.logdir_, self.filefilter_)
+
+ def get_file_reader(self, hostname, filename, log_history):
+ """Get FileReader instance.
+
+        :param hostname: the hostname of the installing host.
+        :param filename: the filename of the log file.
+        :param log_history: log history of the log file.
+
+ :returns: :class:`FileReader` instance if it is not filtered.
+ """
+ pathname = os.path.join(self.logdir_, hostname, filename)
+ logging.debug('get FileReader from %s', pathname)
+ if not self.filefilter_.filter(pathname):
+ logging.debug('%s is filtered', pathname)
+ return None
+
+ return FileReader(pathname, log_history)
+
+
+class FileMatcher(object):
+ """File matcher to get the installing progress from the log file."""
+ def __init__(self, line_matchers, min_progress, max_progress, filename):
+ if not 0.0 <= min_progress <= max_progress <= 1.0:
+ raise IndexError(
+                '%s restriction is not met: 0.0 <= min_progress'
+ '(%s) <= max_progress(%s) <= 1.0' % (
+ self.__class__.__name__,
+ min_progress,
+ max_progress))
+ if 'start' not in line_matchers:
+ raise KeyError(
+                'key `start` is not in line matchers %s' % line_matchers
+ )
+ self.line_matchers_ = line_matchers
+ self.min_progress_ = min_progress
+ self.max_progress_ = max_progress
+ self.progress_diff_ = max_progress - min_progress
+ self.filename_ = filename
+
+ def __repr__(self):
+ return (
+ '%r[filename: %r, progress:[%r:%r], '
+ 'line_matchers: %r]' % (
+ self.__class__.__name__, self.filename_,
+ self.min_progress_,
+ self.max_progress_, self.line_matchers_)
+ )
+
+ def update_progress_from_log_history(self, state, log_history):
+ file_percentage = log_history['percentage']
+ percentage = max(
+ self.min_progress_,
+ min(
+ self.max_progress_,
+ self.min_progress_ + file_percentage * self.progress_diff_
+ )
+ )
+ if (
+ percentage > state['percentage'] or
+ (
+ percentage == state['percentage'] and
+ log_history['message'] != state['message']
+ )
+ ):
+ state['percentage'] = percentage
+ state['message'] = log_history['message']
+ state['severity'] = log_history['severity']
+ else:
+ logging.debug(
+                'ignore updating state %s from log history %s '
+                'since the updated progress %s lags behind',
+ state, log_history, percentage
+ )
+
+ def update_progress(self, file_reader_factory, name, state, log_history):
+ """update progress from file.
+
+ :param fullname: the fullname of the installing host.
+ :type fullname: str
+ :param total_progress: Progress instance to update.
+
+ the function update installing progress by reading the log file.
+ It contains a list of line matcher, when one log line matches
+ with current line matcher, the installing progress is updated.
+ and the current line matcher got updated.
+ Notes: some line may be processed multi times. The case is the
+ last line of log file is processed in one run, while in the other
+ run, it will be reprocessed at the beginning because there is
+ no line end indicator for the last line of the file.
+ """
+ file_reader = file_reader_factory.get_file_reader(
+ name, self.filename_, log_history)
+ if not file_reader:
+ return
+
+ line_matcher_name = log_history['line_matcher_name']
+ for line in file_reader.readline():
+ if line_matcher_name not in self.line_matchers_:
+ logging.debug('early exit at\n%s\nbecause %s is not in %s',
+ line, line_matcher_name, self.line_matchers_)
+ break
+
+ same_line_matcher_name = line_matcher_name
+ while same_line_matcher_name in self.line_matchers_:
+ line_matcher = self.line_matchers_[same_line_matcher_name]
+ same_line_matcher_name, line_matcher_name = (
+ line_matcher.update_progress(line, log_history)
+ )
+ log_history['line_matcher_name'] = line_matcher_name
+ logging.debug(
+ 'updated log history %s after processing %s',
+ log_history, self
+ )
+ self.update_progress_from_log_history(state, log_history)
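+
+# Illustrative sketch: the line matchers form a small state machine
+# keyed by name, starting at 'start'. Each line selects the matcher for
+# the same line and for the next line, depending on whether it matched.
+# The keys and patterns below are hypothetical.
+#
+# from compass.log_analyzor.line_matcher import (
+#     IncrementalProgress, LineMatcher)
+#
+# line_matchers = {
+#     'start': LineMatcher(
+#         pattern=r'Starting install',
+#         progress=IncrementalProgress(0.0, 0.9, 0.1),
+#         match_nextline_next_matcher_name='done',
+#         unmatch_nextline_next_matcher_name='start',
+#     ),
+#     'done': LineMatcher(pattern=r'Install finished', progress=1.0),
+# }
+# file_matcher = FileMatcher(line_matchers, 0.0, 1.0, 'sys.log')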
diff --git a/compass-tasks/log_analyzor/line_matcher.py b/compass-tasks/log_analyzor/line_matcher.py
new file mode 100644
index 0000000..ada9ed6
--- /dev/null
+++ b/compass-tasks/log_analyzor/line_matcher.py
@@ -0,0 +1,206 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to get the progress when found match with a line of the log."""
+import logging
+import re
+
+from abc import ABCMeta
+
+from compass.utils import util
+
+
+class ProgressCalculator(object):
+ """base class to generate progress."""
+
+ __metaclass__ = ABCMeta
+
+ @classmethod
+ def update_progress(
+ cls, progress_data, message,
+ severity, log_history
+ ):
+ """Update progress with the given progress_data, message and severity.
+
+ :param progress_data: installing progress.
+ :type progress_data: float between 0 to 1.
+ :param message: installing progress message.
+ :param severity: installing message severity.
+        :param log_history: log history dict to update.
+ """
+ # the progress is only updated when the new progress
+ # is greater than the stored progress or the progress
+ # to update is the same but the message is different.
+ if (
+ progress_data > log_history['percentage'] or (
+ progress_data == log_history['percentage'] and
+ message != log_history['message']
+ )
+ ):
+ log_history['percentage'] = progress_data
+ if message:
+ log_history['message'] = message
+ if severity:
+ log_history['severity'] = severity
+ logging.debug('update progress to %s', log_history)
+ else:
+ logging.debug('ignore update progress %s to %s',
+ progress_data, log_history)
+
+ def update(self, message, severity, log_history):
+ """vritual method to update progress by message and severity.
+
+ :param message: installing message.
+ :param severity: installing severity.
+ """
+ raise NotImplementedError(str(self))
+
+ def __repr__(self):
+ return self.__class__.__name__
+
+
+class IncrementalProgress(ProgressCalculator):
+ """Class to increment the progress."""
+
+ def __init__(self, min_progress,
+ max_progress, incremental_ratio):
+ super(IncrementalProgress, self).__init__()
+ if not 0.0 <= min_progress <= max_progress <= 1.0:
+ raise IndexError(
+                '%s restriction is not met: 0.0 <= min_progress(%s)'
+ ' <= max_progress(%s) <= 1.0' % (
+ self.__class__.__name__, min_progress, max_progress))
+
+ if not 0.0 <= incremental_ratio <= 1.0:
+ raise IndexError(
+                '%s restriction is not met: '
+ '0.0 <= incremental_ratio(%s) <= 1.0' % (
+ self.__class__.__name__, incremental_ratio))
+
+ self.min_progress_ = min_progress
+ self.max_progress_ = max_progress
+ self.incremental_progress_ = (
+ incremental_ratio * (max_progress - min_progress))
+
+ def __str__(self):
+ return '%s[%s:%s:%s]' % (
+ self.__class__.__name__,
+ self.min_progress_,
+ self.max_progress_,
+ self.incremental_progress_
+ )
+
+ def update(self, message, severity, log_history):
+ """update progress from message and severity."""
+ progress_data = max(
+ self.min_progress_,
+ min(
+ self.max_progress_,
+ log_history['percentage'] + self.incremental_progress_
+ )
+ )
+ self.update_progress(progress_data,
+ message, severity, log_history)
+
+
+class RelativeProgress(ProgressCalculator):
+ """class to update progress to the given relative progress."""
+
+ def __init__(self, progress):
+ super(RelativeProgress, self).__init__()
+ if not 0.0 <= progress <= 1.0:
+ raise IndexError(
+                '%s restriction is not met: 0.0 <= progress(%s) <= 1.0' % (
+ self.__class__.__name__, progress))
+
+ self.progress_ = progress
+
+ def __str__(self):
+ return '%s[%s]' % (self.__class__.__name__, self.progress_)
+
+ def update(self, message, severity, log_history):
+ """update progress from message and severity."""
+ self.update_progress(
+ self.progress_, message, severity, log_history)
+
+
+class SameProgress(ProgressCalculator):
+ """class to update message and severity for progress."""
+
+ def update(self, message, severity, log_history):
+ """update progress from the message and severity."""
+ self.update_progress(log_history['percentage'], message,
+ severity, log_history)
+
+
+class LineMatcher(object):
+ """Progress matcher for each line."""
+
+ def __init__(self, pattern, progress=None,
+ message_template='', severity=None,
+ unmatch_sameline_next_matcher_name='',
+ unmatch_nextline_next_matcher_name='',
+ match_sameline_next_matcher_name='',
+ match_nextline_next_matcher_name=''):
+ self.regex_ = re.compile(pattern)
+ if not progress:
+ self.progress_ = SameProgress()
+ elif isinstance(progress, ProgressCalculator):
+ self.progress_ = progress
+ elif isinstance(progress, (int, long, float)):
+ self.progress_ = RelativeProgress(progress)
+ else:
+ raise TypeError(
+                'unsupported progress type %s: %s' % (
+ type(progress), progress))
+
+ self.message_template_ = message_template
+ self.severity_ = severity
+ self.unmatch_sameline_ = unmatch_sameline_next_matcher_name
+ self.unmatch_nextline_ = unmatch_nextline_next_matcher_name
+ self.match_sameline_ = match_sameline_next_matcher_name
+ self.match_nextline_ = match_nextline_next_matcher_name
+
+ def __repr__(self):
+ return '%r[pattern:%r, message_template:%r, severity:%r]' % (
+ self.__class__.__name__, self.regex_.pattern,
+ self.message_template_, self.severity_)
+
+ def update_progress(self, line, log_history):
+ """Update progress by the line.
+
+ :param line: one line in log file to indicate the installing progress.
+ .. note::
+ The line may be partial if the latest line of the log file is
+ not the whole line. But the whole line may be resent
+ in the next run.
+        :param log_history: log history dict to update.
+ """
+ mat = self.regex_.search(line)
+ if not mat:
+ return (
+ self.unmatch_sameline_,
+ self.unmatch_nextline_)
+
+ try:
+ message = self.message_template_ % mat.groupdict()
+ except Exception as error:
+ logging.error('failed to get message %s %% %s in line matcher %s',
+ self.message_template_, mat.groupdict(), self)
+ raise error
+
+ self.progress_.update(message, self.severity_, log_history)
+ return (
+ self.match_sameline_,
+ self.match_nextline_)
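+
+# Illustrative sketch: how the three ProgressCalculator variants move
+# log_history['percentage']. The values below are hypothetical.
+#
+# log_history = {'percentage': 0.2, 'message': '', 'severity': 'INFO'}
+#
+# # IncrementalProgress(0.0, 1.0, 0.1) adds 10% of the range per match,
+# # clamped to [0.0, 1.0]: 0.2 -> 0.3.
+# IncrementalProgress(0.0, 1.0, 0.1).update('step done', 'INFO', log_history)
+#
+# # RelativeProgress(0.8) jumps straight to 0.8.
+# RelativeProgress(0.8).update('almost done', 'INFO', log_history)
+#
+# # SameProgress keeps the percentage but refreshes message/severity.
+# SameProgress().update('still at 0.8', 'WARNING', log_history)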
diff --git a/compass-tasks/log_analyzor/progress_calculator.py b/compass-tasks/log_analyzor/progress_calculator.py
new file mode 100644
index 0000000..b0f35f2
--- /dev/null
+++ b/compass-tasks/log_analyzor/progress_calculator.py
@@ -0,0 +1,208 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""module to provide updating installing process function.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.log_analyzor.adapter_matcher import OSMatcher
+from compass.log_analyzor.adapter_matcher import PackageMatcher
+from compass.log_analyzor.environment import ENV_GLOBALS
+from compass.log_analyzor.environment import ENV_LOCALS
+from compass.log_analyzor.file_matcher import FileReaderFactory
+
+from compass.utils import setting_wrapper as setting
+from compass.utils import util
+
+OS_ADAPTER_CONFIGURATIONS = None
+PACKAGE_ADAPTER_CONFIGURATIONS = None
+PROGRESS_CALCULATOR_CONFIGURATIONS = None
+
+
+def _load_calculator_configurations(force=False):
+ global PROGRESS_CALCULATOR_CONFIGURATIONS
+ if force or PROGRESS_CALCULATOR_CONFIGURATIONS is None:
+ env_locals = {}
+ env_locals.update(ENV_GLOBALS)
+ env_locals.update(ENV_LOCALS)
+ PROGRESS_CALCULATOR_CONFIGURATIONS = util.load_configs(
+ setting.PROGRESS_CALCULATOR_DIR,
+ env_locals=env_locals
+ )
+ if not PROGRESS_CALCULATOR_CONFIGURATIONS:
+ logging.info('No configuration found for progress calculator.')
+
+ global OS_ADAPTER_CONFIGURATIONS
+ if force or OS_ADAPTER_CONFIGURATIONS is None:
+ OS_ADAPTER_CONFIGURATIONS = []
+ for progress_calculator_configuration in (
+ PROGRESS_CALCULATOR_CONFIGURATIONS
+ ):
+ if 'OS_LOG_CONFIGURATIONS' in (
+ progress_calculator_configuration
+ ):
+ os_installer_configurations = (
+ progress_calculator_configuration['OS_LOG_CONFIGURATIONS']
+ )
+ for os_installer_configuration in os_installer_configurations:
+ OS_ADAPTER_CONFIGURATIONS.append(OSMatcher(
+ os_installer_name=(
+ os_installer_configuration['os_installer_name']
+ ),
+ os_pattern=os_installer_configuration['os_pattern'],
+ item_matcher=(
+ os_installer_configuration['item_matcher']
+ ),
+ file_reader_factory=FileReaderFactory(
+ os_installer_configuration['logdir']
+ )
+ ))
+ if not OS_ADAPTER_CONFIGURATIONS:
+ logging.info(
+ 'no OS_LOG_CONFIGURATIONS section found '
+ 'in progress calculator.'
+ )
+ else:
+ logging.debug(
+ 'OS_ADAPTER_CONFIGURATIONS is\n%s',
+ OS_ADAPTER_CONFIGURATIONS
+ )
+
+ global PACKAGE_ADAPTER_CONFIGURATIONS
+ if force or PACKAGE_ADAPTER_CONFIGURATIONS is None:
+ PACKAGE_ADAPTER_CONFIGURATIONS = []
+ for progress_calculator_configuration in (
+ PROGRESS_CALCULATOR_CONFIGURATIONS
+ ):
+ if 'ADAPTER_LOG_CONFIGURATIONS' in (
+ progress_calculator_configuration
+ ):
+ package_installer_configurations = (
+ progress_calculator_configuration[
+ 'ADAPTER_LOG_CONFIGURATIONS'
+ ]
+ )
+ for package_installer_configuration in (
+ package_installer_configurations
+ ):
+ PACKAGE_ADAPTER_CONFIGURATIONS.append(PackageMatcher(
+ package_installer_name=(
+ package_installer_configuration[
+ 'package_installer_name'
+ ]
+ ),
+ adapter_pattern=(
+ package_installer_configuration['adapter_pattern']
+ ),
+ item_matcher=(
+ package_installer_configuration['item_matcher']
+ ),
+ file_reader_factory=FileReaderFactory(
+ package_installer_configuration['logdir']
+ )
+ ))
+ if not PACKAGE_ADAPTER_CONFIGURATIONS:
+ logging.info(
+                'no ADAPTER_LOG_CONFIGURATIONS section found '
+ 'in progress calculator.'
+ )
+ else:
+ logging.debug(
+ 'PACKAGE_ADAPTER_CONFIGURATIONS is\n%s',
+ PACKAGE_ADAPTER_CONFIGURATIONS
+ )
+
+
+def load_calculator_configurations(force_reload=False):
+ _load_calculator_configurations(force=force_reload)
+
+
+def _get_os_matcher(os_installer_name, os_name):
+ """Get OS adapter matcher by os name and installer name."""
+ _load_calculator_configurations()
+ for configuration in OS_ADAPTER_CONFIGURATIONS:
+ if configuration.match(os_installer_name, os_name):
+ return configuration
+ else:
+ logging.debug('configuration %s does not match %s and %s',
+ configuration, os_name, os_installer_name)
+ logging.error('No configuration found for os installer %s os %s',
+ os_installer_name, os_name)
+ return None
+
+
+def _get_package_matcher(
+ package_installer_name, adapter_name
+):
+ """Get package adapter matcher by adapter name and installer name."""
+ _load_calculator_configurations()
+ for configuration in PACKAGE_ADAPTER_CONFIGURATIONS:
+ if configuration.match(
+ package_installer_name,
+ adapter_name
+ ):
+ return configuration
+ else:
+ logging.debug('configuration %s does not match %s and %s',
+ configuration, adapter_name,
+ package_installer_name)
+    logging.error(
+        'No configuration found for package installer %s adapter %s',
+        package_installer_name, adapter_name
+    )
+ return None
+
+
+def update_host_progress(host_mapping):
+    for host_id, (host, host_state, host_log_history_mapping) in (
+        host_mapping.items()
+    ):
+ os_name = host['os_name']
+ os_installer_name = host['os_installer']['name']
+ os_matcher = _get_os_matcher(
+ os_installer_name, os_name
+ )
+ if not os_matcher:
+ continue
+ name = host[setting.HOST_INSTALLATION_LOGDIR_NAME]
+ os_matcher.update_progress(
+ name, host_state, host_log_history_mapping
+ )
+
+
+def update_clusterhost_progress(clusterhost_mapping):
+ for (
+ clusterhost_id,
+ (clusterhost, clusterhost_state, clusterhost_log_history_mapping)
+ ) in (
+ clusterhost_mapping.items()
+ ):
+ adapter_name = clusterhost['adapter_name']
+ package_installer_name = clusterhost['package_installer']['name']
+ package_matcher = _get_package_matcher(
+ package_installer_name,
+ adapter_name
+ )
+ if not package_matcher:
+ continue
+ name = clusterhost[setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME]
+ package_matcher.update_progress(
+ name, clusterhost_state,
+ clusterhost_log_history_mapping
+ )
+
+
+def update_cluster_progress(cluster_mapping):
+ for cluster_id, (cluster, cluster_state) in cluster_mapping.items():
+ pass
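+
+# Illustrative sketch: a minimal progress calculator configuration file
+# as evaluated from setting.PROGRESS_CALCULATOR_DIR with the names
+# exported by compass.log_analyzor.environment. The installer name,
+# pattern and log directory below are hypothetical.
+#
+# OS_LOG_CONFIGURATIONS = [{
+#     'os_installer_name': 'cobbler',
+#     'os_pattern': 'CentOS.*',
+#     'item_matcher': AdapterItemMatcher(file_matchers=[]),
+#     'logdir': '/var/log/cobbler/anamon',
+# }]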
diff --git a/compass-tasks/misc/Dockerfile b/compass-tasks/misc/Dockerfile
new file mode 100644
index 0000000..ed47aea
--- /dev/null
+++ b/compass-tasks/misc/Dockerfile
@@ -0,0 +1,53 @@
+FROM centos:latest
+
+# repos
+COPY misc/compass_install.repo /etc/yum.repos.d/compass_install.repo
+
+RUN rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
+ sed -i 's/^mirrorlist=https/mirrorlist=http/g' /etc/yum.repos.d/epel.repo && \
+ yum update -y
+
+# packages
+RUN yum --enablerepo=compass_install --nogpgcheck install -y python python-devel git amqp python-pip libffi-devel openssl-devel gcc python-setuptools MySQL-python supervisor redis sshpass python-keyczar vim ansible-2.2.1.0
+
+# code
+RUN mkdir -p /root/compass-tasks
+COPY . /root/compass-tasks
+RUN mkdir -p /root/compass-tasks/compass && \
+ touch /root/compass-tasks/compass/__init__.py
+RUN mv /root/compass-tasks/actions /root/compass-tasks/compass/ && \
+ mv /root/compass-tasks/apiclient /root/compass-tasks/compass/ && \
+ mv /root/compass-tasks/tasks /root/compass-tasks/compass/ && \
+ mv /root/compass-tasks/utils /root/compass-tasks/compass/ && \
+ mv /root/compass-tasks/deployment /root/compass-tasks/compass/ && \
+ mv /root/compass-tasks/db /root/compass-tasks/compass/ && \
+ mv /root/compass-tasks/hdsdiscovery /root/compass-tasks/compass/ && \
+ mv /root/compass-tasks/log_analyzor /root/compass-tasks/compass/
+
+# pip
+RUN easy_install --upgrade pip && \
+ pip install --upgrade pip && \
+ pip install --upgrade setuptools && \
+ pip install --upgrade Flask
+
+# conf
+RUN mkdir -p /etc/compass/ && \
+ mkdir -p /etc/compass/machine_list && \
+ mkdir -p /etc/compass/switch_list && \
+ #cp -rf /root/compass-tasks/conf/* /etc/compass/ && \
+ cd /root/compass-tasks && \
+ python setup.py install
+
+RUN mkdir -p /root/.ssh; \
+ echo "UserKnownHostsFile /dev/null" >> /root/.ssh/config; \
+ echo "StrictHostKeyChecking no" >> /root/.ssh/config
+
+COPY supervisord.conf /etc/supervisord.conf
+COPY start.sh /usr/local/bin/start.sh
+RUN mkdir -p /var/log/compass
+RUN mkdir -p /opt/ansible_callbacks
+#RUN git clone https://github.com/openstack-ansible/openstack-ansible-modules /opt/openstack-ansible-modules
+EXPOSE 6379
+VOLUME ["/var/ansible", "/etc/compass/machine_list", "/etc/compass/switch_list"]
+ENTRYPOINT ["/bin/bash", "-c"]
+CMD ["/usr/local/bin/start.sh"]
diff --git a/compass-tasks/misc/compass_install.repo b/compass-tasks/misc/compass_install.repo
new file mode 100644
index 0000000..6b97ed0
--- /dev/null
+++ b/compass-tasks/misc/compass_install.repo
@@ -0,0 +1,5 @@
+[compass_install]
+name=compass_repo
+baseurl=http://192.168.104.2:9999/download/compass_install/centos7/
+gpgcheck=0
+enabled=1
diff --git a/compass-tasks/requirements.txt b/compass-tasks/requirements.txt
new file mode 100644
index 0000000..7e7ba40
--- /dev/null
+++ b/compass-tasks/requirements.txt
@@ -0,0 +1,23 @@
+amqplib
+argparse
+celery
+Markdown<2.5
+Cheetah<=2.4.1
+Flask
+Flask-Login<=0.3.2
+Flask-RESTful
+Flask-Script
+Flask-SQLAlchemy
+Flask-WTF
+itsdangerous
+importlib
+lazypy
+lockfile
+netaddr
+# MySQL-python
+paramiko
+SQLAlchemy>=0.9.0
+simplejson
+requests
+redis
+ansible==2.2.0
diff --git a/compass-tasks/setup.py b/compass-tasks/setup.py
new file mode 100644
index 0000000..5ee2e12
--- /dev/null
+++ b/compass-tasks/setup.py
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+#
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""setup script."""
+try:
+ from setuptools import find_packages
+except ImportError:
+ from ez_setup import use_setuptools
+ use_setuptools()
+
+
+from setuptools.command.test import test as TestCommand
+from setuptools import setup
+
+
+import os
+import sys
+
+
+# This helps python setup.py test command to utilize tox
+# See the instruction at https://testrun.org/tox/latest/example/basic.html\
+# #integration-with-setuptools-distribute-test-commands
+
+class Tox(TestCommand):
+ """Tox to do the setup."""
+
+ def finalize_options(self):
+ TestCommand.finalize_options(self)
+ self.test_args = []
+ self.test_suite = True
+
+ def run_tests(self):
+ import tox
+ errno = tox.cmdline(self.test_args)
+ sys.exit(errno)
+
+
+INSTALL_REQUIRES_FILE = os.path.join(
+ os.path.dirname(__file__), 'requirements.txt')
+with open(INSTALL_REQUIRES_FILE, 'r') as requires_file:
+    REQUIREMENTS = [
+        line.strip() for line in requires_file
+        if line.strip() and not line.startswith('#')
+    ]
+
+DATA_FILES_DIR = os.path.join(
+ os.path.dirname(__file__), 'conf')
+DATA_FILES = []
+for parent_dir, sub_dirs, files in os.walk(DATA_FILES_DIR):
+    for filename in files:
+        DATA_FILES.append((parent_dir, [os.path.join(parent_dir, filename)]))
+
+setup(
+ name='compass-tasks',
+ version='0.1.0',
+
+ # general info
+ description="""compass-tasks: Compass tasks module""",
+ author='Compass Development Group',
+ author_email='dev@syscompass.org',
+ url='https://github.com/openstack/compass-core',
+ download_url='',
+
+ # dependency
+ install_requires=REQUIREMENTS,
+ packages=find_packages(exclude=['compass.tests']),
+ include_package_data=True,
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Information Technology',
+ 'Intended Audience :: System Administrators',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: POSIX :: Linux',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ ],
+ # data
+ # data_files=DATA_FILES,
+ # test,
+ tests_require=['tox'],
+ cmdclass={'test': Tox},
+)
diff --git a/compass-tasks/start.sh b/compass-tasks/start.sh
new file mode 100755
index 0000000..4d3956d
--- /dev/null
+++ b/compass-tasks/start.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+/sbin/init
+/usr/bin/supervisord
+tail -f /dev/null
diff --git a/compass-tasks/supervisord.conf b/compass-tasks/supervisord.conf
new file mode 100644
index 0000000..9d993d3
--- /dev/null
+++ b/compass-tasks/supervisord.conf
@@ -0,0 +1,135 @@
+; Sample supervisor config file.
+
+[unix_http_server]
+file=/var/run/supervisor/supervisor.sock ; (the path to the socket file)
+;chmod=0700 ; socket file mode (default 0700)
+;chown=nobody:nogroup ; socket file uid:gid owner
+;username=user ; (default is no username (open server))
+;password=123 ; (default is no password (open server))
+
+;[inet_http_server] ; inet (TCP) server disabled by default
+;port=127.0.0.1:9001 ; (ip_address:port specifier, *:port for all iface)
+;username=user ; (default is no username (open server))
+;password=123 ; (default is no password (open server))
+
+[supervisord]
+logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
+logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB)
+logfile_backups=10 ; (num of main logfile rotation backups;default 10)
+loglevel=info ; (log level;default info; others: debug,warn,trace)
+pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
+nodaemon=false ; (start in foreground if true;default false)
+minfds=1024 ; (min. avail startup file descriptors;default 1024)
+minprocs=200 ; (min. avail process descriptors;default 200)
+;umask=022 ; (process file creation umask;default 022)
+;user=chrism ; (default is current user, required if root)
+;identifier=supervisor ; (supervisord identifier, default is 'supervisor')
+;directory=/tmp ; (default is not to cd during start)
+;nocleanup=true ; (don't clean up tempfiles at start;default false)
+;childlogdir=/tmp ; ('AUTO' child log dir, default $TEMP)
+;environment=KEY=value ; (key value pairs to add to environment)
+;strip_ansi=false ; (strip ansi escape codes in logs; def. false)
+
+; the below section must remain in the config file for RPC
+; (supervisorctl/web interface) to work, additional interfaces may be
+; added by defining them in separate rpcinterface: sections
+[rpcinterface:supervisor]
+supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
+
+[supervisorctl]
+serverurl=unix:///var/run/supervisor/supervisor.sock ; use a unix:// URL for a unix socket
+;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket
+;username=chris ; should be same as http_username if set
+;password=123 ; should be same as http_password if set
+;prompt=mysupervisor ; cmd line prompt (default "supervisor")
+;history_file=~/.sc_history ; use readline history if available
+
+; The below sample program section shows all possible program subsection values,
+; create one or more 'real' program: sections to be able to control them under
+; supervisor.
+
+;[program:theprogramname]
+;command=/bin/cat ; the program (relative uses PATH, can take args)
+;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
+;numprocs=1 ; number of processes copies to start (def 1)
+;directory=/tmp ; directory to cwd to before exec (def no cwd)
+;umask=022 ; umask for process (default None)
+;priority=999 ; the relative start priority (default 999)
+;autostart=true ; start at supervisord start (default: true)
+;autorestart=true ; restart at unexpected quit (default: true)
+;startsecs=10 ; number of secs prog must stay running (def. 1)
+;startretries=3 ; max # of serial start failures (default 3)
+;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
+;stopsignal=QUIT ; signal used to kill process (default TERM)
+;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
+;user=chrism ; setuid to this UNIX account to run the program
+;redirect_stderr=true ; redirect proc stderr to stdout (default false)
+;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
+;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
+;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
+;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
+;stdout_events_enabled=false ; emit events on stdout writes (default false)
+;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
+;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
+;stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)
+;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
+;stderr_events_enabled=false ; emit events on stderr writes (default false)
+;environment=A=1,B=2 ; process environment additions (def no adds)
+;serverurl=AUTO ; override serverurl computation (childutils)
+
+; The below sample eventlistener section shows all possible
+; eventlistener subsection values, create one or more 'real'
+; eventlistener: sections to be able to handle event notifications
+; sent by supervisor.
+
+;[eventlistener:theeventlistenername]
+;command=/bin/eventlistener ; the program (relative uses PATH, can take args)
+;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
+;numprocs=1 ; number of processes copies to start (def 1)
+;events=EVENT ; event notif. types to subscribe to (req'd)
+;buffer_size=10 ; event buffer queue size (default 10)
+;directory=/tmp ; directory to cwd to before exec (def no cwd)
+;umask=022 ; umask for process (default None)
+;priority=-1 ; the relative start priority (default -1)
+;autostart=true ; start at supervisord start (default: true)
+;autorestart=unexpected ; restart at unexpected quit (default: unexpected)
+;startsecs=10 ; number of secs prog must stay running (def. 1)
+;startretries=3 ; max # of serial start failures (default 3)
+;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
+;stopsignal=QUIT ; signal used to kill process (default TERM)
+;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
+;user=chrism ; setuid to this UNIX account to run the program
+;redirect_stderr=true ; redirect proc stderr to stdout (default false)
+;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
+;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
+;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
+;stdout_events_enabled=false ; emit events on stdout writes (default false)
+;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
+;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
+;stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)
+;stderr_events_enabled=false ; emit events on stderr writes (default false)
+;environment=A=1,B=2 ; process environment additions
+;serverurl=AUTO ; override serverurl computation (childutils)
+
+; The below sample group section shows all possible group values,
+; create one or more 'real' group: sections to create "heterogeneous"
+; process groups.
+
+;[group:thegroupname]
+;programs=progname1,progname2 ; each refers to 'x' in [program:x] definitions
+;priority=999 ; the relative start priority (default 999)
+
+; The [include] section can just contain the "files" setting. This
+; setting can list multiple files (separated by whitespace or
+; newlines). It can also contain wildcards. The filenames are
+; interpreted as relative to this file. Included files *cannot*
+; include files themselves.
+
+[program:celeryd]
+command=nohup bash -c "C_FORCE_ROOT=1 CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper /usr/bin/celery worker &> /tmp/celery-worker.log" &
+
+[program:orca-heartbeatd]
+command=nohup bash -c "/usr/bin/redis-server" &
+[include]
+files = supervisord.d/*.ini
+
diff --git a/compass-tasks/tasks/__init__.py b/compass-tasks/tasks/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/tasks/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/tasks/client.py b/compass-tasks/tasks/client.py
new file mode 100644
index 0000000..ca7ad14
--- /dev/null
+++ b/compass-tasks/tasks/client.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to setup celery client.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+
+ .. note::
+ If CELERY_CONFIG_MODULE is set in environment, load celery config from
+ the filename declared in CELERY_CONFIG_MODULE.
+"""
+import os
+
+from celery import Celery
+
+
+celery = Celery(__name__)
+if 'CELERY_CONFIG_MODULE' in os.environ:
+ celery.config_from_envvar('CELERY_CONFIG_MODULE')
+else:
+ from compass.utils import celeryconfig_wrapper as celeryconfig
+ celery.config_from_object(celeryconfig)
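+
+# Usage sketch: a worker is typically started with the config module
+# name in the environment, mirroring the command in supervisord.conf:
+#
+#   C_FORCE_ROOT=1 \
+#   CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper \
+#       celery worker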
diff --git a/compass-tasks/tasks/tasks.py b/compass-tasks/tasks/tasks.py
new file mode 100644
index 0000000..f649afd
--- /dev/null
+++ b/compass-tasks/tasks/tasks.py
@@ -0,0 +1,326 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to define celery tasks.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from celery.signals import celeryd_init
+from celery.signals import setup_logging
+
+from compass.actions import clean
+from compass.actions import delete
+from compass.actions import deploy
+from compass.actions import install_callback
+from compass.actions import patch
+from compass.actions import poll_switch
+from compass.actions import update_progress
+from compass.db.api import adapter_holder as adapter_api
+from compass.db.api import database
+from compass.db.api import metadata_holder as metadata_api
+from compass.log_analyzor import progress_calculator
+
+from compass.tasks.client import celery
+from compass.utils import flags
+from compass.utils import logsetting
+from compass.utils import setting_wrapper as setting
+
+
+@celeryd_init.connect()
+def global_celery_init(**_):
+ """Initialization code."""
+ flags.init()
+ flags.OPTIONS.logfile = setting.CELERY_LOGFILE
+ logsetting.init()
+ database.init()
+ adapter_api.load_adapters()
+ metadata_api.load_metadatas()
+ adapter_api.load_flavors()
+ progress_calculator.load_calculator_configurations()
+
+
+@setup_logging.connect()
+def tasks_setup_logging(**_):
+ """Setup logging options from compass setting."""
+ flags.init()
+ flags.OPTIONS.logfile = setting.CELERY_LOGFILE
+ logsetting.init()
+
+
+@celery.task(name='compass.tasks.pollswitch')
+def pollswitch(
+ poller_email, ip_addr, credentials,
+ req_obj='mac', oper='SCAN'
+):
+ """Query switch and return expected result.
+
+    :param poller_email: email of the user who polls the switch.
+    :type poller_email: str
+    :param ip_addr: switch ip address.
+    :type ip_addr: str
+    :param credentials: switch credentials.
+    :type credentials: dict
+    :param req_obj: the object requested to query from switch.
+    :type req_obj: str
+ :param oper: the operation to query the switch (SCAN, GET, SET).
+ :type oper: str
+ """
+ try:
+ poll_switch.poll_switch(
+ poller_email, ip_addr, credentials,
+ req_obj=req_obj, oper=oper
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.cluster_health')
+def health_check(cluster_id, send_report_url, useremail):
+ """Verify the deployed cluster functionally works.
+
+ :param cluster_id: ID of the cluster
+ :param send_report_url: The URL which reports should send back
+ """
+ try:
+ deploy.health_check(cluster_id, send_report_url, useremail)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.deploy_cluster')
+def deploy_cluster(deployer_email, cluster_id, clusterhost_ids):
+ """Deploy the given cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ :param clusterhost_ids: the id of the hosts in the cluster
+ :type clusterhost_ids: list of int
+ """
+ try:
+ deploy.deploy(cluster_id, clusterhost_ids, deployer_email)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.redeploy_cluster')
+def redeploy_cluster(deployer_email, cluster_id):
+ """Redeploy the given cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ """
+ try:
+ deploy.redeploy(cluster_id, deployer_email)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.patch_cluster')
+def patch_cluster(patcher_email, cluster_id):
+ """Patch the existing cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ """
+ try:
+ patch.patch(cluster_id, patcher_email)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.reinstall_cluster')
+def reinstall_cluster(installer_email, cluster_id, clusterhost_ids):
+ """reinstall the given cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ :param clusterhost_ids: the id of the hosts in the cluster
+ :type clusterhost_ids: list of int
+ """
+ try:
+ deploy.redeploy(cluster_id, clusterhost_ids, installer_email)
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.delete_cluster')
+def delete_cluster(
+ deleter_email, cluster_id, clusterhost_ids,
+ delete_underlying_host=False
+):
+ """Delete the given cluster.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ :param clusterhost_ids: the id of the hosts in the cluster
+ :type clusterhost_ids: list of int
+ """
+ try:
+ delete.delete_cluster(
+ cluster_id, clusterhost_ids, deleter_email,
+ delete_underlying_host=delete_underlying_host
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.delete_cluster_host')
+def delete_cluster_host(
+ deleter_email, cluster_id, host_id,
+ delete_underlying_host=False
+):
+ """Delte the given cluster host.
+
+ :param cluster_id: id of the cluster
+ :type cluster_id: int
+ :param host_id: id of the host
+ :type host_id: int
+ """
+ try:
+ delete.delete_cluster_host(
+ cluster_id, host_id, deleter_email,
+ delete_underlying_host=delete_underlying_host
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.delete_host')
+def delete_host(deleter_email, host_id, cluster_ids):
+ """Delete the given host.
+
+ :param host_id: id of the host
+ :type host_id: int
+ :param cluster_ids: list of cluster id
+ :type cluster_ids: list of int
+ """
+ try:
+ delete.delete_host(
+ host_id, cluster_ids, deleter_email
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.clean_os_installer')
+def clean_os_installer(
+ os_installer_name, os_installer_settings
+):
+ """Clean os installer."""
+ try:
+ clean.clean_os_installer(
+ os_installer_name, os_installer_settings
+ )
+ except Exception as error:
+        logging.exception(error)
+
+
+@celery.task(name='compass.tasks.clean_package_installer')
+def clean_package_installer(
+ package_installer_name, package_installer_settings
+):
+ """Clean package installer."""
+ try:
+ clean.clean_package_installer(
+ package_installer_name, package_installer_settings
+ )
+ except Exception as error:
+        logging.exception(error)
+
+
+@celery.task(name='compass.tasks.poweron_host')
+def poweron_host(host_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.poweroff_host')
+def poweroff_host(host_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.reset_host')
+def reset_host(host_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.poweron_machine')
+def poweron_machine(machine_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.poweroff_machine')
+def poweroff_machine(machine_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.reset_machine')
+def reset_machine(machine_id):
+ """Deploy the given cluster."""
+ pass
+
+
+@celery.task(name='compass.tasks.os_installed')
+def os_installed(
+ host_id, clusterhosts_ready,
+ clusters_os_ready
+):
+ """callback when os is installed."""
+ try:
+ install_callback.os_installed(
+ host_id, clusterhosts_ready,
+ clusters_os_ready
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.package_installed')
+def package_installed(
+ cluster_id, host_id, cluster_ready, host_ready
+):
+ """callback when package is installed."""
+ try:
+ install_callback.package_installed(
+ cluster_id, host_id, cluster_ready, host_ready
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.cluster_installed')
+def cluster_installed(
+ cluster_id, clusterhosts_ready
+):
+ """callback when package is installed."""
+ try:
+ install_callback.cluster_installed(
+ cluster_id, clusterhosts_ready
+ )
+ except Exception as error:
+ logging.exception(error)
+
+
+@celery.task(name='compass.tasks.update_progress')
+def update_clusters_progress():
+ """Calculate the installing progress of the given cluster."""
+ logging.info('update_clusters_progress')
+ try:
+ update_progress.update_progress()
+ except Exception as error:
+ logging.exception(error)
diff --git a/compass-tasks/utils/__init__.py b/compass-tasks/utils/__init__.py
new file mode 100644
index 0000000..4ee55a4
--- /dev/null
+++ b/compass-tasks/utils/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/compass-tasks/utils/celeryconfig_wrapper.py b/compass-tasks/utils/celeryconfig_wrapper.py
new file mode 100644
index 0000000..b6644ba
--- /dev/null
+++ b/compass-tasks/utils/celeryconfig_wrapper.py
@@ -0,0 +1,44 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""celeryconfig wrapper.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+import os.path
+import urllib
+
+from compass.utils import setting_wrapper as setting
+
+
+# CELERY_RESULT_BACKEND = 'amqp://'
+
+# BROKER_URL = 'amqp://guest:guest@localhost:5672//'
+
+
+CELERY_IMPORTS = ('compass.tasks.tasks',)
+
+
+if setting.CELERYCONFIG_FILE:
+ CELERY_CONFIG = os.path.join(
+ str(setting.CELERYCONFIG_DIR),
+ str(setting.CELERYCONFIG_FILE))
+
+ try:
+ logging.info('load celery config from %s', CELERY_CONFIG)
+ execfile(CELERY_CONFIG, globals(), locals())
+ except Exception as error:
+ logging.exception(error)
+ raise error
diff --git a/compass-tasks/utils/flags.py b/compass-tasks/utils/flags.py
new file mode 100644
index 0000000..a3169f5
--- /dev/null
+++ b/compass-tasks/utils/flags.py
@@ -0,0 +1,91 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to load flags.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import sys
+
+from optparse import OptionParser
+
+
+class Flags(object):
+ """Class to store flags."""
+
+ PARSER = OptionParser()
+ PARSED_OPTIONS = None
+
+ @classmethod
+ def parse_args(cls):
+ """parse args."""
+ (options, argv) = Flags.PARSER.parse_args()
+ sys.argv = [sys.argv[0]] + argv
+ Flags.PARSED_OPTIONS = options
+
+ def __getattr__(self, name):
+ if Flags.PARSED_OPTIONS and hasattr(Flags.PARSED_OPTIONS, name):
+ return getattr(Flags.PARSED_OPTIONS, name)
+
+ for option in Flags.PARSER.option_list:
+ if option.dest == name:
+ return option.default
+
+ raise AttributeError('Option instance has no attribute %s' % name)
+
+ def __setattr__(self, name, value):
+ if Flags.PARSED_OPTIONS and hasattr(Flags.PARSED_OPTIONS, name):
+ setattr(Flags.PARSED_OPTIONS, name, value)
+ return
+
+ for option in Flags.PARSER.option_list:
+ if option.dest == name:
+ option.default = value
+ return
+
+ object.__setattr__(self, name, value)
+
+
+OPTIONS = Flags()
+
+
+def init():
+ """Init flag parsing."""
+ OPTIONS.parse_args()
+
+
+def add(flagname, **kwargs):
+ """Add a flag name and its setting.
+
+ :param flagname: flag name declared in cmd as --<flagname>=...
+ :type flagname: str
+ """
+ Flags.PARSER.add_option('--%s' % flagname,
+ dest=flagname, **kwargs)
+
+
+def add_bool(flagname, default=True, **kwargs):
+ """Add a bool flag name and its setting.
+
+ :param flagname: flag name declared in cmd as --[no]<flagname>.
+ :type flagname: str
+ :param default: default value
+ :type default: bool
+ """
+ Flags.PARSER.add_option('--%s' % flagname,
+ dest=flagname, default=default,
+ action="store_true", **kwargs)
+ Flags.PARSER.add_option('--no%s' % flagname,
+ dest=flagname,
+ action="store_false", **kwargs)
diff --git a/compass-tasks/utils/logsetting.py b/compass-tasks/utils/logsetting.py
new file mode 100644
index 0000000..836ebcb
--- /dev/null
+++ b/compass-tasks/utils/logsetting.py
@@ -0,0 +1,108 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to setup logging configuration.
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+
+import logging
+import logging.handlers
+import os
+import os.path
+import sys
+
+from compass.utils import flags
+from compass.utils import setting_wrapper as setting
+
+
+flags.add('loglevel',
+ help='logging level', default=setting.DEFAULT_LOGLEVEL)
+flags.add('logdir',
+ help='logging directory', default=setting.DEFAULT_LOGDIR)
+flags.add('logfile',
+ help='logging filename', default=None)
+flags.add('log_interval', type='int',
+ help='log interval', default=setting.DEFAULT_LOGINTERVAL)
+flags.add('log_interval_unit',
+ help='log interval unit', default=setting.DEFAULT_LOGINTERVAL_UNIT)
+flags.add('log_format',
+ help='log format', default=setting.DEFAULT_LOGFORMAT)
+flags.add('log_backup_count', type='int',
+ help='log backup count', default=setting.DEFAULT_LOGBACKUPCOUNT)
+
+
+# mapping str setting in flag --loglevel to logging level.
+LOGLEVEL_MAPPING = {
+ 'finest': logging.DEBUG - 2, # more detailed log.
+ 'fine': logging.DEBUG - 1, # detailed log.
+ 'debug': logging.DEBUG,
+ 'info': logging.INFO,
+ 'warning': logging.WARNING,
+ 'error': logging.ERROR,
+ 'critical': logging.CRITICAL,
+}
+
+
+logging.addLevelName(LOGLEVEL_MAPPING['fine'], 'fine')
+logging.addLevelName(LOGLEVEL_MAPPING['finest'], 'finest')
+
+
+# disable logging when logsetting.init not called
+logging.getLogger().setLevel(logging.CRITICAL)
+
+
+def getLevelByName(level_name):
+ """Get log level by level name."""
+ return LOGLEVEL_MAPPING[level_name]
+
+
+def init():
+ """Init loggsetting. It should be called after flags.init."""
+ loglevel = flags.OPTIONS.loglevel.lower()
+ logdir = flags.OPTIONS.logdir
+ logfile = flags.OPTIONS.logfile
+ logger = logging.getLogger()
+ if logger.handlers:
+ for handler in logger.handlers:
+ logger.removeHandler(handler)
+
+ if logdir:
+ if not logfile:
+ logfile = '%s.log' % os.path.basename(sys.argv[0])
+
+ handler = logging.handlers.TimedRotatingFileHandler(
+ os.path.join(logdir, logfile),
+ when=flags.OPTIONS.log_interval_unit,
+ interval=flags.OPTIONS.log_interval,
+ backupCount=flags.OPTIONS.log_backup_count)
+ else:
+ if not logfile:
+ handler = logging.StreamHandler(sys.stderr)
+ else:
+ handler = logging.handlers.TimedRotatingFileHandler(
+ logfile,
+ when=flags.OPTIONS.log_interval_unit,
+ interval=flags.OPTIONS.log_interval,
+ backupCount=flags.OPTIONS.log_backup_count)
+
+ if loglevel in LOGLEVEL_MAPPING:
+ logger.setLevel(LOGLEVEL_MAPPING[loglevel])
+ handler.setLevel(LOGLEVEL_MAPPING[loglevel])
+
+ formatter = logging.Formatter(
+ flags.OPTIONS.log_format)
+
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
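+
+
+# Usage sketch: flags.init() must run before logsetting.init(); flag
+# values such as logfile may be overridden in between, as done in
+# compass.tasks.tasks.global_celery_init:
+#
+# flags.init()
+# flags.OPTIONS.logfile = setting.CELERY_LOGFILE
+# logsetting.init()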
diff --git a/compass-tasks/utils/setting_wrapper.py b/compass-tasks/utils/setting_wrapper.py
new file mode 100644
index 0000000..0b3e9f7
--- /dev/null
+++ b/compass-tasks/utils/setting_wrapper.py
@@ -0,0 +1,175 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""comapss setting wrapper.
+
+ .. moduleauthor:: Xiaodong Wang ,xiaodongwang@huawei.com>
+"""
+import datetime
+import lazypy
+import logging
+import os
+import os.path
+
+
+# default setting
+CONFIG_DIR = os.environ.get('COMPASS_CONFIG_DIR', '/etc/compass')
+SQLALCHEMY_DATABASE_URI = 'sqlite://'
+SQLALCHEMY_DATABASE_POOL_TYPE = 'static'
+COBBLER_INSTALLATION_LOGDIR = '/var/log/cobbler/anamon'
+CHEF_INSTALLATION_LOGDIR = '/var/log/chef'
+INSTALLATION_LOGDIR = {
+ 'CobblerInstaller': COBBLER_INSTALLATION_LOGDIR,
+ 'ChefInstaller': CHEF_INSTALLATION_LOGDIR
+}
+CLUSTERHOST_INATALLATION_LOGDIR_NAME = 'name'
+HOST_INSTALLATION_LOGDIR_NAME = 'name'
+DEFAULT_LOGLEVEL = 'debug'
+DEFAULT_LOGDIR = '/tmp'
+DEFAULT_LOGINTERVAL = 1
+DEFAULT_LOGINTERVAL_UNIT = 'h'
+DEFAULT_LOGFORMAT = (
+ '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s')
+DEFAULT_LOGBACKUPCOUNT = 5
+WEB_LOGFILE = ''
+CELERY_LOGFILE = ''
+CELERYCONFIG_DIR = lazypy.delay(lambda: CONFIG_DIR)
+CELERYCONFIG_FILE = ''
+PROGRESS_UPDATE_INTERVAL = 30
+POLLSWITCH_INTERVAL = 60
+SWITCHES = [
+]
+
+USER_AUTH_HEADER_NAME = 'X-Auth-Token'
+USER_TOKEN_DURATION = '2h'
+COMPASS_ADMIN_EMAIL = 'admin@huawei.com'
+COMPASS_ADMIN_PASSWORD = 'admin'
+COMPASS_DEFAULT_PERMISSIONS = [
+ 'list_permissions',
+]
+SWITCHES_DEFAULT_FILTERS = []
+DEFAULT_SWITCH_IP = '0.0.0.0'
+DEFAULT_SWITCH_PORT = 0
+
+COMPASS_SUPPORTED_PROXY = 'http://127.0.0.1:3128'
+COMPASS_SUPPORTED_DEFAULT_NOPROXY = ['127.0.0.1']
+COMPASS_SUPPORTED_NTP_SERVER = '127.0.0.1'
+COMPASS_SUPPORTED_DNS_SERVERS = ['127.0.0.1']
+COMPASS_SUPPORTED_DOMAINS = []
+COMPASS_SUPPORTED_DEFAULT_GATEWAY = '127.0.0.1'
+COMPASS_SUPPORTED_LOCAL_REPO = 'http://127.0.0.1'
+
+PROGRESS_UPDATE_PID_FILE = '/var/run/progress_update.pid'
+
+PROXY_URL_PREFIX = 'http://10.145.81.205:5000'
+
+OS_INSTALLER_DIR = ''
+PACKAGE_INSTALLER_DIR = ''
+OS_DIR = ''
+ADAPTER_DIR = ''
+OS_METADATA_DIR = ''
+PACKAGE_METADATA_DIR = ''
+FLAVOR_METADATA_DIR = ''
+OS_FIELD_DIR = ''
+PACKAGE_FIELD_DIR = ''
+FLAVOR_FIELD_DIR = ''
+ADAPTER_ROLE_DIR = ''
+ADAPTER_FLAVOR_DIR = ''
+VALIDATOR_DIR = ''
+CALLBACK_DIR = ''
+TMPL_DIR = ''
+MACHINE_LIST_DIR = ''
+PROGRESS_CALCULATOR_DIR = ''
+OS_MAPPING_DIR = ''
+FLAVOR_MAPPING_DIR = ''
+PLUGINS_DIR = ''
+
+if not os.environ.get('COMPASS_IGNORE_SETTING'):
+ SETTING = os.environ.get('COMPASS_SETTING', '/etc/compass/setting')
+
+ try:
+ logging.info('load setting from %s', SETTING)
+ execfile(SETTING, globals(), locals())
+ except Exception as error:
+ logging.exception(error)
+ raise error
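+
+# The file named by COMPASS_SETTING (default /etc/compass/setting) is
+# plain python executed into this module's namespace, so any default
+# above can be overridden there. A hypothetical example:
+#
+#     # /etc/compass/setting
+#     DEFAULT_LOGLEVEL = 'info'
+#     DEFAULT_LOGDIR = '/var/log/compass'
+#     PROGRESS_UPDATE_INTERVAL = 60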
+
+if not OS_INSTALLER_DIR:
+ OS_INSTALLER_DIR = os.path.join(CONFIG_DIR, 'os_installer')
+
+if not PACKAGE_INSTALLER_DIR:
+ PACKAGE_INSTALLER_DIR = os.path.join(CONFIG_DIR, 'package_installer')
+
+if not OS_DIR:
+ OS_DIR = os.path.join(CONFIG_DIR, 'os')
+
+if not ADAPTER_DIR:
+ ADAPTER_DIR = os.path.join(CONFIG_DIR, 'adapter')
+
+if not OS_METADATA_DIR:
+ OS_METADATA_DIR = os.path.join(CONFIG_DIR, 'os_metadata')
+
+if not PACKAGE_METADATA_DIR:
+ PACKAGE_METADATA_DIR = os.path.join(CONFIG_DIR, 'package_metadata')
+
+if not FLAVOR_METADATA_DIR:
+ FLAVOR_METADATA_DIR = os.path.join(CONFIG_DIR, 'flavor_metadata')
+
+if not OS_FIELD_DIR:
+ OS_FIELD_DIR = os.path.join(CONFIG_DIR, 'os_field')
+
+if not PACKAGE_FIELD_DIR:
+ PACKAGE_FIELD_DIR = os.path.join(CONFIG_DIR, 'package_field')
+
+if not FLAVOR_FIELD_DIR:
+ FLAVOR_FIELD_DIR = os.path.join(CONFIG_DIR, 'flavor_field')
+
+if not ADAPTER_ROLE_DIR:
+ ADAPTER_ROLE_DIR = os.path.join(CONFIG_DIR, 'role')
+
+if not ADAPTER_FLAVOR_DIR:
+ ADAPTER_FLAVOR_DIR = os.path.join(CONFIG_DIR, 'flavor')
+
+if not VALIDATOR_DIR:
+ VALIDATOR_DIR = os.path.join(CONFIG_DIR, 'validator')
+
+if not CALLBACK_DIR:
+ CALLBACK_DIR = os.path.join(CONFIG_DIR, 'callback')
+
+if not TMPL_DIR:
+ TMPL_DIR = os.path.join(CONFIG_DIR, 'templates')
+
+if not MACHINE_LIST_DIR:
+ MACHINE_LIST_DIR = os.path.join(CONFIG_DIR, 'machine_list')
+
+if not PROGRESS_CALCULATOR_DIR:
+ PROGRESS_CALCULATOR_DIR = os.path.join(CONFIG_DIR, 'progress_calculator')
+
+if not OS_MAPPING_DIR:
+ OS_MAPPING_DIR = os.path.join(CONFIG_DIR, 'os_mapping')
+
+if not FLAVOR_MAPPING_DIR:
+ FLAVOR_MAPPING_DIR = os.path.join(CONFIG_DIR, 'flavor_mapping')
+
+if not PLUGINS_DIR:
+ PLUGINS_DIR = os.environ.get('COMPASS_PLUGINS_DIR',
+ os.path.join(CONFIG_DIR, 'plugins'))
diff --git a/compass-tasks/utils/util.py b/compass-tasks/utils/util.py
new file mode 100644
index 0000000..39978ca
--- /dev/null
+++ b/compass-tasks/utils/util.py
@@ -0,0 +1,395 @@
+# Copyright 2014 Huawei Technologies Co. Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to provider util functions in all compass code
+
+ .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+
+import crypt
+import datetime
+import logging
+import os
+import os.path
+import re
+import setting_wrapper as setting
+import sys
+import warnings
+
+
+def deprecated(func):
+ """This is a decorator which can be used to mark functions as deprecated.
+
+ It will result in a warning being emitted when the function is used.
+ """
+ def new_func(*args, **kwargs):
+ warnings.warn(
+ "Call to deprecated function %s." % func.__name__,
+ category=DeprecationWarning
+ )
+ return func(*args, **kwargs)
+
+ new_func.__name__ = func.__name__
+ new_func.__doc__ = func.__doc__
+ new_func.__dict__.update(func.__dict__)
+ return new_func
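+
+
+# Illustrative use (hypothetical function name):
+#
+#     @deprecated
+#     def old_api():
+#         pass
+#
+#     old_api()  # emits a DeprecationWarning naming old_api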
+
+
+def parse_datetime(date_time, exception_class=Exception):
+ """Parse datetime str to get datetime object.
+
+ The date time format is %Y-%m-%d %H:%M:%S
+ """
+ try:
+ return datetime.datetime.strptime(
+ date_time, '%Y-%m-%d %H:%M:%S'
+ )
+ except Exception as error:
+ logging.exception(error)
+ raise exception_class(
+ 'date time %s format is invalid' % date_time
+ )
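+
+
+# Example (illustrative):
+#
+#     >>> parse_datetime('2014-10-01 12:30:00')
+#     datetime.datetime(2014, 10, 1, 12, 30)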
+
+
+def parse_datetime_range(date_time_range, exception_class=Exception):
+ """parse datetime range str to pair of datetime objects.
+
+ The date time range format is %Y-%m-%d %H:%M:%S,%Y-%m-%d %H:%M:%S
+ """
+ try:
+ start, end = date_time_range.split(',')
+ except Exception as error:
+ logging.exception(error)
+ raise exception_class(
+ 'there is no `,` in date time range %s' % date_time_range
+ )
+ if start:
+ start_datetime = parse_datetime(start, exception_class)
+ else:
+ start_datetime = None
+ if end:
+ end_datetime = parse_datetime(end, exception_class)
+ else:
+ end_datetime = None
+ return start_datetime, end_datetime
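+
+
+# Example (illustrative): an open-ended range leaves one side as None.
+#
+#     >>> parse_datetime_range('2014-10-01 00:00:00,')
+#     (datetime.datetime(2014, 10, 1, 0, 0), None)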
+
+
+def parse_request_arg_dict(arg, exception_class=Exception):
+ """parse string to dict.
+
+ The str is formatted like a=b;c=d and parsed to
+ {'a': 'b', 'c': 'd'}
+ """
+ arg_dict = {}
+ arg_pairs = arg.split(';')
+ for arg_pair in arg_pairs:
+ try:
+ arg_name, arg_value = arg_pair.split('=', 1)
+ except Exception as error:
+ logging.exception(error)
+ raise exception_class(
+ 'there is no `=` in %s' % arg_pair
+ )
+ arg_dict[arg_name] = arg_value
+ return arg_dict
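+
+
+# Example (illustrative):
+#
+#     >>> sorted(parse_request_arg_dict('a=b;c=d').items())
+#     [('a', 'b'), ('c', 'd')]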
+
+
+def format_datetime(date_time):
+ """Generate string from datetime object."""
+ return date_time.strftime("%Y-%m-%d %H:%M:%S")
+
+
+def merge_dict(lhs, rhs, override=True):
+ """Merge nested right dict into left nested dict recursively.
+
+ :param lhs: dict to be merged into.
+ :type lhs: dict
+ :param rhs: dict to merge from.
+ :type rhs: dict
+ :param override: if True, the value in rhs overrides the value in lhs.
+ :type override: boolean
+ """
+ if not isinstance(lhs, dict) or not isinstance(rhs, dict):
+ if override:
+ return rhs
+ else:
+ return lhs
+
+ for key, value in rhs.items():
+ if key not in lhs:
+ lhs[key] = rhs[key]
+ else:
+ lhs[key] = merge_dict(lhs[key], value, override)
+
+ return lhs
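+
+
+# Example (illustrative): nested keys are merged; on conflicts the rhs
+# value wins when override=True.
+#
+#     merge_dict({'a': {'b': 1}, 'c': 1}, {'a': {'d': 2}, 'c': 2})
+#     # -> {'a': {'b': 1, 'd': 2}, 'c': 2}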
+
+
+def recursive_merge_dict(name, all_dicts, parents):
+ """Recursively merge parent dict into base dict."""
+ parent_name = parents.get(name, None)
+ base_dict = all_dicts.get(name, {})
+ if not parent_name:
+ return base_dict
+ merged = recursive_merge_dict(parent_name, all_dicts, parents)
+ return merge_dict(base_dict, merged, override=False)
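+
+
+# Example (illustrative): a child dict inherits missing keys from its
+# parent chain without overriding its own values.
+#
+#     all_dicts = {'base': {'x': 1, 'y': 1}, 'child': {'y': 2}}
+#     parents = {'child': 'base'}
+#     recursive_merge_dict('child', all_dicts, parents)
+#     # -> {'x': 1, 'y': 2}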
+
+
+def encrypt(value, crypt_method=None):
+ """Get encrypted value."""
+ if not crypt_method:
+ if hasattr(crypt, 'METHOD_MD5'):
+ crypt_method = crypt.METHOD_MD5
+ else:
+ # crypt.METHOD_MD5 only exists on python3.3+; on older
+ # pythons, generate an MD5-style salt by hand instead.
+ from random import choice
+ import string
+
+ _saltchars = string.ascii_letters + string.digits + './'
+
+ def _mksalt():
+ """generate salt."""
+ salt = '$1$'
+ salt += ''.join(choice(_saltchars) for _ in range(8))
+ return salt
+
+ crypt_method = _mksalt()
+
+ return crypt.crypt(value, crypt_method)
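+
+
+# Example (illustrative): with the MD5 fallback the result is an
+# MD5-crypt string; the exact value depends on the random salt.
+#
+#     encrypt('secret')  # e.g. '$1$ab12cd34$...'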
+
+
+def parse_time_interval(time_interval_str):
+ """parse string of time interval to time interval.
+
+ supported time interval unit: ['d', 'w', 'h', 'm', 's']
+ Examples:
+ time_interval_str: '3d 2h' time interval to 3 days and 2 hours.
+ """
+ if not time_interval_str:
+ return 0
+
+ time_interval_tuple = [
+ time_interval_element
+ for time_interval_element in time_interval_str.split(' ')
+ if time_interval_element
+ ]
+ time_interval_dict = {}
+ time_interval_unit_mapping = {
+ 'd': 'days',
+ 'w': 'weeks',
+ 'h': 'hours',
+ 'm': 'minutes',
+ 's': 'seconds'
+ }
+ for time_interval_element in time_interval_tuple:
+ mat = re.match(r'^([+-]?\d+)(w|d|h|m|s).*', time_interval_element)
+ if not mat:
+ continue
+
+ time_interval_value = int(mat.group(1))
+ time_interval_unit = time_interval_unit_mapping[mat.group(2)]
+ time_interval_dict[time_interval_unit] = (
+ time_interval_dict.get(time_interval_unit, 0) + time_interval_value
+ )
+
+ time_interval = datetime.timedelta(**time_interval_dict)
+ if sys.version_info[0:2] > (2, 6):
+ return time_interval.total_seconds()
+ else:
+ return (
+ time_interval.microseconds + (
+ time_interval.seconds + time_interval.days * 24 * 3600
+ ) * 1e6
+ ) / 1e6
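+
+
+# Example (illustrative): 3 days + 2 hours in seconds.
+#
+#     >>> parse_time_interval('3d 2h')
+#     266400.0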
+
+
+def get_plugins_config_files(name, suffix=".conf"):
+ """walk through each of plugin to find all the config files in the"""
+ """name directory"""
+
+ plugins_path = setting.PLUGINS_DIR
+ files = []
+ if os.path.exists(plugins_path):
+ for plugin in os.listdir(plugins_path):
+ plugin_path = os.path.join(plugins_path, plugin)
+ plugin_config = os.path.join(plugin_path, name)
+ if os.path.exists(plugin_config):
+ for component in os.listdir(plugin_config):
+ if not component.endswith(suffix):
+ continue
+ files.append(os.path.join(plugin_config, component))
+ return files
+
+
+def load_configs(
+ config_dir, config_name_suffix='.conf',
+ env_globals={}, env_locals={}
+):
+ """Load configurations from config dir."""
+ """The config file could be in the config_dir or in plugins config_dir"""
+ """The plugins config_dir is formed as, for example /etc/compass/adapter"""
+ """Then the plugins config_dir is /etc/compass/plugins/xxx/adapter"""
+
+ # TODO(Carl) instead of using config_dir, it should use a name such as
+ # adapter etc, however, doing it requires a lot client sites changes,
+ # will do it later.
+
+ configs = []
+ config_files = []
+ config_dir = str(config_dir)
+
+ """search for config_dir"""
+ if os.path.exists(config_dir):
+ for component in os.listdir(config_dir):
+ if not component.endswith(config_name_suffix):
+ continue
+ config_files.append(os.path.join(config_dir, component))
+
+ """search for plugins config_dir"""
+ index = config_dir.rfind("/")
+
+ config_files.extend(get_plugins_config_files(config_dir[index + 1:],
+ config_name_suffix))
+
+ if not config_files:
+ logging.error('no config files found in %s or in plugins', config_dir)
+ for path in config_files:
+ logging.debug('load config from %s', path)
+ config_globals = {}
+ config_globals.update(env_globals)
+ config_locals = {}
+ config_locals.update(env_locals)
+ try:
+ execfile(path, config_globals, config_locals)
+ except Exception as error:
+ logging.exception(error)
+ raise error
+ configs.append(config_locals)
+ return configs
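+
+
+# Illustrative call (hypothetical paths and keys): if
+# /etc/compass/adapter/openstack.conf contains plain python such as
+# NAME = 'openstack', the call below yields [{'NAME': 'openstack'}],
+# plus one dict per matching /etc/compass/plugins/*/adapter/*.conf.
+#
+#     configs = load_configs('/etc/compass/adapter')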
+
+
+def pretty_print(*contents):
+ """pretty print contents."""
+ if not contents:
+ print ""
+ else:
+ print "\n".join(contents)
+
+
+def get_switch_machines_from_file(filename):
+ """get switch machines from file."""
+ switches = []
+ switch_machines = {}
+ with open(filename) as switch_file:
+ for line in switch_file:
+ line = line.strip()
+ if not line:
+ # ignore empty line
+ continue
+
+ if line.startswith('#'):
+ # ignore comments
+ continue
+
+ columns = line.split(',')
+ if not columns:
+ # ignore empty line
+ continue
+
+ if columns[0] == 'switch':
+ (switch_ip, switch_vendor, switch_version,
+ switch_community, switch_state) = columns[1:]
+ switches.append({
+ 'ip': switch_ip,
+ 'vendor': switch_vendor,
+ 'credentials': {
+ 'version': switch_version,
+ 'community': switch_community,
+ },
+ 'state': switch_state,
+ })
+ elif columns[0] == 'machine':
+ switch_ip, switch_port, mac = columns[1:]
+ switch_machines.setdefault(switch_ip, []).append({
+ 'mac': mac,
+ 'port': switch_port,
+ })
+
+ return (switches, switch_machines)
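+
+
+# Illustrative input file (hypothetical values), one record per line:
+#
+#     # switch,<ip>,<vendor>,<snmp version>,<community>,<state>
+#     switch,192.168.1.1,huawei,2c,public,under_monitoring
+#     # machine,<switch ip>,<port>,<mac>
+#     machine,192.168.1.1,1,00:11:22:33:44:55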
+
+
+def execute_cli_by_ssh(cmd, host, username, password=None,
+ keyfile='/root/.ssh/id_rsa', nowait=False):
+ """SSH to execute script on remote machine
+
+ :param host: ip of the remote machine
+ :param username: username to access the remote machine
+ :param password: password to access the remote machine
+ :param cmd: command to execute
+
+ """
+ if not cmd:
+ logging.error("No command found!")
+ raise Exception('No command found!')
+
+ if nowait:
+ cmd = "nohup %s >/dev/null 2>&1 &" % cmd
+
+ stdin = None
+ stdout = None
+ stderr = None
+ client = None
+ try:
+ import paramiko
+ from paramiko import ssh_exception
+
+ client = paramiko.SSHClient()
+ client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+ if password:
+ client.connect(host, username=username, password=password)
+ else:
+ client.load_system_host_keys()
+ client.connect(
+ host, username=username,
+ key_filename=keyfile, look_for_keys=True
+ )
+ stdin, stdout, stderr = client.exec_command(cmd)
+ result = stdout.readlines()
+ logging.info("result of command '%s' is '%s'!" % (cmd, result))
+ return result
+
+ except ImportError:
+ err_msg = "Cannot find Paramiko package!"
+ logging.error(err_msg)
+ raise ImportError(err_msg)
+
+ except (ssh_exception.BadHostKeyException,
+ ssh_exception.AuthenticationException,
+ ssh_exception.SSHException):
+
+ err_msg = 'SSH connection error or command execution failed!'
+ logging.error(err_msg)
+ raise Exception(err_msg)
+
+ except Exception as exc:
+ logging.error(
+ 'Failed to execute command "%s", exception is %s' % (cmd, exc)
+ )
+ raise Exception(exc)
+
+ finally:
+ for resource in [stdin, stdout, stderr]:
+ if resource:
+ resource.close()
+
+ if client:
+ client.close()
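+
+
+# Illustrative call (hypothetical host and credentials): run a script
+# on a remote machine in the background and return immediately.
+#
+#     execute_cli_by_ssh(
+#         'bash /tmp/deploy.sh', '10.0.0.2', 'root',
+#         password='secret', nowait=True
+#     )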